From ebfc4e6ad02b0cef34ec3f446007b98d85af9296 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 6 Oct 2016 17:24:09 +0000 Subject: [PATCH 001/233] Initial commit, copy of commit 86587affafe77ef555f7c3839839de44f0f203f3 Author: Tian Xia Date: Tue Oct 4 10:01:52 2016 -0700 Allow filtering of show commands through admission control --- storage/rocksdb/CMakeLists.txt | 108 + storage/rocksdb/README | 38 + storage/rocksdb/event_listener.cc | 80 + storage/rocksdb/event_listener.h | 40 + storage/rocksdb/get_rocksdb_files.sh | 27 + storage/rocksdb/ha_rocksdb.cc | 10699 ++++++++++++++++ storage/rocksdb/ha_rocksdb.h | 1054 ++ storage/rocksdb/ha_rocksdb_proto.h | 80 + storage/rocksdb/logger.h | 73 + .../rocksdb/include/dup_key_update.inc | 69 + .../include/locking_issues_case1_1.inc | 51 + .../include/locking_issues_case1_2.inc | 48 + .../rocksdb/include/locking_issues_case2.inc | 97 + .../rocksdb/include/locking_issues_case3.inc | 69 + .../rocksdb/include/locking_issues_case4.inc | 68 + .../rocksdb/include/locking_issues_case5.inc | 75 + .../rocksdb/include/locking_issues_case6.inc | 75 + .../rocksdb/include/locking_issues_case7.inc | 89 + .../include/rocksdb_concurrent_delete.inc | 53 + storage/rocksdb/mysql-test/rocksdb/my.cnf | 7 + .../rocksdb/optimize_table_check_sst.pl | 22 + .../rocksdb/mysql-test/rocksdb/r/1st.result | 22 + .../rocksdb/r/add_index_inplace.result | 378 + .../r/add_index_inplace_sstfilewriter.result | 72 + .../r/allow_no_pk_concurrent_insert.result | 7 + .../rocksdb/r/allow_no_primary_key.result | 251 + .../r/allow_no_primary_key_with_sk.result | 780 ++ .../rocksdb/r/allow_os_buffer.result | 1 + .../mysql-test/rocksdb/r/alter_table.result | 183 + .../mysql-test/rocksdb/r/analyze_table.result | 29 + .../rocksdb/r/apply_changes_iter.result | 64 + .../rocksdb/r/autoinc_secondary.result | 16 + .../mysql-test/rocksdb/r/autoinc_vars.result | 64 + .../mysql-test/rocksdb/r/autoincrement.result | 1 + .../mysql-test/rocksdb/r/bloomfilter.result | 1235 ++ 
.../mysql-test/rocksdb/r/bloomfilter2.result | 71 + .../mysql-test/rocksdb/r/bloomfilter3.result | 122 + .../mysql-test/rocksdb/r/bloomfilter4.result | 30 + .../rocksdb/r/bloomfilter_skip.result | 1235 ++ .../mysql-test/rocksdb/r/bulk_load.result | 49 + .../mysql-test/rocksdb/r/cardinality.result | 50 + .../mysql-test/rocksdb/r/check_table.result | 68 + .../mysql-test/rocksdb/r/checkpoint.result | 59 + .../rocksdb/r/checksum_table.result | 87 + .../rocksdb/r/checksum_table_live.result | 20 + .../rocksdb/r/col_opt_default.result | 20 + .../rocksdb/r/col_opt_not_null.result | 2612 ++++ .../mysql-test/rocksdb/r/col_opt_null.result | 2270 ++++ .../rocksdb/r/col_opt_unsigned.result | 741 ++ .../rocksdb/r/col_opt_zerofill.result | 723 ++ .../mysql-test/rocksdb/r/collation.result | 128 + .../rocksdb/r/collation_exception.result | 25 + .../rocksdb/r/compact_deletes.result | 93 + .../rocksdb/r/compression_zstd.result | 2 + .../rocksdb/r/concurrent_alter.result | 12 + .../r/cons_snapshot_read_committed.result | 151 + .../r/cons_snapshot_repeatable_read.result | 144 + .../r/cons_snapshot_serializable.result | 24 + .../r/corrupted_data_reads_debug.result | 74 + .../mysql-test/rocksdb/r/create_table.result | 165 + .../mysql-test/rocksdb/r/deadlock.result | 37 + .../mysql-test/rocksdb/r/delete.result | 166 + .../rocksdb/r/delete_before_lock.result | 15 + .../mysql-test/rocksdb/r/delete_ignore.result | 59 + .../mysql-test/rocksdb/r/delete_quick.result | 24 + .../rocksdb/r/delete_with_keys.result | 38 + .../mysql-test/rocksdb/r/describe.result | 19 + .../mysql-test/rocksdb/r/drop_database.result | 6 + .../rocksdb/r/drop_index_inplace.result | 154 + .../mysql-test/rocksdb/r/drop_table.result | 71 + .../mysql-test/rocksdb/r/drop_table2.result | 53 + .../mysql-test/rocksdb/r/drop_table3.result | 20 + .../rocksdb/r/dup_key_update.result | 362 + .../rocksdb/r/duplicate_table.result | 15 + .../rocksdb/r/fail_system_cf.result | 4 + .../mysql-test/rocksdb/r/foreign_key.result | 25 + 
.../rocksdb/r/gap_lock_issue254.result | 9 + .../rocksdb/r/gap_lock_raise_error.result | 504 + .../rocksdb/r/get_error_message.result | 8 + .../mysql-test/rocksdb/r/handler_basic.result | 115 + .../mysql-test/rocksdb/r/hermitage.result | 648 + .../mysql-test/rocksdb/r/i_s_ddl.result | 17 + .../rocksdb/mysql-test/rocksdb/r/index.result | 42 + .../rocksdb/r/index_file_map.result | 28 + .../rocksdb/r/index_key_block_size.result | 51 + .../mysql-test/rocksdb/r/index_primary.result | 48 + .../rocksdb/r/index_type_btree.result | 42 + .../rocksdb/r/index_type_hash.result | 42 + .../rocksdb/r/information_schema.result | 78 + .../r/innodb_i_s_tables_disabled.result | 120 + .../mysql-test/rocksdb/r/insert.result | 202 + .../rocksdb/r/insert_optimized_config.result | 16 + .../rocksdb/r/insert_with_keys.result | 63 + .../mysql-test/rocksdb/r/issue100.result | 23 + .../rocksdb/r/issue100_delete.result | 17 + .../mysql-test/rocksdb/r/issue111.result | 32 + .../mysql-test/rocksdb/r/issue290.result | 28 + .../mysql-test/rocksdb/r/issue314.result | 12 + .../rocksdb/r/level_read_committed.result | 111 + .../rocksdb/r/level_read_uncommitted.result | 116 + .../rocksdb/r/level_repeatable_read.result | 100 + .../rocksdb/r/level_serializable.result | 56 + .../mysql-test/rocksdb/r/loaddata.result | 239 + .../rocksdb/mysql-test/rocksdb/r/lock.result | 108 + .../rocksdb/r/lock_rows_not_exist.result | 40 + .../rocksdb/r/locking_issues.result | 490 + .../rocksdb/mysql-test/rocksdb/r/misc.result | 84 + ...inlog_gtid_skip_empty_trans_rocksdb.result | 143 + .../mysql-test/rocksdb/r/mysqldump.result | 131 + .../mysql-test/rocksdb/r/mysqldump2.result | 16 + .../rocksdb/r/negative_stats.result | 9 + .../mysql-test/rocksdb/r/no_merge_sort.result | 63 + .../rocksdb/r/optimize_table.result | 81 + .../mysql-test/rocksdb/r/partition.result | 30 + .../mysql-test/rocksdb/r/perf_context.result | 160 + .../mysql-test/rocksdb/r/read_only_tx.result | 38 + .../rocksdb/r/records_in_range.result | 210 + 
.../mysql-test/rocksdb/r/repair_table.result | 37 + .../mysql-test/rocksdb/r/replace.result | 32 + .../mysql-test/rocksdb/r/rocksdb.result | 2456 ++++ .../rocksdb/r/rocksdb_cf_options.result | 61 + .../rocksdb/r/rocksdb_cf_reverse.result | 120 + .../rocksdb/r/rocksdb_checksums.result | 129 + .../r/rocksdb_concurrent_delete.result | 56 + .../rocksdb/r/rocksdb_datadir.result | 2 + .../mysql-test/rocksdb/r/rocksdb_icp.result | 227 + .../rocksdb/r/rocksdb_icp_rev.result | 193 + .../mysql-test/rocksdb/r/rocksdb_locks.result | 63 + .../mysql-test/rocksdb/r/rocksdb_parts.result | 123 + .../rocksdb/r/rocksdb_qcache.result | 37 + .../mysql-test/rocksdb/r/rocksdb_range.result | 290 + .../rocksdb/r/rocksdb_range2.result | 11 + .../rocksdb/r/rocksdb_row_stats.result | 66 + ...sdb_table_stats_sampling_pct_change.result | 23 + .../mysql-test/rocksdb/r/rpl_read_free.result | 321 + .../rocksdb/r/rpl_row_not_found.result | 56 + .../rocksdb/r/rpl_row_rocksdb.result | 42 + .../mysql-test/rocksdb/r/rpl_row_stats.result | 88 + .../rocksdb/r/rpl_row_triggers.result | 242 + .../mysql-test/rocksdb/r/rpl_savepoint.result | 103 + .../mysql-test/rocksdb/r/rpl_statement.result | 54 + .../rocksdb/r/rpl_statement_not_found.result | 56 + .../mysql-test/rocksdb/r/rqg_examples.result | 3 + .../mysql-test/rocksdb/r/rqg_runtime.result | 29 + .../rocksdb/r/rqg_transactions.result | 11 + .../mysql-test/rocksdb/r/select.result | 373 + .../rocksdb/r/select_for_update.result | 35 + ...elect_for_update_skip_locked_nowait.result | 28 + .../r/select_lock_in_share_mode.result | 38 + .../mysql-test/rocksdb/r/show_engine.result | 416 + .../rocksdb/r/show_table_status.result | 24 + .../mysql-test/rocksdb/r/shutdown.result | 9 + .../mysql-test/rocksdb/r/singledelete.result | 66 + .../rocksdb/r/slow_query_log.result | 10 + .../mysql-test/rocksdb/r/statistics.result | 69 + .../mysql-test/rocksdb/r/table_stats.result | 9 + .../mysql-test/rocksdb/r/tbl_opt_ai.result | 38 + .../rocksdb/r/tbl_opt_avg_row_length.result 
| 18 + .../rocksdb/r/tbl_opt_checksum.result | 18 + .../rocksdb/r/tbl_opt_connection.result | 26 + .../rocksdb/r/tbl_opt_data_index_dir.result | 20 + .../rocksdb/r/tbl_opt_delay_key_write.result | 18 + .../rocksdb/r/tbl_opt_insert_method.result | 18 + .../rocksdb/r/tbl_opt_key_block_size.result | 18 + .../rocksdb/r/tbl_opt_max_rows.result | 18 + .../rocksdb/r/tbl_opt_min_rows.result | 18 + .../rocksdb/r/tbl_opt_pack_keys.result | 18 + .../rocksdb/r/tbl_opt_password.result | 18 + .../rocksdb/r/tbl_opt_row_format.result | 18 + .../mysql-test/rocksdb/r/tbl_opt_union.result | 16 + .../rocksdb/r/tbl_standard_opts.result | 46 + .../mysql-test/rocksdb/r/transaction.result | 936 ++ .../rocksdb/r/truncate_table.result | 33 + .../rocksdb/r/truncate_table3.result | 18 + .../mysql-test/rocksdb/r/type_binary.result | 48 + .../rocksdb/r/type_binary_indexes.result | 80 + .../mysql-test/rocksdb/r/type_bit.result | 53 + .../rocksdb/r/type_bit_indexes.result | 58 + .../mysql-test/rocksdb/r/type_blob.result | 57 + .../rocksdb/r/type_blob_indexes.result | 188 + .../mysql-test/rocksdb/r/type_bool.result | 73 + .../mysql-test/rocksdb/r/type_char.result | 76 + .../rocksdb/r/type_char_indexes.result | 73 + .../r/type_char_indexes_collation.result | 109 + .../rocksdb/r/type_date_time.result | 53 + .../rocksdb/r/type_date_time_indexes.result | 119 + .../mysql-test/rocksdb/r/type_decimal.result | 179 + .../mysql-test/rocksdb/r/type_enum.result | 47 + .../rocksdb/r/type_enum_indexes.result | 69 + .../mysql-test/rocksdb/r/type_fixed.result | 131 + .../rocksdb/r/type_fixed_indexes.result | 129 + .../mysql-test/rocksdb/r/type_float.result | 306 + .../rocksdb/r/type_float_indexes.result | 189 + .../mysql-test/rocksdb/r/type_int.result | 212 + .../rocksdb/r/type_int_indexes.result | 99 + .../mysql-test/rocksdb/r/type_set.result | 49 + .../rocksdb/r/type_set_indexes.result | 80 + .../mysql-test/rocksdb/r/type_text.result | 57 + .../rocksdb/r/type_text_indexes.result | 165 + 
.../rocksdb/r/type_varbinary.result | 93 + .../mysql-test/rocksdb/r/type_varchar.result | 743 ++ .../rocksdb/r/type_varchar_debug.result | 254 + .../mysql-test/rocksdb/r/unique_check.result | 72 + .../mysql-test/rocksdb/r/unique_sec.result | 185 + .../rocksdb/r/unique_sec_rev_cf.result | 162 + .../r/unsupported_tx_isolations.result | 18 + .../mysql-test/rocksdb/r/update.result | 113 + .../mysql-test/rocksdb/r/update_ignore.result | 57 + .../mysql-test/rocksdb/r/update_multi.result | 691 + .../rocksdb/r/update_with_keys.result | 38 + .../rocksdb/r/validate_datadic.result | 9 + .../mysql-test/rocksdb/r/write_sync.result | 39 + .../mysql-test/rocksdb/slow_query_log.awk | 27 + storage/rocksdb/mysql-test/rocksdb/t/1st.test | 36 + .../rocksdb/t/add_index_inplace.cnf | 5 + .../rocksdb/t/add_index_inplace.test | 290 + .../t/add_index_inplace_sstfilewriter.test | 102 + .../t/allow_no_pk_concurrent_insert.test | 22 + .../rocksdb/t/allow_no_primary_key.test | 91 + .../t/allow_no_primary_key_with_sk.test | 137 + .../mysql-test/rocksdb/t/allow_os_buffer.test | 30 + .../mysql-test/rocksdb/t/alter_table.test | 94 + .../mysql-test/rocksdb/t/analyze_table.test | 31 + .../rocksdb/t/apply_changes_iter.test | 44 + .../rocksdb/t/autoinc_secondary.test | 16 + .../mysql-test/rocksdb/t/autoinc_vars.test | 65 + .../mysql-test/rocksdb/t/autoincrement.test | 3 + .../rocksdb/t/bloomfilter-master.opt | 2 + .../mysql-test/rocksdb/t/bloomfilter.inc | 63 + .../mysql-test/rocksdb/t/bloomfilter.test | 1 + .../rocksdb/t/bloomfilter2-master.opt | 1 + .../mysql-test/rocksdb/t/bloomfilter2.test | 103 + .../rocksdb/t/bloomfilter3-master.opt | 3 + .../mysql-test/rocksdb/t/bloomfilter3.test | 118 + .../rocksdb/t/bloomfilter4-master.opt | 1 + .../mysql-test/rocksdb/t/bloomfilter4.test | 52 + .../rocksdb/t/bloomfilter_load_select.inc | 189 + .../rocksdb/t/bloomfilter_skip-master.opt | 3 + .../rocksdb/t/bloomfilter_skip.test | 1 + .../rocksdb/t/bloomfilter_table_def.tmpl | 36 + 
.../mysql-test/rocksdb/t/bulk_load.test | 110 + .../rocksdb/t/cardinality-master.opt | 4 + .../mysql-test/rocksdb/t/cardinality.test | 41 + .../mysql-test/rocksdb/t/check_log_for_xa.py | 31 + .../mysql-test/rocksdb/t/check_table.inc | 54 + .../mysql-test/rocksdb/t/check_table.test | 12 + .../mysql-test/rocksdb/t/checkpoint.test | 107 + .../mysql-test/rocksdb/t/checksum_table.test | 76 + .../rocksdb/t/checksum_table_live.test | 24 + .../mysql-test/rocksdb/t/col_not_null.inc | 55 + .../rocksdb/t/col_not_null_timestamp.inc | 61 + .../rocksdb/mysql-test/rocksdb/t/col_null.inc | 34 + .../mysql-test/rocksdb/t/col_opt_default.test | 27 + .../rocksdb/t/col_opt_not_null.test | 224 + .../mysql-test/rocksdb/t/col_opt_null.test | 216 + .../rocksdb/t/col_opt_unsigned.test | 74 + .../rocksdb/t/col_opt_zerofill.test | 67 + .../mysql-test/rocksdb/t/collation-master.opt | 1 + .../mysql-test/rocksdb/t/collation.test | 181 + .../rocksdb/t/collation_exception-master.opt | 2 + .../rocksdb/t/collation_exception.test | 27 + .../rocksdb/t/compact_deletes-master.opt | 3 + .../mysql-test/rocksdb/t/compact_deletes.test | 87 + .../rocksdb/t/compact_deletes_test.inc | 43 + .../rocksdb/t/compression_zstd-master.opt | 1 + .../rocksdb/t/compression_zstd.test | 4 + .../rocksdb/t/concurrent_alter.test | 34 + .../t/cons_snapshot_read_committed.test | 6 + .../t/cons_snapshot_repeatable_read.test | 6 + .../rocksdb/t/cons_snapshot_serializable.test | 6 + .../rocksdb/t/consistent_snapshot.inc | 136 + .../rocksdb/t/corrupted_data_reads_debug.test | 80 + .../mysql-test/rocksdb/t/create_table.test | 192 + .../mysql-test/rocksdb/t/deadlock.test | 43 + .../rocksdb/mysql-test/rocksdb/t/delete.test | 101 + .../rocksdb/t/delete_before_lock.test | 36 + .../mysql-test/rocksdb/t/delete_ignore.test | 37 + .../mysql-test/rocksdb/t/delete_quick.test | 32 + .../rocksdb/t/delete_with_keys.test | 39 + .../mysql-test/rocksdb/t/describe.test | 24 + .../rocksdb/mysql-test/rocksdb/t/disabled.def | 4 + 
.../mysql-test/rocksdb/t/drop_database.test | 11 + .../rocksdb/t/drop_index_inplace.test | 116 + .../rocksdb/t/drop_stats_procedure.inc | 3 + .../rocksdb/t/drop_table-master.opt | 3 + .../mysql-test/rocksdb/t/drop_table.test | 115 + .../mysql-test/rocksdb/t/drop_table2.test | 110 + .../mysql-test/rocksdb/t/drop_table2_check.pl | 19 + .../rocksdb/t/drop_table3-master.opt | 2 + .../mysql-test/rocksdb/t/drop_table3.inc | 47 + .../mysql-test/rocksdb/t/drop_table3.test | 5 + .../t/drop_table3_repopulate_table.inc | 15 + .../rocksdb/t/drop_table_compactions.pl | 37 + .../rocksdb/t/drop_table_repopulate_table.inc | 15 + .../mysql-test/rocksdb/t/drop_table_sync.inc | 6 + .../mysql-test/rocksdb/t/dup_key_update.test | 41 + .../mysql-test/rocksdb/t/duplicate_table.test | 16 + .../mysql-test/rocksdb/t/fail_system_cf.test | 17 + .../mysql-test/rocksdb/t/foreign_key.test | 45 + .../rocksdb/t/gap_lock_issue254-master.opt | 1 + .../rocksdb/t/gap_lock_issue254.test | 14 + .../rocksdb/t/gap_lock_raise_error.test | 37 + .../mysql-test/rocksdb/t/gen_insert.pl | 32 + .../rocksdb/t/get_error_message.test | 25 + .../mysql-test/rocksdb/t/handler_basic.test | 53 + .../mysql-test/rocksdb/t/hermitage.inc | 257 + .../mysql-test/rocksdb/t/hermitage.test | 10 + .../mysql-test/rocksdb/t/hermitage_init.inc | 8 + .../rocksdb/mysql-test/rocksdb/t/i_s_ddl.test | 24 + .../rocksdb/mysql-test/rocksdb/t/index.inc | 121 + .../rocksdb/mysql-test/rocksdb/t/index.test | 23 + .../rocksdb/t/index_file_map-master.opt | 1 + .../mysql-test/rocksdb/t/index_file_map.test | 51 + .../rocksdb/t/index_key_block_size.test | 70 + .../mysql-test/rocksdb/t/index_primary.test | 64 + .../rocksdb/t/index_type_btree.test | 12 + .../mysql-test/rocksdb/t/index_type_hash.test | 12 + .../rocksdb/t/information_schema-master.opt | 1 + .../rocksdb/t/information_schema.test | 72 + .../rocksdb/t/init_stats_procedure.inc | 35 + .../rocksdb/t/innodb_i_s_tables_disabled.test | 35 + .../rocksdb/mysql-test/rocksdb/t/insert.test | 99 + 
.../t/insert_optimized_config-master.opt | 6 + .../rocksdb/t/insert_optimized_config.test | 41 + .../rocksdb/t/insert_with_keys.test | 93 + .../mysql-test/rocksdb/t/issue100.test | 23 + .../rocksdb/t/issue100_delete-master.opt | 1 + .../mysql-test/rocksdb/t/issue100_delete.test | 19 + .../mysql-test/rocksdb/t/issue111.test | 38 + .../mysql-test/rocksdb/t/issue290.test | 40 + .../mysql-test/rocksdb/t/issue314.test | 16 + .../rocksdb/t/level_read_committed.test | 6 + .../rocksdb/t/level_read_uncommitted.test | 6 + .../rocksdb/t/level_repeatable_read.test | 5 + .../rocksdb/t/level_serializable.test | 5 + .../rocksdb/mysql-test/rocksdb/t/loaddata.inc | 117 + .../mysql-test/rocksdb/t/loaddata.test | 8 + .../rocksdb/mysql-test/rocksdb/t/lock.test | 202 + .../rocksdb/t/lock_rows_not_exist.test | 110 + .../mysql-test/rocksdb/t/locking_issues.test | 67 + .../rocksdb/mysql-test/rocksdb/t/misc.test | 45 + ...g_gtid_skip_empty_trans_rocksdb-master.opt | 1 + ...lbinlog_gtid_skip_empty_trans_rocksdb.test | 16 + .../mysql-test/rocksdb/t/mysqldump-master.opt | 1 + .../mysql-test/rocksdb/t/mysqldump.test | 65 + .../rocksdb/t/mysqldump2-master.opt | 1 + .../mysql-test/rocksdb/t/mysqldump2.test | 43 + .../mysql-test/rocksdb/t/negative_stats.test | 26 + .../mysql-test/rocksdb/t/no_merge_sort.test | 32 + .../rocksdb/t/no_primary_key_basic_ops.inc | 65 + .../rocksdb/t/optimize_table-master.opt | 1 + .../mysql-test/rocksdb/t/optimize_table.inc | 78 + .../mysql-test/rocksdb/t/optimize_table.test | 8 + .../mysql-test/rocksdb/t/partition.test | 42 + .../mysql-test/rocksdb/t/perf_context.test | 92 + .../rocksdb/t/read_only_tx-master.opt | 1 + .../mysql-test/rocksdb/t/read_only_tx.test | 70 + .../rocksdb/t/records_in_range-master.opt | 4 + .../rocksdb/t/records_in_range.test | 144 + .../mysql-test/rocksdb/t/repair_table.inc | 38 + .../mysql-test/rocksdb/t/repair_table.test | 8 + .../rocksdb/mysql-test/rocksdb/t/replace.test | 54 + .../mysql-test/rocksdb/t/rocksdb-master.opt | 1 + 
.../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 1925 +++ .../rocksdb/t/rocksdb_cf_options-master.opt | 1 + .../rocksdb/t/rocksdb_cf_options-master.sh | 5 + .../rocksdb/t/rocksdb_cf_options.test | 76 + .../rocksdb/t/rocksdb_cf_reverse-master.opt | 1 + .../rocksdb/t/rocksdb_cf_reverse.test | 71 + .../rocksdb/t/rocksdb_checksums-master.opt | 2 + .../mysql-test/rocksdb/t/rocksdb_checksums.pl | 16 + .../rocksdb/t/rocksdb_checksums.test | 124 + .../rocksdb/t/rocksdb_concurrent_delete.test | 24 + .../rocksdb/t/rocksdb_concurrent_insert.py | 95 + .../mysql-test/rocksdb/t/rocksdb_datadir.test | 30 + .../rocksdb/t/rocksdb_icp-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_icp.inc | 154 + .../mysql-test/rocksdb/t/rocksdb_icp.test | 44 + .../rocksdb/t/rocksdb_icp_rev-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_icp_rev.test | 7 + .../mysql-test/rocksdb/t/rocksdb_locks.test | 92 + .../rocksdb/t/rocksdb_parts-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_parts.test | 121 + .../rocksdb/t/rocksdb_qcache-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_qcache.test | 30 + .../rocksdb/t/rocksdb_range-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_range.test | 193 + .../mysql-test/rocksdb/t/rocksdb_range2.test | 20 + .../rocksdb/t/rocksdb_row_stats.test | 57 + ...cksdb_table_stats_sampling_pct_change.test | 80 + .../mysql-test/rocksdb/t/rpl_read_free.cnf | 14 + .../mysql-test/rocksdb/t/rpl_read_free.test | 302 + .../rocksdb/t/rpl_row_not_found.cnf | 9 + .../rocksdb/t/rpl_row_not_found.inc | 92 + .../rocksdb/t/rpl_row_not_found.test | 4 + .../mysql-test/rocksdb/t/rpl_row_rocksdb.cnf | 7 + .../mysql-test/rocksdb/t/rpl_row_rocksdb.test | 47 + .../mysql-test/rocksdb/t/rpl_row_stats.cnf | 7 + .../mysql-test/rocksdb/t/rpl_row_stats.test | 46 + .../mysql-test/rocksdb/t/rpl_row_triggers.cnf | 19 + .../rocksdb/t/rpl_row_triggers.test | 262 + .../mysql-test/rocksdb/t/rpl_savepoint.cnf | 7 + .../mysql-test/rocksdb/t/rpl_savepoint.test | 90 + 
.../mysql-test/rocksdb/t/rpl_statement.cnf | 7 + .../mysql-test/rocksdb/t/rpl_statement.test | 57 + .../rocksdb/t/rpl_statement_not_found.cnf | 9 + .../rocksdb/t/rpl_statement_not_found.test | 2 + storage/rocksdb/mysql-test/rocksdb/t/rqg.inc | 43 + .../rocksdb/t/rqg_examples-master.opt | 1 + .../mysql-test/rocksdb/t/rqg_examples.test | 8 + .../rocksdb/t/rqg_runtime-master.opt | 1 + .../mysql-test/rocksdb/t/rqg_runtime.test | 53 + .../rocksdb/t/rqg_transactions-master.opt | 1 + .../rocksdb/t/rqg_transactions.test | 10 + .../mysql-test/rocksdb/t/se-innodb.out | 1 + .../rocksdb/mysql-test/rocksdb/t/select.test | 202 + .../rocksdb/t/select_for_update.test | 55 + .../select_for_update_skip_locked_nowait.test | 46 + .../rocksdb/t/select_lock_in_share_mode.test | 58 + .../mysql-test/rocksdb/t/set_checkpoint.inc | 27 + .../rocksdb/t/show_engine-master.opt | 1 + .../mysql-test/rocksdb/t/show_engine.test | 75 + .../rocksdb/t/show_table_status-master.opt | 2 + .../rocksdb/t/show_table_status.test | 64 + .../mysql-test/rocksdb/t/shutdown-master.opt | 1 + .../mysql-test/rocksdb/t/shutdown.test | 36 + .../rocksdb/t/singledelete-master.opt | 1 + .../mysql-test/rocksdb/t/singledelete.test | 89 + .../rocksdb/t/slow_query_log-master.opt | 1 + .../mysql-test/rocksdb/t/slow_query_log.test | 34 + .../mysql-test/rocksdb/t/sst_count_rows.sh | 52 + .../rocksdb/t/statistics-master.opt | 3 + .../mysql-test/rocksdb/t/statistics.test | 74 + .../mysql-test/rocksdb/t/table_stats.test | 27 + .../mysql-test/rocksdb/t/tbl_opt_ai.test | 29 + .../rocksdb/t/tbl_opt_avg_row_length.test | 23 + .../rocksdb/t/tbl_opt_checksum.test | 19 + .../rocksdb/t/tbl_opt_connection.test | 32 + .../rocksdb/t/tbl_opt_data_index_dir.test | 37 + .../rocksdb/t/tbl_opt_delay_key_write.test | 23 + .../rocksdb/t/tbl_opt_insert_method.test | 23 + .../rocksdb/t/tbl_opt_key_block_size.test | 23 + .../rocksdb/t/tbl_opt_max_rows.test | 23 + .../rocksdb/t/tbl_opt_min_rows.test | 23 + .../rocksdb/t/tbl_opt_pack_keys.test | 23 + 
.../rocksdb/t/tbl_opt_password.test | 27 + .../rocksdb/t/tbl_opt_row_format.test | 23 + .../mysql-test/rocksdb/t/tbl_opt_union.test | 28 + .../rocksdb/t/tbl_standard_opts.test | 42 + .../mysql-test/rocksdb/t/transaction.test | 105 + .../rocksdb/t/transaction_isolation.inc | 150 + .../rocksdb/t/transaction_select.inc | 15 + .../mysql-test/rocksdb/t/truncate_table.test | 74 + .../rocksdb/t/truncate_table3-master.opt | 2 + .../mysql-test/rocksdb/t/truncate_table3.test | 5 + .../mysql-test/rocksdb/t/type_binary.inc | 45 + .../mysql-test/rocksdb/t/type_binary.test | 8 + .../rocksdb/t/type_binary_indexes-master.opt | 1 + .../rocksdb/t/type_binary_indexes.test | 99 + .../rocksdb/mysql-test/rocksdb/t/type_bit.inc | 53 + .../mysql-test/rocksdb/t/type_bit.test | 8 + .../rocksdb/t/type_bit_indexes-master.opt | 1 + .../rocksdb/t/type_bit_indexes.test | 113 + .../mysql-test/rocksdb/t/type_blob.inc | 49 + .../mysql-test/rocksdb/t/type_blob.test | 8 + .../rocksdb/t/type_blob_indexes-master.opt | 1 + .../rocksdb/t/type_blob_indexes.test | 176 + .../mysql-test/rocksdb/t/type_bool.inc | 64 + .../mysql-test/rocksdb/t/type_bool.test | 8 + .../mysql-test/rocksdb/t/type_char.inc | 45 + .../mysql-test/rocksdb/t/type_char.test | 19 + .../rocksdb/t/type_char_indexes-master.opt | 1 + .../rocksdb/t/type_char_indexes.test | 107 + .../t/type_char_indexes_collation-master.opt | 1 + .../t/type_char_indexes_collation.test | 142 + .../mysql-test/rocksdb/t/type_date_time.inc | 45 + .../mysql-test/rocksdb/t/type_date_time.test | 9 + .../t/type_date_time_indexes-master.opt | 1 + .../rocksdb/t/type_date_time_indexes.test | 157 + .../mysql-test/rocksdb/t/type_decimal.test | 163 + .../mysql-test/rocksdb/t/type_enum.inc | 50 + .../mysql-test/rocksdb/t/type_enum.test | 8 + .../rocksdb/t/type_enum_indexes-master.opt | 1 + .../rocksdb/t/type_enum_indexes.test | 93 + .../mysql-test/rocksdb/t/type_fixed.inc | 85 + .../mysql-test/rocksdb/t/type_fixed.test | 8 + .../rocksdb/t/type_fixed_indexes-master.opt | 1 + 
.../rocksdb/t/type_fixed_indexes.test | 107 + .../mysql-test/rocksdb/t/type_float.inc | 108 + .../mysql-test/rocksdb/t/type_float.test | 8 + .../rocksdb/t/type_float_indexes-master.opt | 1 + .../rocksdb/t/type_float_indexes.test | 175 + .../rocksdb/mysql-test/rocksdb/t/type_int.inc | 68 + .../mysql-test/rocksdb/t/type_int.test | 8 + .../rocksdb/t/type_int_indexes-master.opt | 1 + .../rocksdb/t/type_int_indexes.test | 75 + .../rocksdb/mysql-test/rocksdb/t/type_set.inc | 49 + .../mysql-test/rocksdb/t/type_set.test | 8 + .../rocksdb/t/type_set_indexes-master.opt | 1 + .../rocksdb/t/type_set_indexes.test | 104 + .../mysql-test/rocksdb/t/type_text.inc | 49 + .../mysql-test/rocksdb/t/type_text.test | 8 + .../rocksdb/t/type_text_indexes-master.opt | 1 + .../rocksdb/t/type_text_indexes.test | 171 + .../mysql-test/rocksdb/t/type_varbinary.inc | 75 + .../mysql-test/rocksdb/t/type_varbinary.test | 8 + .../mysql-test/rocksdb/t/type_varchar.inc | 77 + .../mysql-test/rocksdb/t/type_varchar.test | 75 + .../rocksdb/t/type_varchar_debug.test | 137 + .../rocksdb/t/type_varchar_endspace.inc | 84 + .../mysql-test/rocksdb/t/unique_check.test | 145 + .../mysql-test/rocksdb/t/unique_sec.inc | 198 + .../mysql-test/rocksdb/t/unique_sec.test | 33 + .../rocksdb/t/unique_sec_rev_cf.test | 5 + .../rocksdb/t/unsupported_tx_isolations.test | 25 + .../rocksdb/mysql-test/rocksdb/t/update.test | 72 + .../rocksdb/t/update_ignore-master.opt | 1 + .../mysql-test/rocksdb/t/update_ignore.test | 35 + .../mysql-test/rocksdb/t/update_multi.test | 15 + .../rocksdb/t/update_multi_exec.inc | 27 + .../rocksdb/t/update_with_keys.test | 78 + .../rocksdb/t/validate_datadic.test | 102 + .../mysql-test/rocksdb/t/write_sync.test | 42 + .../mysql-test/rocksdb_hotbackup/base.cnf | 25 + .../rocksdb_hotbackup/include/cleanup.inc | 3 + .../rocksdb_hotbackup/include/load_data.sh | 43 + .../include/load_data_and_run.sh | 9 + .../rocksdb_hotbackup/include/setup.inc | 16 + .../include/setup_replication_gtid.sh | 20 + 
.../setup_replication_gtid_and_sync.inc | 4 + .../rocksdb_hotbackup/include/stream_run.sh | 71 + .../mysql-test/rocksdb_hotbackup/my.cnf | 2 + .../rocksdb_hotbackup/r/gtid.result | 23 + .../rocksdb_hotbackup/r/stream.result | 20 + .../mysql-test/rocksdb_hotbackup/r/wdt.result | 20 + .../rocksdb_hotbackup/r/xbstream.result | 20 + .../rocksdb_hotbackup/t/gtid-master.opt | 1 + .../rocksdb_hotbackup/t/gtid-slave.opt | 1 + .../mysql-test/rocksdb_hotbackup/t/gtid.test | 47 + .../rocksdb_hotbackup/t/stream.test | 22 + .../mysql-test/rocksdb_hotbackup/t/wdt.test | 22 + .../rocksdb_hotbackup/t/xbstream.test | 22 + .../mysql-test/rocksdb_rpl/combinations | 2 + .../include/rpl_no_unique_check_on_lag.inc | 71 + .../consistent_snapshot_mixed_engines.result | 68 + .../rocksdb_rpl/r/multiclient_2pc.result | 27 + .../r/rpl_crash_safe_wal_corrupt.result | 135 + .../rocksdb_rpl/r/rpl_gtid_crash_safe.result | 361 + .../r/rpl_gtid_crash_safe_wal_corrupt.result | 140 + .../r/rpl_gtid_rocksdb_sys_header.result | 16 + .../r/rpl_no_unique_check_on_lag.result | 34 + .../r/rpl_no_unique_check_on_lag_mts.result | 31 + .../r/rpl_rocksdb_2pc_crash_recover.result | 44 + .../rocksdb_rpl/r/rpl_rocksdb_snapshot.result | 222 + .../rpl_rocksdb_snapshot_without_gtid.result | 15 + .../r/rpl_rocksdb_stress_crash.result | 28 + .../rocksdb_rpl/rpl_1slave_base.cnf | 51 + .../mysql-test/rocksdb_rpl/t/combinations | 2 + ...nsistent_snapshot_mixed_engines-master.opt | 1 + .../t/consistent_snapshot_mixed_engines.test | 81 + .../rocksdb_rpl/t/multiclient_2pc-mater.opt | 1 + .../rocksdb_rpl/t/multiclient_2pc.test | 71 + .../t/rpl_check_for_binlog_info.pl | 19 + .../t/rpl_crash_safe_wal_corrupt.cnf | 9 + .../t/rpl_crash_safe_wal_corrupt.test | 12 + .../t/rpl_gtid_crash_safe-master.opt | 1 + .../t/rpl_gtid_crash_safe-slave.opt | 2 + .../rocksdb_rpl/t/rpl_gtid_crash_safe.test | 41 + .../t/rpl_gtid_crash_safe_wal_corrupt.cnf | 14 + .../t/rpl_gtid_crash_safe_wal_corrupt.inc | 153 + 
.../t/rpl_gtid_crash_safe_wal_corrupt.test | 12 + .../t/rpl_gtid_rocksdb_sys_header-master.opt | 1 + .../t/rpl_gtid_rocksdb_sys_header-slave.opt | 1 + .../t/rpl_gtid_rocksdb_sys_header.test | 39 + .../t/rpl_no_unique_check_on_lag-slave.opt | 1 + .../t/rpl_no_unique_check_on_lag.test | 6 + .../rpl_no_unique_check_on_lag_mts-slave.opt | 1 + .../t/rpl_no_unique_check_on_lag_mts.test | 2 + .../rpl_rocksdb_2pc_crash_recover-master.opt | 1 + .../t/rpl_rocksdb_2pc_crash_recover-slave.opt | 1 + .../t/rpl_rocksdb_2pc_crash_recover.test | 56 + .../t/rpl_rocksdb_snapshot-master.opt | 1 + .../t/rpl_rocksdb_snapshot-slave.opt | 1 + .../rocksdb_rpl/t/rpl_rocksdb_snapshot.test | 373 + .../t/rpl_rocksdb_snapshot_without_gtid.test | 17 + .../t/rpl_rocksdb_stress_crash-master.opt | 2 + .../t/rpl_rocksdb_stress_crash-slave.opt | 2 + .../t/rpl_rocksdb_stress_crash.test | 26 + .../rocksdb_stress/include/rocksdb_stress.inc | 56 + .../rocksdb/mysql-test/rocksdb_stress/my.cnf | 8 + .../rocksdb_stress/r/rocksdb_stress.result | 21 + .../r/rocksdb_stress_crash.result | 21 + .../rocksdb_stress/t/load_generator.py | 1029 ++ .../rocksdb_stress/t/rocksdb_stress.test | 31 + .../t/rocksdb_stress_crash.test | 32 + .../rocksdb_sys_vars/r/all_vars.result | 13 + ...cess_hint_on_compaction_start_basic.result | 7 + ...rocksdb_advise_random_on_open_basic.result | 7 + ...low_concurrent_memtable_write_basic.result | 64 + .../r/rocksdb_allow_mmap_reads_basic.result | 7 + .../r/rocksdb_allow_mmap_writes_basic.result | 7 + .../r/rocksdb_allow_os_buffer_basic.result | 7 + .../r/rocksdb_background_sync_basic.result | 68 + ...b_base_background_compactions_basic.result | 7 + .../r/rocksdb_block_cache_size_basic.result | 7 + ...ocksdb_block_restart_interval_basic.result | 7 + .../r/rocksdb_block_size_basic.result | 7 + .../rocksdb_block_size_deviation_basic.result | 7 + .../r/rocksdb_bulk_load_basic.result | 100 + .../r/rocksdb_bulk_load_size_basic.result | 72 + .../r/rocksdb_bytes_per_sync_basic.result | 7 + 
...cache_index_and_filter_blocks_basic.result | 7 + .../r/rocksdb_checksums_pct_basic.result | 93 + ...ocksdb_collect_sst_properties_basic.result | 7 + .../rocksdb_commit_in_the_middle_basic.result | 100 + .../r/rocksdb_compact_cf_basic.result | 39 + ...sdb_compaction_readahead_size_basic.result | 70 + ...compaction_sequential_deletes_basic.result | 64 + ...n_sequential_deletes_count_sd_basic.result | 64 + ..._sequential_deletes_file_size_basic.result | 46 + ...ion_sequential_deletes_window_basic.result | 64 + .../r/rocksdb_create_checkpoint_basic.result | 15 + .../r/rocksdb_create_if_missing_basic.result | 14 + ...reate_missing_column_families_basic.result | 14 + .../r/rocksdb_datadir_basic.result | 7 + .../rocksdb_db_write_buffer_size_basic.result | 7 + ...optimizer_no_zero_cardinality_basic.result | 64 + .../r/rocksdb_default_cf_options_basic.result | 7 + ..._obsolete_files_period_micros_basic.result | 7 + .../r/rocksdb_disable_2pc_basic.result | 75 + .../r/rocksdb_disabledatasync_basic.result | 7 + .../rocksdb_enable_bulk_load_api_basic.result | 14 + ...ocksdb_enable_thread_tracking_basic.result | 7 + ...e_write_thread_adaptive_yield_basic.result | 64 + .../r/rocksdb_error_if_exists_basic.result | 14 + ...sdb_flush_memtable_on_analyze_basic.result | 58 + ...ksdb_force_flush_memtable_now_basic.result | 50 + ..._force_index_records_in_range_basic.result | 106 + ...db_hash_index_allow_collision_basic.result | 7 + .../r/rocksdb_index_type_basic.result | 7 + .../r/rocksdb_info_log_level_basic.result | 93 + .../rocksdb_is_fd_close_on_exec_basic.result | 7 + .../r/rocksdb_keep_log_file_num_basic.result | 7 + .../r/rocksdb_lock_scanned_rows_basic.result | 170 + .../r/rocksdb_lock_wait_timeout_basic.result | 72 + ...rocksdb_log_file_time_to_roll_basic.result | 7 + ...b_manifest_preallocation_size_basic.result | 7 + ...db_max_background_compactions_basic.result | 7 + ...ocksdb_max_background_flushes_basic.result | 7 + .../r/rocksdb_max_log_file_size_basic.result | 7 + 
...ocksdb_max_manifest_file_size_basic.result | 7 + .../r/rocksdb_max_open_files_basic.result | 7 + .../r/rocksdb_max_row_locks_basic.result | 72 + .../r/rocksdb_max_subcompactions_basic.result | 7 + .../r/rocksdb_max_total_wal_size_basic.result | 7 + .../r/rocksdb_merge_buf_size_basic.result | 43 + ...cksdb_merge_combine_read_size_basic.result | 29 + ..._reader_for_compaction_inputs_basic.result | 7 + .../r/rocksdb_no_block_cache_basic.result | 7 + .../rocksdb_override_cf_options_basic.result | 7 + .../r/rocksdb_paranoid_checks_basic.result | 7 + ...rocksdb_pause_background_work_basic.result | 75 + .../r/rocksdb_perf_context_level_basic.result | 114 + ...ter_and_index_blocks_in_cache_basic.result | 7 + ...db_rate_limiter_bytes_per_sec_basic.result | 101 + .../rocksdb_read_free_rpl_tables_basic.result | 65 + .../r/rocksdb_records_in_range_basic.result | 100 + .../r/rocksdb_rpl_skip_tx_api_basic.test | 68 + ...seconds_between_stat_computes_basic.result | 64 + ...ksdb_signal_drop_index_thread_basic.result | 64 + ...sdb_skip_bloom_filter_on_read_basic.result | 100 + .../r/rocksdb_skip_fill_cache_basic.result | 100 + .../r/rocksdb_skip_unique_check_basic.result | 163 + ...ksdb_skip_unique_check_tables_basic.result | 65 + ...rocksdb_stats_dump_period_sec_basic.result | 7 + .../r/rocksdb_store_checksums_basic.result | 100 + ...ocksdb_strict_collation_check_basic.result | 75 + ...b_strict_collation_exceptions_basic.result | 36 + ...ksdb_table_cache_numshardbits_basic.result | 7 + ...ksdb_table_stats_sampling_pct_basic.result | 85 + .../r/rocksdb_unsafe_for_binlog_basic.result | 100 + .../r/rocksdb_use_adaptive_mutex_basic.result | 7 + .../r/rocksdb_use_fsync_basic.result | 7 + .../r/rocksdb_validate_tables_basic.result | 7 + .../r/rocksdb_verify_checksums_basic.result | 100 + .../r/rocksdb_wal_bytes_per_sync_basic.result | 7 + .../r/rocksdb_wal_dir_basic.result | 7 + .../r/rocksdb_wal_recovery_mode_basic.result | 46 + .../r/rocksdb_wal_size_limit_mb_basic.result | 7 + 
.../r/rocksdb_wal_ttl_seconds_basic.result | 7 + .../rocksdb_whole_key_filtering_basic.result | 7 + .../r/rocksdb_write_disable_wal_basic.result | 114 + ...gnore_missing_column_families_basic.result | 100 + .../r/rocksdb_write_sync_basic.result | 114 + .../rocksdb_sys_vars/t/all_vars.test | 39 + ...access_hint_on_compaction_start_basic.test | 7 + .../rocksdb_advise_random_on_open_basic.test | 6 + ...allow_concurrent_memtable_write_basic.test | 18 + .../t/rocksdb_allow_mmap_reads_basic.test | 6 + .../t/rocksdb_allow_mmap_writes_basic.test | 6 + .../t/rocksdb_allow_os_buffer_basic.test | 6 + .../t/rocksdb_background_sync_basic.test | 18 + ...sdb_base_background_compactions_basic.test | 7 + .../t/rocksdb_block_cache_size_basic.test | 7 + .../rocksdb_block_restart_interval_basic.test | 6 + .../t/rocksdb_block_size_basic.test | 7 + .../t/rocksdb_block_size_deviation_basic.test | 7 + .../t/rocksdb_bulk_load_basic.test | 18 + .../t/rocksdb_bulk_load_size_basic.test | 16 + .../t/rocksdb_bytes_per_sync_basic.test | 7 + ...b_cache_index_and_filter_blocks_basic.test | 6 + .../t/rocksdb_checksums_pct_basic.test | 17 + .../rocksdb_collect_sst_properties_basic.test | 8 + .../t/rocksdb_commit_in_the_middle_basic.test | 18 + .../t/rocksdb_compact_cf_basic.test | 16 + ...cksdb_compaction_readahead_size_basic.test | 23 + ...b_compaction_sequential_deletes_basic.test | 18 + ...ion_sequential_deletes_count_sd_basic.test | 18 + ...on_sequential_deletes_file_size_basic.test | 16 + ...ction_sequential_deletes_window_basic.test | 18 + .../t/rocksdb_create_checkpoint_basic.test | 29 + .../t/rocksdb_create_if_missing_basic.test | 16 + ..._create_missing_column_families_basic.test | 16 + .../t/rocksdb_datadir_basic.test | 6 + .../t/rocksdb_db_write_buffer_size_basic.test | 6 + ...g_optimizer_no_zero_cardinality_basic.test | 18 + .../t/rocksdb_default_cf_options_basic.test | 6 + ...te_obsolete_files_period_micros_basic.test | 6 + .../t/rocksdb_disable_2pc_basic.test | 20 + 
.../t/rocksdb_disabledatasync_basic.test | 6 + .../t/rocksdb_enable_bulk_load_api_basic.test | 16 + .../rocksdb_enable_thread_tracking_basic.test | 6 + ...ble_write_thread_adaptive_yield_basic.test | 18 + .../t/rocksdb_error_if_exists_basic.test | 16 + ...cksdb_flush_memtable_on_analyze_basic.test | 44 + ...ocksdb_force_flush_memtable_now_basic.test | 17 + ...db_force_index_records_in_range_basic.test | 23 + ...ksdb_hash_index_allow_collision_basic.test | 7 + .../t/rocksdb_index_type_basic.test | 7 + .../t/rocksdb_info_log_level_basic.test | 21 + .../t/rocksdb_is_fd_close_on_exec_basic.test | 6 + .../t/rocksdb_keep_log_file_num_basic.test | 7 + .../t/rocksdb_lock_scanned_rows_basic.test | 22 + .../t/rocksdb_lock_wait_timeout_basic.test | 16 + .../rocksdb_log_file_time_to_roll_basic.test | 6 + ...sdb_manifest_preallocation_size_basic.test | 6 + ...ksdb_max_background_compactions_basic.test | 7 + .../rocksdb_max_background_flushes_basic.test | 6 + .../t/rocksdb_max_log_file_size_basic.test | 6 + .../rocksdb_max_manifest_file_size_basic.test | 7 + .../t/rocksdb_max_open_files_basic.test | 6 + .../t/rocksdb_max_row_locks_basic.test | 16 + .../t/rocksdb_max_subcompactions_basic.test | 7 + .../t/rocksdb_max_total_wal_size_basic.test | 6 + .../t/rocksdb_merge_buf_size_basic.test | 50 + ...rocksdb_merge_combine_read_size_basic.test | 32 + ...le_reader_for_compaction_inputs_basic.test | 7 + .../t/rocksdb_no_block_cache_basic.test | 6 + .../t/rocksdb_override_cf_options_basic.test | 6 + .../t/rocksdb_paranoid_checks_basic.test | 7 + .../rocksdb_pause_background_work_basic.test | 20 + .../t/rocksdb_perf_context_level_basic.test | 18 + ...ilter_and_index_blocks_in_cache_basic.test | 6 + ...ksdb_rate_limiter_bytes_per_sec_basic.test | 63 + .../t/rocksdb_read_free_rpl_tables_basic.test | 15 + .../t/rocksdb_records_in_range_basic.test | 18 + .../t/rocksdb_rpl_skip_tx_api_basic.test | 18 + ...b_seconds_between_stat_computes_basic.test | 18 + 
...ocksdb_signal_drop_index_thread_basic.test | 19 + ...cksdb_skip_bloom_filter_on_read_basic.test | 18 + .../t/rocksdb_skip_fill_cache_basic.test | 18 + .../t/rocksdb_skip_unique_check_basic.test | 21 + ...ocksdb_skip_unique_check_tables_basic.test | 15 + .../rocksdb_stats_dump_period_sec_basic.test | 6 + .../t/rocksdb_store_checksums_basic.test | 18 + .../rocksdb_strict_collation_check_basic.test | 19 + ...sdb_strict_collation_exceptions_basic.test | 35 + ...ocksdb_table_cache_numshardbits_basic.test | 6 + ...ocksdb_table_stats_sampling_pct_basic.test | 22 + .../t/rocksdb_unsafe_for_binlog_basic.test | 18 + .../t/rocksdb_use_adaptive_mutex_basic.test | 6 + .../t/rocksdb_use_fsync_basic.test | 6 + .../t/rocksdb_validate_tables_basic.test | 6 + .../t/rocksdb_verify_checksums_basic.test | 18 + .../t/rocksdb_wal_bytes_per_sync_basic.test | 6 + .../t/rocksdb_wal_dir_basic.test | 6 + .../t/rocksdb_wal_recovery_mode_basic.test | 17 + .../t/rocksdb_wal_size_limit_mb_basic.test | 6 + .../t/rocksdb_wal_ttl_seconds_basic.test | 6 + .../t/rocksdb_whole_key_filtering_basic.test | 6 + .../t/rocksdb_write_disable_wal_basic.test | 18 + ..._ignore_missing_column_families_basic.test | 18 + .../t/rocksdb_write_sync_basic.test | 18 + storage/rocksdb/properties_collector.cc | 555 + storage/rocksdb/properties_collector.h | 190 + storage/rocksdb/rdb_buff.h | 452 + storage/rocksdb/rdb_cf_manager.cc | 236 + storage/rocksdb/rdb_cf_manager.h | 106 + storage/rocksdb/rdb_cf_options.cc | 340 + storage/rocksdb/rdb_cf_options.h | 92 + storage/rocksdb/rdb_compact_filter.h | 115 + storage/rocksdb/rdb_comparator.h | 93 + storage/rocksdb/rdb_datadic.cc | 4584 +++++++ storage/rocksdb/rdb_datadic.h | 1054 ++ storage/rocksdb/rdb_i_s.cc | 1189 ++ storage/rocksdb/rdb_i_s.h | 34 + storage/rocksdb/rdb_index_merge.cc | 604 + storage/rocksdb/rdb_index_merge.h | 229 + storage/rocksdb/rdb_mutex_wrapper.cc | 234 + storage/rocksdb/rdb_mutex_wrapper.h | 139 + storage/rocksdb/rdb_perf_context.cc | 240 + 
storage/rocksdb/rdb_perf_context.h | 134 + storage/rocksdb/rdb_sst_info.cc | 417 + storage/rocksdb/rdb_sst_info.h | 101 + storage/rocksdb/rdb_threads.cc | 81 + storage/rocksdb/rdb_threads.h | 116 + storage/rocksdb/rdb_utils.cc | 311 + storage/rocksdb/rdb_utils.h | 206 + storage/rocksdb/rocksdb-range-access.txt | 353 + storage/rocksdb/tools/mysql_ldb.cc | 17 + storage/rocksdb/unittest/CMakeLists.txt | 22 + .../unittest/test_properties_collector.cc | 60 + 822 files changed, 81367 insertions(+) create mode 100644 storage/rocksdb/CMakeLists.txt create mode 100644 storage/rocksdb/README create mode 100644 storage/rocksdb/event_listener.cc create mode 100644 storage/rocksdb/event_listener.h create mode 100755 storage/rocksdb/get_rocksdb_files.sh create mode 100644 storage/rocksdb/ha_rocksdb.cc create mode 100644 storage/rocksdb/ha_rocksdb.h create mode 100644 storage/rocksdb/ha_rocksdb_proto.h create mode 100644 storage/rocksdb/logger.h create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/my.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/1st.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/alter_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/cardinality.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/check_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/collation.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/create_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/deadlock.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/delete.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/describe.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/drop_database.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/drop_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/hermitage.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_primary.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/information_schema.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/insert.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue100.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue111.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/issue290.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue314.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/loaddata.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/lock.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/misc.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/partition.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/perf_context.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/repair_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/replace.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/select.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/show_engine.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/shutdown.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/singledelete.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/statistics.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/table_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/transaction.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_binary.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_bit.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_blob.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_bool.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_char.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_enum.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_float.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_int.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/type_set.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_text.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/unique_check.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/update.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/update_multi.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/write_sync.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/1st.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/alter_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/cardinality.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/check_table.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/check_table.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_null.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/collation.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/create_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/deadlock.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/delete.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/describe.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/disabled.def create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_database.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc create mode 100755 storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/hermitage.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_primary.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/information_schema.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/insert.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt create mode 
100644 storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue100.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue111.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue290.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue314.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/loaddata.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/lock.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/misc.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/partition.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/perf_context.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/repair_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/replace.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt create mode 100755 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/select.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/show_engine.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/shutdown.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/singledelete.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test create mode 100755 storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/statistics.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/table_stats.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/transaction.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_binary.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bit.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_blob.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_bool.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt create 
mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_enum.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_float.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_float.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_int.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_int.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_set.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_set.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_text.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_text.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/unique_check.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update_multi.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/write_sync.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc create mode 100755 
storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/combinations create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc 
create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/my.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result create 
mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result 
create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test create 
mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test create mode 100644 storage/rocksdb/properties_collector.cc create mode 100644 storage/rocksdb/properties_collector.h create mode 100644 storage/rocksdb/rdb_buff.h create mode 100644 storage/rocksdb/rdb_cf_manager.cc create mode 100644 
storage/rocksdb/rdb_cf_manager.h create mode 100644 storage/rocksdb/rdb_cf_options.cc create mode 100644 storage/rocksdb/rdb_cf_options.h create mode 100644 storage/rocksdb/rdb_compact_filter.h create mode 100644 storage/rocksdb/rdb_comparator.h create mode 100644 storage/rocksdb/rdb_datadic.cc create mode 100644 storage/rocksdb/rdb_datadic.h create mode 100644 storage/rocksdb/rdb_i_s.cc create mode 100644 storage/rocksdb/rdb_i_s.h create mode 100644 storage/rocksdb/rdb_index_merge.cc create mode 100644 storage/rocksdb/rdb_index_merge.h create mode 100644 storage/rocksdb/rdb_mutex_wrapper.cc create mode 100644 storage/rocksdb/rdb_mutex_wrapper.h create mode 100644 storage/rocksdb/rdb_perf_context.cc create mode 100644 storage/rocksdb/rdb_perf_context.h create mode 100644 storage/rocksdb/rdb_sst_info.cc create mode 100644 storage/rocksdb/rdb_sst_info.h create mode 100644 storage/rocksdb/rdb_threads.cc create mode 100644 storage/rocksdb/rdb_threads.h create mode 100644 storage/rocksdb/rdb_utils.cc create mode 100644 storage/rocksdb/rdb_utils.h create mode 100644 storage/rocksdb/rocksdb-range-access.txt create mode 100644 storage/rocksdb/tools/mysql_ldb.cc create mode 100644 storage/rocksdb/unittest/CMakeLists.txt create mode 100644 storage/rocksdb/unittest/test_properties_collector.cc diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt new file mode 100644 index 00000000000..f2c52ce84c4 --- /dev/null +++ b/storage/rocksdb/CMakeLists.txt @@ -0,0 +1,108 @@ +# TODO: Copyrights + +IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/rocksdb/Makefile") + MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. 
Try \"git submodule update\".") +ENDIF() + +# get a list of rocksdb library source files +# run with env -i to avoid passing variables +EXECUTE_PROCESS( + COMMAND env -i ${CMAKE_SOURCE_DIR}/storage/rocksdb/get_rocksdb_files.sh + OUTPUT_VARIABLE SCRIPT_OUTPUT + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +# split the list into lines +STRING(REGEX MATCHALL "[^\n]+" ROCKSDB_LIB_SOURCES ${SCRIPT_OUTPUT}) + +INCLUDE_DIRECTORIES( + ${CMAKE_SOURCE_DIR}/rocksdb + ${CMAKE_SOURCE_DIR}/rocksdb/include + ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src +) + +ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX + -DZLIB) + +SET(ROCKSDB_SOURCES + ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h + logger.h + rdb_comparator.h + rdb_datadic.cc rdb_datadic.h + rdb_cf_options.cc rdb_cf_options.h + rdb_cf_manager.cc rdb_cf_manager.h + properties_collector.cc properties_collector.h + event_listener.cc event_listener.h + rdb_i_s.cc rdb_i_s.h + rdb_index_merge.cc rdb_index_merge.h + rdb_perf_context.cc rdb_perf_context.h + rdb_mutex_wrapper.cc rdb_mutex_wrapper.h + rdb_sst_info.cc rdb_sst_info.h + rdb_utils.cc rdb_utils.h rdb_buff.h + rdb_threads.cc rdb_threads.h + ${ROCKSDB_LIB_SOURCES} +) + +IF(WITH_FB_TSAN) + SET(PIC_EXT "_pic") +ELSE() + SET(PIC_EXT "") +ENDIF() + +SET(rocksdb_static_libs ) +IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "") + SET(rocksdb_static_libs ${rocksdb_static_libs} + $ENV{WITH_SNAPPY}/lib/libsnappy${PIC_EXT}.a) + ADD_DEFINITIONS(-DSNAPPY) +ELSE() + SET(rocksdb_static_libs ${rocksdb_static_libs} snappy) +ENDIF() + +IF (NOT "$ENV{WITH_LZ4}" STREQUAL "") + SET(rocksdb_static_libs ${rocksdb_static_libs} + $ENV{WITH_LZ4}/lib/liblz4${PIC_EXT}.a) + ADD_DEFINITIONS(-DLZ4) +ELSE() + SET(rocksdb_static_libs ${rocksdb_static_libs} lz4) +ENDIF() + +IF (NOT "$ENV{WITH_BZ2}" STREQUAL "") + SET(rocksdb_static_libs ${rocksdb_static_libs} + $ENV{WITH_BZ2}/lib/libbz2${PIC_EXT}.a) + ADD_DEFINITIONS(-DBZIP2) +ELSE() + SET(rocksdb_static_libs 
${rocksdb_static_libs} bz2) +ENDIF() + +# link ZSTD only if instructed +IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") + SET(rocksdb_static_libs ${rocksdb_static_libs} + $ENV{WITH_ZSTD}/lib/libzstd${PIC_EXT}.a) + ADD_DEFINITIONS(-DZSTD) +ENDIF() + +SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") + +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY + LINK_LIBRARIES ${rocksdb_static_libs} +) + +IF(WITH_EMBEDDED_SERVER) + ADD_SUBDIRECTORY(unittest) +ENDIF() + +IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) + # TODO: read this file list from src.mk:TOOL_SOURCES + SET(ROCKSDB_TOOL_SOURCES + ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_tool.cc + ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_cmd.cc + ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump_tool.cc + ) + MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) + TARGET_LINK_LIBRARIES(sst_dump rocksdb_se) + + MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES}) + TARGET_LINK_LIBRARIES(ldb rocksdb_se) + + MYSQL_ADD_EXECUTABLE(mysql_ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/tools/mysql_ldb.cc ${ROCKSDB_TOOL_SOURCES}) + TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_se) +ENDIF() diff --git a/storage/rocksdb/README b/storage/rocksdb/README new file mode 100644 index 00000000000..472b7986f91 --- /dev/null +++ b/storage/rocksdb/README @@ -0,0 +1,38 @@ +== Summary == +This directory contains RocksDB-based Storage Engine (RDBSE) for MySQL = "MyRocks". + +== Resources == +See https://github.com/facebook/mysql-5.6/wiki/Getting-Started-with-MyRocks +Facebook group: https://www.facebook.com/groups/mysqlonrocksdb/ + +== Coding Conventions == +The baseline for MyRocks coding conventions is the MySQL set, available at +http://dev.mysql.com/doc/internals/en/coding-guidelines.html. + +Several refinements: + 0. There is an umbrella C++ namespace named "myrocks" for all MyRocks code. + 1. 
We introduced "RDB" as the super-short abbreviation for "RocksDB". We will + use it as a name prefix, with different capitalization (see below), to ease + up code navigation with ctags and grep. + N.B. For ease of matching, we'll keep the variables and functions dealing + with sysvars as close as possible to the outside visible names of + sysvars, which start with "rocksdb_" prefix, the outward storage + engine name. + 2. The names for classes, interfaces, and C++ structures (which act as + classes), start with prefix "Rdb_". + NB: For historical reasons, we'll keep the "ha_" class + name for ha_rocksdb class, which is an exception to the rule. + 3. The names for global objects and functions start with prefix "rdb_". + 4. The names for macros and constants start with prefix "RDB_". + 5. Regular class member names start with "m_". + 6. Static class member names start with "s_". + 7. Given the 80 character per line limit, we'll not always use full English + words in names, when a well known or easily recognizable abbreviation + exists (like "tx" for "transaction" or "param" for "parameter" etc). + 8. When needing to disambiguate, we use different suffixes for that, like + "_arg" for a function argument/parameter, "_arr" for a C style array, and + "_vect" for a std::vector etc. + +== Running Tests == +To run tests from rocksdb, rocksd_rpl or rocksdb_sys_vars packages, use the following parameters: +--mysqld=--default-storage-engine=rocksdb --mysqld=--skip-innodb --mysqld=--default-tmp-storage-engine=MyISAM --mysqld=--rocksdb \ No newline at end of file diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc new file mode 100644 index 00000000000..851e4782416 --- /dev/null +++ b/storage/rocksdb/event_listener.cc @@ -0,0 +1,80 @@ +/* + Copyright (c) 2015, Facebook, Inc. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* The C++ file's header */ +#include "./event_listener.h" + +/* C++ standard header files */ +#include +#include + +/* MySQL includes */ +#include +#include + +/* MyRocks includes */ +#include "./ha_rocksdb.h" +#include "./properties_collector.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +static std::vector +extract_index_stats( + const std::vector& files, + const rocksdb::TablePropertiesCollection& props +) { + std::vector ret; + for (auto fn : files) { + auto it = props.find(fn); + DBUG_ASSERT(it != props.end()); + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats); + ret.insert(ret.end(), stats.begin(), stats.end()); + } + return ret; +} + +void Rdb_event_listener::OnCompactionCompleted( + rocksdb::DB *db, + const rocksdb::CompactionJobInfo& ci +) { + DBUG_ASSERT(db != nullptr); + DBUG_ASSERT(m_ddl_manager != nullptr); + + if (ci.status.ok()) { + m_ddl_manager->adjust_stats( + extract_index_stats(ci.output_files, ci.table_properties), + extract_index_stats(ci.input_files, ci.table_properties)); + } +} + +void Rdb_event_listener::OnFlushCompleted( + rocksdb::DB* db, + const rocksdb::FlushJobInfo& flush_job_info +) { + DBUG_ASSERT(db != nullptr); + DBUG_ASSERT(m_ddl_manager != nullptr); + + auto tbl_props = std::make_shared( + flush_job_info.table_properties); + + 
std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats); + m_ddl_manager->adjust_stats(stats); +} + +} // namespace myrocks diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h new file mode 100644 index 00000000000..f92c6d0dea1 --- /dev/null +++ b/storage/rocksdb/event_listener.h @@ -0,0 +1,40 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include "rocksdb/listener.h" + +namespace myrocks { + +class Rdb_ddl_manager; + +class Rdb_event_listener : public rocksdb::EventListener +{ + public: + explicit Rdb_event_listener(Rdb_ddl_manager* ddl_manager) : + m_ddl_manager(ddl_manager) { + } + + void OnCompactionCompleted( + rocksdb::DB *db, const rocksdb::CompactionJobInfo& ci) override; + void OnFlushCompleted( + rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override; + + private: + Rdb_ddl_manager* m_ddl_manager; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/get_rocksdb_files.sh b/storage/rocksdb/get_rocksdb_files.sh new file mode 100755 index 00000000000..121d664a086 --- /dev/null +++ b/storage/rocksdb/get_rocksdb_files.sh @@ -0,0 +1,27 @@ +#!/bin/bash +MKFILE=`mktemp` +# create and run a simple makefile +# include rocksdb make file relative to the path of this script +echo "include rocksdb/src.mk +all: + @echo \$(LIB_SOURCES)" 
> $MKFILE +for f in `make --makefile $MKFILE` +do + echo ../../rocksdb/$f +done +rm $MKFILE + +# create build_version.cc file. Only create one if it doesn't exists or if it is different +# this is so that we don't rebuild mysqld every time +bv=rocksdb/util/build_version.cc +date=$(date +%F) +git_sha=$(pushd rocksdb >/dev/null && git rev-parse HEAD 2>/dev/null && popd >/dev/null) +if [ ! -f $bv ] || [ -z $git_sha ] || [ ! `grep $git_sha $bv` ] +then +echo "#include \"build_version.h\" +const char* rocksdb_build_git_sha = +\"rocksdb_build_git_sha:$git_sha\"; +const char* rocksdb_build_git_date = +\"rocksdb_build_git_date:$date\"; +const char* rocksdb_build_compile_date = __DATE__;" > $bv +fi diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc new file mode 100644 index 00000000000..4fbaf01f154 --- /dev/null +++ b/storage/rocksdb/ha_rocksdb.cc @@ -0,0 +1,10699 @@ +/* + Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#define MYSQL_SERVER 1 + +/* The C++ file's header */ +#include "./ha_rocksdb.h" + +/* C++ standard header files */ +#include +#include +#include +#include + +/* MySQL includes */ +#include "./debug_sync.h" +#include "./my_bit.h" +#include "./my_stacktrace.h" +#include "./sql_table.h" +#include +#include +#include + +/* RocksDB includes */ +#include "rocksdb/compaction_filter.h" +#include "rocksdb/rate_limiter.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/utilities/checkpoint.h" +#include "rocksdb/utilities/convenience.h" +#include "rocksdb/utilities/flashcache.h" +#include "rocksdb/utilities/memory_util.h" + +/* MyRocks includes */ +#include "./event_listener.h" +#include "./ha_rocksdb_proto.h" +#include "./logger.h" +#include "./rdb_cf_manager.h" +#include "./rdb_cf_options.h" +#include "./rdb_datadic.h" +#include "./rdb_i_s.h" +#include "./rdb_index_merge.h" +#include "./rdb_mutex_wrapper.h" +#include "./rdb_threads.h" + +#ifdef TARGET_OS_LINUX +extern my_bool cachedev_enabled; +#endif /* TARGET_OS_LINUX */ + +// Internal MySQL APIs not exposed in any header. +extern "C" +{ +/** + Mark transaction to rollback and mark error as fatal to a sub-statement. + @param thd Thread handle + @param all TRUE <=> rollback main transaction. +*/ +void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); + +/** + * Get the user thread's binary logging format + * @param thd user thread + * @return Value to be used as index into the binlog_format_names array +*/ +int thd_binlog_format(const MYSQL_THD thd); + +/** + * Check if binary logging is filtered for thread's current db. 
+ * @param thd Thread handle + * @retval 1 the query is not filtered, 0 otherwise. +*/ +bool thd_binlog_filter_ok(const MYSQL_THD thd); +} + +namespace myrocks { + +static st_global_stats global_stats; +static st_export_stats export_stats; + +/** + Updates row counters based on the table type and operation type. +*/ +void ha_rocksdb::update_row_stats(operation_type type) { + DBUG_ASSERT(type < ROWS_MAX); + // Find if we are modifying system databases. + if (table->s && m_tbl_def->m_is_mysql_system_table) + global_stats.system_rows[type].inc(); + else + global_stats.rows[type].inc(); +} + +void dbug_dump_database(rocksdb::DB *db); +static handler *rocksdb_create_handler(my_core::handlerton *hton, + my_core::TABLE_SHARE *table_arg, + my_core::MEM_ROOT *mem_root); + +bool can_use_bloom_filter(THD *thd, + const std::shared_ptr& kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, + bool is_ascending); + +/////////////////////////////////////////////////////////// +// Parameters and settings +/////////////////////////////////////////////////////////// +static char * rocksdb_default_cf_options; +static char * rocksdb_override_cf_options; +Rdb_cf_options rocksdb_cf_options_map; + +/////////////////////////////////////////////////////////// +// Globals +/////////////////////////////////////////////////////////// +handlerton *rocksdb_hton; + +rocksdb::TransactionDB *rdb= nullptr; + +static std::shared_ptr rocksdb_stats; +static std::unique_ptr flashcache_aware_env; +static std::shared_ptr + properties_collector_factory; + +Rdb_dict_manager dict_manager; +Rdb_cf_manager cf_manager; +Rdb_ddl_manager ddl_manager; +Rdb_binlog_manager binlog_manager; + + +/** + MyRocks background thread control + N.B. This is besides RocksDB's own background threads + (@see rocksdb::CancelAllBackgroundWork()) +*/ + +static Rdb_background_thread rdb_bg_thread; + + +// List of table names (using regex) that are exceptions to the strict +// collation check requirement. 
+Regex_list_handler *rdb_collation_exceptions; + +static const char* const ERRSTR_ROLLBACK_ONLY + = "This transaction was rolled back and cannot be " + "committed. Only supported operation is to roll it back, " + "so all pending changes will be discarded. " + "Please restart another transaction."; + + +static void +rocksdb_flush_all_memtables() +{ + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (auto cf_handle : cf_manager.get_all_cf()) { + rdb->Flush(rocksdb::FlushOptions(), cf_handle); + } +} + +static void +rocksdb_compact_column_family_stub(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ +} + +static int +rocksdb_compact_column_family(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + struct st_mysql_value* value) +{ + char buff[STRING_BUFFER_USUAL_SIZE]; + int len = sizeof(buff); + + if (const char* cf = value->val_str(value, buff, &len)) { + bool is_automatic; + auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic); + if (cfh != nullptr && rdb != nullptr) { + sql_print_information("RocksDB: Manual compaction of column family: %s\n", cf); + rdb->CompactRange(rocksdb::CompactRangeOptions(), cfh, nullptr, nullptr); + } + } + return 0; +} + +/////////////////////////////////////////////////////////// +// Hash map: table name => open table handler +/////////////////////////////////////////////////////////// + +namespace // anonymous namespace = not visible outside this source file +{ + +struct Rdb_open_tables_map +{ + /* Hash table used to track the handlers of open tables */ + my_core::HASH m_hash; + /* The mutex used to protect the hash table */ + mutable mysql_mutex_t m_mutex; + + void init_hash(void) + { + (void) my_hash_init(&m_hash, my_core::system_charset_info, 32, 0, 0, + (my_hash_get_key) Rdb_open_tables_map::get_hash_key, + 0, 0); + } + + void free_hash(void) + { + my_hash_free(&m_hash); + } + + static uchar* get_hash_key(Rdb_table_handler *table_handler, + size_t *length, + my_bool not_used 
__attribute__((__unused__))); + + Rdb_table_handler* get_table_handler(const char *table_name); + void release_table_handler(Rdb_table_handler *table_handler); + + std::vector get_table_names(void) const; +}; + +} // anonymous namespace + +static Rdb_open_tables_map rdb_open_tables; + + +static std::string rdb_normalize_dir(std::string dir) +{ + while (dir.size() > 0 && dir.back() == '/') + { + dir.resize(dir.size() - 1); + } + return dir; +} + + +static int rocksdb_create_checkpoint( + THD* thd __attribute__((__unused__)), + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* save __attribute__((__unused__)), + struct st_mysql_value* value) +{ + char buf[512]; + int len = sizeof(buf); + const char* checkpoint_dir_raw= value->val_str(value, buf, &len); + if (checkpoint_dir_raw) { + if (rdb != nullptr) { + std::string checkpoint_dir= rdb_normalize_dir(checkpoint_dir_raw); + // NO_LINT_DEBUG + sql_print_information("RocksDB: creating checkpoint in directory : %s\n", + checkpoint_dir.c_str()); + rocksdb::Checkpoint* checkpoint; + auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint); + if (status.ok()) { + status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str()); + if (status.ok()) { + sql_print_information( + "RocksDB: created checkpoint in directory : %s\n", + checkpoint_dir.c_str()); + } else { + my_printf_error( + ER_UNKNOWN_ERROR, + "RocksDB: Failed to create checkpoint directory. status %d %s", + MYF(0), status.code(), status.ToString().c_str()); + } + delete checkpoint; + } else { + std::string err_text(status.ToString()); + my_printf_error(ER_UNKNOWN_ERROR, + "RocksDB: failed to initialize checkpoint. 
status %d %s\n", + MYF(0), status.code(), err_text.c_str()); + } + return status.code(); + } + } + return HA_ERR_INTERNAL_ERROR; +} + +/* This method is needed to indicate that the + ROCKSDB_CREATE_CHECKPOINT command is not read-only */ +static void +rocksdb_create_checkpoint_stub(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ +} + +static void +rocksdb_force_flush_memtable_now_stub(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ +} + +static int +rocksdb_force_flush_memtable_now(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + struct st_mysql_value* value) +{ + sql_print_information("RocksDB: Manual memtable flush\n"); + rocksdb_flush_all_memtables(); + return 0; +} + +static void rocksdb_drop_index_wakeup_thread( + my_core::THD* thd __attribute__((__unused__)), + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr __attribute__((__unused__)), + const void* save); + +static my_bool rocksdb_pause_background_work= 0; +static mysql_mutex_t rdb_sysvars_mutex; + +static void rocksdb_set_pause_background_work( + my_core::THD* thd __attribute__((__unused__)), + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr __attribute__((__unused__)), + const void* save) +{ + mysql_mutex_lock(&rdb_sysvars_mutex); + bool pause_requested= *static_cast(save); + if (rocksdb_pause_background_work != pause_requested) { + if (pause_requested) { + rdb->PauseBackgroundWork(); + } else { + rdb->ContinueBackgroundWork(); + } + rocksdb_pause_background_work= pause_requested; + } + mysql_mutex_unlock(&rdb_sysvars_mutex); +} + +static void +rocksdb_set_compaction_options(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save); + +static void +rocksdb_set_table_stats_sampling_pct(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save); + +static void +rocksdb_set_rate_limiter_bytes_per_sec(THD* thd, + struct st_mysql_sys_var* 
var, + void* var_ptr, + const void* save); + +static void rdb_set_collation_exception_list(const char *exception_list); +static void +rocksdb_set_collation_exception_list(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save); + +static void +rocksdb_set_bulk_load(THD* thd, + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr, + const void* save); +////////////////////////////////////////////////////////////////////////////// +// Options definitions +////////////////////////////////////////////////////////////////////////////// +static long long rocksdb_block_cache_size; +/* Use unsigned long long instead of uint64_t because of MySQL compatibility */ +static unsigned long long // NOLINT(runtime/int) + rocksdb_rate_limiter_bytes_per_sec; +static uint64_t rocksdb_info_log_level; +static char * rocksdb_wal_dir; +static uint64_t rocksdb_index_type; +static char rocksdb_background_sync; +static uint32_t rocksdb_debug_optimizer_n_rows; +static my_bool rocksdb_debug_optimizer_no_zero_cardinality; +static uint32_t rocksdb_wal_recovery_mode; +static uint32_t rocksdb_access_hint_on_compaction_start; +static char * rocksdb_compact_cf_name; +static char * rocksdb_checkpoint_name; +static my_bool rocksdb_signal_drop_index_thread; +static my_bool rocksdb_strict_collation_check= 1; +static my_bool rocksdb_disable_2pc= 0; +static char * rocksdb_strict_collation_exceptions; +static my_bool rocksdb_collect_sst_properties= 1; +static my_bool rocksdb_force_flush_memtable_now_var= 0; +static uint64_t rocksdb_number_stat_computes= 0; +static uint32_t rocksdb_seconds_between_stat_computes= 3600; +static long long rocksdb_compaction_sequential_deletes= 0l; +static long long rocksdb_compaction_sequential_deletes_window= 0l; +static long long rocksdb_compaction_sequential_deletes_file_size= 0l; +static uint32_t rocksdb_validate_tables = 1; +static char * rocksdb_datadir; +static uint32_t rocksdb_table_stats_sampling_pct; +static my_bool 
rocksdb_enable_bulk_load_api= 1; +static my_bool rpl_skip_tx_api_var= 0; + +std::atomic rocksdb_snapshot_conflict_errors(0); + +static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) +{ + rocksdb::DBOptions o; + + o.create_if_missing= true; + o.listeners.push_back(std::make_shared(&ddl_manager)); + o.info_log_level= rocksdb::InfoLogLevel::INFO_LEVEL; + o.max_subcompactions= DEFAULT_SUBCOMPACTIONS; + + return o; +} + +static rocksdb::DBOptions rocksdb_db_options= rdb_init_rocksdb_db_options(); +static rocksdb::BlockBasedTableOptions rocksdb_tbl_options; + +static std::shared_ptr rocksdb_rate_limiter; + +/* This enum needs to be kept up to date with rocksdb::InfoLogLevel */ +static const char* info_log_level_names[] = { + "debug_level", + "info_level", + "warn_level", + "error_level", + "fatal_level", + NullS +}; + +static TYPELIB info_log_level_typelib = { + array_elements(info_log_level_names) - 1, + "info_log_level_typelib", + info_log_level_names, + nullptr +}; + +static void +rocksdb_set_rocksdb_info_log_level(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ + mysql_mutex_lock(&rdb_sysvars_mutex); + rocksdb_info_log_level = *static_cast(save); + rocksdb_db_options.info_log->SetInfoLogLevel( + static_cast(rocksdb_info_log_level)); + mysql_mutex_unlock(&rdb_sysvars_mutex); +} + +static const char* index_type_names[] = { + "kBinarySearch", + "kHashSearch", + NullS +}; + +static TYPELIB index_type_typelib = { + array_elements(index_type_names) - 1, + "index_type_typelib", + index_type_names, + nullptr +}; + +//TODO: 0 means don't wait at all, and we don't support it yet? +static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, + "Number of seconds to wait for lock", + nullptr, nullptr, /*default*/ 1, /*min*/ 1, /*max*/ 1024*1024*1024, 0); + +static MYSQL_THDVAR_BOOL(bulk_load, PLUGIN_VAR_RQCMDARG, + "Use bulk-load mode for inserts. 
This enables both " + "rocksdb_skip_unique_check and rocksdb_commit_in_the_middle.", + nullptr, rocksdb_set_bulk_load, FALSE); + +static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, + rocksdb_enable_bulk_load_api, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables using SstFileWriter for bulk loading", + nullptr, nullptr, rocksdb_enable_bulk_load_api); + +static MYSQL_THDVAR_STR(skip_unique_check_tables, + PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, + "Skip unique constraint checking for the specified tables", nullptr, nullptr, + ".*"); + +static MYSQL_THDVAR_BOOL(skip_unique_check, PLUGIN_VAR_RQCMDARG, + "Skip unique constraint checking for all tables", nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(commit_in_the_middle, PLUGIN_VAR_RQCMDARG, + "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, " + "update and delete", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_STR(read_free_rpl_tables, + PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, + "List of tables that will use read-free replication on the slave " + "(i.e. 
not lookup a row during replication)", nullptr, nullptr, ""); + +static MYSQL_SYSVAR_BOOL( + rpl_skip_tx_api, + rpl_skip_tx_api_var, + PLUGIN_VAR_RQCMDARG, + "Use write batches for replication thread instead of tx api", nullptr, + nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG, + "Skip using bloom filter for reads", nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, + "Maximum number of locks a transaction can have", + nullptr, nullptr, /*default*/ 1024*1024*1024, /*min*/ 1, + /*max*/ 1024*1024*1024, 0); + +static MYSQL_THDVAR_BOOL(lock_scanned_rows, PLUGIN_VAR_RQCMDARG, + "Take and hold locks on rows that are scanned but not updated", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG, + "Max #records in a batch for bulk-load mode", + nullptr, nullptr, /*default*/ 1000, /*min*/ 1, /*max*/ 1024*1024*1024, 0); + +static MYSQL_THDVAR_ULONGLONG(merge_buf_size, PLUGIN_VAR_RQCMDARG, + "Size to allocate for merge sort buffers written out to disk " + "during inplace index creation.", + nullptr, nullptr, + /* default (64MB) */ (ulonglong) 67108864, + /* min (100B) */ 100, + /* max */ SIZE_T_MAX, 1); + +static MYSQL_THDVAR_ULONGLONG(merge_combine_read_size, PLUGIN_VAR_RQCMDARG, + "Size that we have to work with during combine (reading from disk) phase of " + "external sort during fast index creation.", + nullptr, nullptr, + /* default (1GB) */ (ulonglong) 1073741824, + /* min (100B) */ 100, + /* max */ SIZE_T_MAX, 1); + +static MYSQL_SYSVAR_BOOL(create_if_missing, + *reinterpret_cast(&rocksdb_db_options.create_if_missing), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::create_if_missing for RocksDB", + nullptr, nullptr, rocksdb_db_options.create_if_missing); + +static MYSQL_SYSVAR_BOOL(create_missing_column_families, + *reinterpret_cast( + &rocksdb_db_options.create_missing_column_families), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + 
"DBOptions::create_missing_column_families for RocksDB", + nullptr, nullptr, rocksdb_db_options.create_missing_column_families); + +static MYSQL_SYSVAR_BOOL(error_if_exists, + *reinterpret_cast(&rocksdb_db_options.error_if_exists), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::error_if_exists for RocksDB", + nullptr, nullptr, rocksdb_db_options.error_if_exists); + +static MYSQL_SYSVAR_BOOL(paranoid_checks, + *reinterpret_cast(&rocksdb_db_options.paranoid_checks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::paranoid_checks for RocksDB", + nullptr, nullptr, rocksdb_db_options.paranoid_checks); + +static MYSQL_SYSVAR_ULONGLONG(rate_limiter_bytes_per_sec, + rocksdb_rate_limiter_bytes_per_sec, + PLUGIN_VAR_RQCMDARG, + "DBOptions::rate_limiter bytes_per_sec for RocksDB", + nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, + /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); + +static MYSQL_SYSVAR_ENUM(info_log_level, + rocksdb_info_log_level, + PLUGIN_VAR_RQCMDARG, + "Filter level for info logs to be written mysqld error log. 
" + "Valid values include 'debug_level', 'info_level', 'warn_level'" + "'error_level' and 'fatal_level'.", + nullptr, rocksdb_set_rocksdb_info_log_level, + rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib); + +static MYSQL_THDVAR_INT(perf_context_level, + PLUGIN_VAR_RQCMDARG, + "Perf Context Level for rocksdb internal timer stat collection", + nullptr, nullptr, + /* default */ rocksdb::PerfLevel::kUninitialized, + /* min */ rocksdb::PerfLevel::kUninitialized, + /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0); + +static MYSQL_SYSVAR_UINT(wal_recovery_mode, + rocksdb_wal_recovery_mode, + PLUGIN_VAR_RQCMDARG, + "DBOptions::wal_recovery_mode for RocksDB", + nullptr, nullptr, 2, + /* min */ 0L, /* max */ 3, 0); + +static MYSQL_SYSVAR_ULONG(compaction_readahead_size, + rocksdb_db_options.compaction_readahead_size, + PLUGIN_VAR_RQCMDARG, + "DBOptions::compaction_readahead_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.compaction_readahead_size, + /* min */ 0L, /* max */ ULONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(new_table_reader_for_compaction_inputs, + *reinterpret_cast + (&rocksdb_db_options.new_table_reader_for_compaction_inputs), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", + nullptr, nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs); + +static MYSQL_SYSVAR_UINT(access_hint_on_compaction_start, + rocksdb_access_hint_on_compaction_start, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::access_hint_on_compaction_start for RocksDB", + nullptr, nullptr, 1, + /* min */ 0L, /* max */ 3, 0); + +static MYSQL_SYSVAR_BOOL(allow_concurrent_memtable_write, + *reinterpret_cast( + &rocksdb_db_options.allow_concurrent_memtable_write), + PLUGIN_VAR_RQCMDARG, + "DBOptions::allow_concurrent_memtable_write for RocksDB", + nullptr, nullptr, rocksdb_db_options.allow_concurrent_memtable_write); + +static MYSQL_SYSVAR_BOOL(enable_write_thread_adaptive_yield, + 
*reinterpret_cast( + &rocksdb_db_options.enable_write_thread_adaptive_yield), + PLUGIN_VAR_RQCMDARG, + "DBOptions::enable_write_thread_adaptive_yield for RocksDB", + nullptr, nullptr, rocksdb_db_options.enable_write_thread_adaptive_yield); + +static MYSQL_SYSVAR_INT(max_open_files, + rocksdb_db_options.max_open_files, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_open_files for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_open_files, + /* min */ -1, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_ULONG(max_total_wal_size, + rocksdb_db_options.max_total_wal_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_total_wal_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_total_wal_size, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(disabledatasync, + *reinterpret_cast(&rocksdb_db_options.disableDataSync), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::disableDataSync for RocksDB", + nullptr, nullptr, rocksdb_db_options.disableDataSync); + +static MYSQL_SYSVAR_BOOL(use_fsync, + *reinterpret_cast(&rocksdb_db_options.use_fsync), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_fsync for RocksDB", + nullptr, nullptr, rocksdb_db_options.use_fsync); + +static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::wal_dir for RocksDB", + nullptr, nullptr, rocksdb_db_options.wal_dir.c_str()); + +static MYSQL_SYSVAR_ULONG(delete_obsolete_files_period_micros, + rocksdb_db_options.delete_obsolete_files_period_micros, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::delete_obsolete_files_period_micros for RocksDB", + nullptr, nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_INT(base_background_compactions, + rocksdb_db_options.base_background_compactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::base_background_compactions for 
RocksDB", + nullptr, nullptr, rocksdb_db_options.base_background_compactions, + /* min */ -1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); + +static MYSQL_SYSVAR_INT(max_background_compactions, + rocksdb_db_options.max_background_compactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_background_compactions for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_background_compactions, + /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); + +static MYSQL_SYSVAR_INT(max_background_flushes, + rocksdb_db_options.max_background_flushes, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_background_flushes for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_background_flushes, + /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0); + +static MYSQL_SYSVAR_UINT(max_subcompactions, + rocksdb_db_options.max_subcompactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_subcompactions for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_subcompactions, + /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); + +static MYSQL_SYSVAR_ULONG(max_log_file_size, + rocksdb_db_options.max_log_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_log_file_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_log_file_size, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(log_file_time_to_roll, + rocksdb_db_options.log_file_time_to_roll, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::log_file_time_to_roll for RocksDB", + nullptr, nullptr, rocksdb_db_options.log_file_time_to_roll, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(keep_log_file_num, + rocksdb_db_options.keep_log_file_num, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::keep_log_file_num for RocksDB", + nullptr, nullptr, rocksdb_db_options.keep_log_file_num, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(max_manifest_file_size, + 
rocksdb_db_options.max_manifest_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_manifest_file_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.max_manifest_file_size, + /* min */ 0L, /* max */ ULONG_MAX, 0); + +static MYSQL_SYSVAR_INT(table_cache_numshardbits, + rocksdb_db_options.table_cache_numshardbits, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::table_cache_numshardbits for RocksDB", + nullptr, nullptr, rocksdb_db_options.table_cache_numshardbits, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, + rocksdb_db_options.WAL_ttl_seconds, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::WAL_ttl_seconds for RocksDB", + nullptr, nullptr, rocksdb_db_options.WAL_ttl_seconds, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(wal_size_limit_mb, + rocksdb_db_options.WAL_size_limit_MB, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::WAL_size_limit_MB for RocksDB", + nullptr, nullptr, rocksdb_db_options.WAL_size_limit_MB, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(manifest_preallocation_size, + rocksdb_db_options.manifest_preallocation_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::manifest_preallocation_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.manifest_preallocation_size, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(allow_os_buffer, + *reinterpret_cast(&rocksdb_db_options.allow_os_buffer), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_os_buffer for RocksDB", + nullptr, nullptr, rocksdb_db_options.allow_os_buffer); + +static MYSQL_SYSVAR_BOOL(allow_mmap_reads, + *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_reads for RocksDB", + nullptr, nullptr, rocksdb_db_options.allow_mmap_reads); + +static MYSQL_SYSVAR_BOOL(allow_mmap_writes, + 
*reinterpret_cast(&rocksdb_db_options.allow_mmap_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_writes for RocksDB", + nullptr, nullptr, rocksdb_db_options.allow_mmap_writes); + +static MYSQL_SYSVAR_BOOL(is_fd_close_on_exec, + *reinterpret_cast(&rocksdb_db_options.is_fd_close_on_exec), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::is_fd_close_on_exec for RocksDB", + nullptr, nullptr, rocksdb_db_options.is_fd_close_on_exec); + +static MYSQL_SYSVAR_UINT(stats_dump_period_sec, + rocksdb_db_options.stats_dump_period_sec, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::stats_dump_period_sec for RocksDB", + nullptr, nullptr, rocksdb_db_options.stats_dump_period_sec, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_BOOL(advise_random_on_open, + *reinterpret_cast(&rocksdb_db_options.advise_random_on_open), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::advise_random_on_open for RocksDB", + nullptr, nullptr, rocksdb_db_options.advise_random_on_open); + +static MYSQL_SYSVAR_ULONG(db_write_buffer_size, + rocksdb_db_options.db_write_buffer_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::db_write_buffer_size for RocksDB", + nullptr, nullptr, rocksdb_db_options.db_write_buffer_size, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(use_adaptive_mutex, + *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_adaptive_mutex for RocksDB", + nullptr, nullptr, rocksdb_db_options.use_adaptive_mutex); + +static MYSQL_SYSVAR_ULONG(bytes_per_sync, + rocksdb_db_options.bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::bytes_per_sync for RocksDB", + nullptr, nullptr, rocksdb_db_options.bytes_per_sync, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG(wal_bytes_per_sync, + rocksdb_db_options.wal_bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + 
"DBOptions::wal_bytes_per_sync for RocksDB", + nullptr, nullptr, rocksdb_db_options.wal_bytes_per_sync, + /* min */ 0L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(enable_thread_tracking, + *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::enable_thread_tracking for RocksDB", + nullptr, nullptr, rocksdb_db_options.enable_thread_tracking); + +static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "block_cache size for RocksDB", + nullptr, nullptr, /* RocksDB's default is 8 MB: */ 8*1024*1024L, + /* min */ 1024L, /* max */ LONGLONG_MAX, /* Block size */1024L); + +static MYSQL_SYSVAR_BOOL(cache_index_and_filter_blocks, + *reinterpret_cast( + &rocksdb_tbl_options.cache_index_and_filter_blocks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", + nullptr, nullptr, true); + +// When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will use the +// LRU cache, but will always keep the filter & idndex block's handle checked +// out (=won't call ShardedLRUCache::Release), plus the parsed out objects +// the LRU cache will never push flush them out, hence they're pinned. +// +// This fixes the mutex contention between :ShardedLRUCache::Lookup and +// ShardedLRUCache::Release which reduced the QPS ratio (QPS using secondary +// index / QPS using PK). 
+static MYSQL_SYSVAR_BOOL(pin_l0_filter_and_index_blocks_in_cache, + *reinterpret_cast( + &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "pin_l0_filter_and_index_blocks_in_cache for RocksDB", + nullptr, nullptr, true); + +static MYSQL_SYSVAR_ENUM(index_type, + rocksdb_index_type, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::index_type for RocksDB", + nullptr, nullptr, + (uint64_t)rocksdb_tbl_options.index_type, &index_type_typelib); + +static MYSQL_SYSVAR_BOOL(hash_index_allow_collision, + *reinterpret_cast(&rocksdb_tbl_options.hash_index_allow_collision), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.hash_index_allow_collision); + +static MYSQL_SYSVAR_BOOL(no_block_cache, + *reinterpret_cast(&rocksdb_tbl_options.no_block_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::no_block_cache for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.no_block_cache); + +static MYSQL_SYSVAR_ULONG(block_size, + rocksdb_tbl_options.block_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.block_size, + /* min */ 1L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_INT(block_size_deviation, + rocksdb_tbl_options.block_size_deviation, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size_deviation for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.block_size_deviation, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_INT(block_restart_interval, + rocksdb_tbl_options.block_restart_interval, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_restart_interval for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.block_restart_interval, + /* min */ 1, /* max */ INT_MAX, 0); + +static 
MYSQL_SYSVAR_BOOL(whole_key_filtering, + *reinterpret_cast(&rocksdb_tbl_options.whole_key_filtering), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::whole_key_filtering for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.whole_key_filtering); + +static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "default cf options for RocksDB", + nullptr, nullptr, ""); + +static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "option overrides per cf for RocksDB", + nullptr, nullptr, ""); + +static MYSQL_SYSVAR_BOOL(background_sync, + rocksdb_background_sync, + PLUGIN_VAR_RQCMDARG, + "turns on background syncs for RocksDB", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(write_sync, + PLUGIN_VAR_RQCMDARG, + "WriteOptions::sync for RocksDB", + nullptr, nullptr, rocksdb::WriteOptions().sync); + +static MYSQL_THDVAR_BOOL(write_disable_wal, + PLUGIN_VAR_RQCMDARG, + "WriteOptions::disableWAL for RocksDB", + nullptr, nullptr, rocksdb::WriteOptions().disableWAL); + +static MYSQL_THDVAR_BOOL(write_ignore_missing_column_families, + PLUGIN_VAR_RQCMDARG, + "WriteOptions::ignore_missing_column_families for RocksDB", + nullptr, nullptr, rocksdb::WriteOptions().ignore_missing_column_families); + +static MYSQL_THDVAR_BOOL(skip_fill_cache, + PLUGIN_VAR_RQCMDARG, + "Skip filling block cache on read requests", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(unsafe_for_binlog, + PLUGIN_VAR_RQCMDARG, + "Allowing statement based binary logging which may break consistency", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_UINT(records_in_range, + PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range(). 
Set to a positive number to override", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_THDVAR_UINT(force_index_records_in_range, + PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range() when FORCE INDEX is used.", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_UINT(debug_optimizer_n_rows, + rocksdb_debug_optimizer_n_rows, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR, + "Test only to override rocksdb estimates of table size in a memtable", + nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_BOOL(debug_optimizer_no_zero_cardinality, + rocksdb_debug_optimizer_no_zero_cardinality, + PLUGIN_VAR_RQCMDARG, + "In case if cardinality is zero, overrides it with some value", + nullptr, nullptr, TRUE); + +static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name, + PLUGIN_VAR_RQCMDARG, + "Compact column family", + rocksdb_compact_column_family, rocksdb_compact_column_family_stub, ""); + +static MYSQL_SYSVAR_STR(create_checkpoint, rocksdb_checkpoint_name, + PLUGIN_VAR_RQCMDARG, + "Checkpoint directory", + rocksdb_create_checkpoint, rocksdb_create_checkpoint_stub, ""); + +static MYSQL_SYSVAR_BOOL(signal_drop_index_thread, + rocksdb_signal_drop_index_thread, + PLUGIN_VAR_RQCMDARG, + "Wake up drop index thread", + nullptr, rocksdb_drop_index_wakeup_thread, FALSE); + +static MYSQL_SYSVAR_BOOL(pause_background_work, + rocksdb_pause_background_work, + PLUGIN_VAR_RQCMDARG, + "Disable all rocksdb background operations", + nullptr, rocksdb_set_pause_background_work, FALSE); + +static MYSQL_SYSVAR_BOOL(disable_2pc, + rocksdb_disable_2pc, + PLUGIN_VAR_RQCMDARG, + "Disable two phase commit for MyRocks", + nullptr, nullptr, TRUE); + +static MYSQL_SYSVAR_BOOL(strict_collation_check, + rocksdb_strict_collation_check, + PLUGIN_VAR_RQCMDARG, + "Enforce case sensitive collation for MyRocks indexes", + nullptr, nullptr, TRUE); + +static 
MYSQL_SYSVAR_STR(strict_collation_exceptions, + rocksdb_strict_collation_exceptions, + PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, + "List of tables (using regex) that are excluded " + "from the case sensitive collation enforcement", + nullptr, rocksdb_set_collation_exception_list, ""); + +static MYSQL_SYSVAR_BOOL(collect_sst_properties, + rocksdb_collect_sst_properties, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables collecting SST file properties on each flush", + nullptr, nullptr, rocksdb_collect_sst_properties); + +static MYSQL_SYSVAR_BOOL( + force_flush_memtable_now, + rocksdb_force_flush_memtable_now_var, + PLUGIN_VAR_RQCMDARG, + "Forces memstore flush which may block all write requests so be careful", + rocksdb_force_flush_memtable_now, + rocksdb_force_flush_memtable_now_stub, FALSE); + +static MYSQL_THDVAR_BOOL( + flush_memtable_on_analyze, + PLUGIN_VAR_RQCMDARG, + "Forces memtable flush on ANALZYE table to get accurate cardinality", + nullptr, nullptr, true); + +static MYSQL_SYSVAR_UINT(seconds_between_stat_computes, + rocksdb_seconds_between_stat_computes, + PLUGIN_VAR_RQCMDARG, + "Sets a number of seconds to wait between optimizer stats recomputation. 
" + "Only changed indexes will be refreshed.", + nullptr, nullptr, rocksdb_seconds_between_stat_computes, + /* min */ 0L, /* max */ UINT_MAX, 0); + +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes, + rocksdb_compaction_sequential_deletes, + PLUGIN_VAR_RQCMDARG, + "RocksDB will trigger compaction for the file if it has more than this number sequential deletes per window", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES, + /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0); + +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_window, + rocksdb_compaction_sequential_deletes_window, + PLUGIN_VAR_RQCMDARG, + "Size of the window for counting rocksdb_compaction_sequential_deletes", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW, + /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0); + +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_file_size, + rocksdb_compaction_sequential_deletes_file_size, + PLUGIN_VAR_RQCMDARG, + "Minimum file size required for compaction_sequential_deletes", + nullptr, rocksdb_set_compaction_options, 0L, + /* min */ -1L, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL(compaction_sequential_deletes_count_sd, + rocksdb_compaction_sequential_deletes_count_sd, + PLUGIN_VAR_RQCMDARG, + "Counting SingleDelete as rocksdb_compaction_sequential_deletes", + nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); + +static MYSQL_THDVAR_INT(checksums_pct, + PLUGIN_VAR_RQCMDARG, + "How many percentages of rows to be checksummed", + nullptr, nullptr, 100, + /* min */ 0, /* max */ 100, 0); + +static MYSQL_THDVAR_BOOL(store_checksums, + PLUGIN_VAR_RQCMDARG, + "Include checksums when writing index/table records", + nullptr, nullptr, false /* default value */); + +static MYSQL_THDVAR_BOOL(verify_checksums, + PLUGIN_VAR_RQCMDARG, + "Verify checksums when reading index/table records", + nullptr, nullptr, 
false /* default value */); + +static MYSQL_SYSVAR_UINT(validate_tables, + rocksdb_validate_tables, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Verify all .frm files match all RocksDB tables (0 means no verification, " + "1 means verify and fail on error, and 2 means verify but continue", + nullptr, nullptr, 1 /* default value */, 0 /* min value */, + 2 /* max value */, 0); + +static MYSQL_SYSVAR_STR(datadir, + rocksdb_datadir, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "RocksDB data directory", + nullptr, nullptr, "./.rocksdb"); + +static MYSQL_SYSVAR_UINT( + table_stats_sampling_pct, + rocksdb_table_stats_sampling_pct, + PLUGIN_VAR_RQCMDARG, + "Percentage of entries to sample when collecting statistics about table " + "properties. Specify either 0 to sample everything or percentage [" + STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".." + STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. " "By default " + STRINGIFY_ARG(RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% of entries are " + "sampled.", + nullptr, rocksdb_set_table_stats_sampling_pct, /* default */ + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, + /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); + +static const longlong ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT= 4194304; +static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE= 100; + +static struct st_mysql_sys_var* rocksdb_system_variables[]= { + MYSQL_SYSVAR(lock_wait_timeout), + MYSQL_SYSVAR(max_row_locks), + MYSQL_SYSVAR(lock_scanned_rows), + MYSQL_SYSVAR(bulk_load), + MYSQL_SYSVAR(skip_unique_check_tables), + MYSQL_SYSVAR(skip_unique_check), + MYSQL_SYSVAR(commit_in_the_middle), + MYSQL_SYSVAR(read_free_rpl_tables), + MYSQL_SYSVAR(rpl_skip_tx_api), + MYSQL_SYSVAR(bulk_load_size), + MYSQL_SYSVAR(merge_buf_size), + MYSQL_SYSVAR(enable_bulk_load_api), + MYSQL_SYSVAR(merge_combine_read_size), + MYSQL_SYSVAR(skip_bloom_filter_on_read), + + MYSQL_SYSVAR(create_if_missing), + MYSQL_SYSVAR(create_missing_column_families), + MYSQL_SYSVAR(error_if_exists), + 
MYSQL_SYSVAR(paranoid_checks), + MYSQL_SYSVAR(rate_limiter_bytes_per_sec), + MYSQL_SYSVAR(info_log_level), + MYSQL_SYSVAR(max_open_files), + MYSQL_SYSVAR(max_total_wal_size), + MYSQL_SYSVAR(disabledatasync), + MYSQL_SYSVAR(use_fsync), + MYSQL_SYSVAR(wal_dir), + MYSQL_SYSVAR(delete_obsolete_files_period_micros), + MYSQL_SYSVAR(base_background_compactions), + MYSQL_SYSVAR(max_background_compactions), + MYSQL_SYSVAR(max_background_flushes), + MYSQL_SYSVAR(max_log_file_size), + MYSQL_SYSVAR(max_subcompactions), + MYSQL_SYSVAR(log_file_time_to_roll), + MYSQL_SYSVAR(keep_log_file_num), + MYSQL_SYSVAR(max_manifest_file_size), + MYSQL_SYSVAR(table_cache_numshardbits), + MYSQL_SYSVAR(wal_ttl_seconds), + MYSQL_SYSVAR(wal_size_limit_mb), + MYSQL_SYSVAR(manifest_preallocation_size), + MYSQL_SYSVAR(allow_os_buffer), + MYSQL_SYSVAR(allow_mmap_reads), + MYSQL_SYSVAR(allow_mmap_writes), + MYSQL_SYSVAR(is_fd_close_on_exec), + MYSQL_SYSVAR(stats_dump_period_sec), + MYSQL_SYSVAR(advise_random_on_open), + MYSQL_SYSVAR(db_write_buffer_size), + MYSQL_SYSVAR(use_adaptive_mutex), + MYSQL_SYSVAR(bytes_per_sync), + MYSQL_SYSVAR(wal_bytes_per_sync), + MYSQL_SYSVAR(enable_thread_tracking), + MYSQL_SYSVAR(perf_context_level), + MYSQL_SYSVAR(wal_recovery_mode), + MYSQL_SYSVAR(access_hint_on_compaction_start), + MYSQL_SYSVAR(new_table_reader_for_compaction_inputs), + MYSQL_SYSVAR(compaction_readahead_size), + MYSQL_SYSVAR(allow_concurrent_memtable_write), + MYSQL_SYSVAR(enable_write_thread_adaptive_yield), + + MYSQL_SYSVAR(block_cache_size), + MYSQL_SYSVAR(cache_index_and_filter_blocks), + MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache), + MYSQL_SYSVAR(index_type), + MYSQL_SYSVAR(hash_index_allow_collision), + MYSQL_SYSVAR(no_block_cache), + MYSQL_SYSVAR(block_size), + MYSQL_SYSVAR(block_size_deviation), + MYSQL_SYSVAR(block_restart_interval), + MYSQL_SYSVAR(whole_key_filtering), + + MYSQL_SYSVAR(default_cf_options), + MYSQL_SYSVAR(override_cf_options), + + MYSQL_SYSVAR(background_sync), + 
+ MYSQL_SYSVAR(write_sync), + MYSQL_SYSVAR(write_disable_wal), + MYSQL_SYSVAR(write_ignore_missing_column_families), + + MYSQL_SYSVAR(skip_fill_cache), + MYSQL_SYSVAR(unsafe_for_binlog), + + MYSQL_SYSVAR(records_in_range), + MYSQL_SYSVAR(force_index_records_in_range), + MYSQL_SYSVAR(debug_optimizer_n_rows), + MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), + + MYSQL_SYSVAR(compact_cf), + MYSQL_SYSVAR(signal_drop_index_thread), + MYSQL_SYSVAR(pause_background_work), + MYSQL_SYSVAR(disable_2pc), + MYSQL_SYSVAR(strict_collation_check), + MYSQL_SYSVAR(strict_collation_exceptions), + MYSQL_SYSVAR(collect_sst_properties), + MYSQL_SYSVAR(force_flush_memtable_now), + MYSQL_SYSVAR(flush_memtable_on_analyze), + MYSQL_SYSVAR(seconds_between_stat_computes), + + MYSQL_SYSVAR(compaction_sequential_deletes), + MYSQL_SYSVAR(compaction_sequential_deletes_window), + MYSQL_SYSVAR(compaction_sequential_deletes_file_size), + MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), + + MYSQL_SYSVAR(datadir), + MYSQL_SYSVAR(create_checkpoint), + + MYSQL_SYSVAR(checksums_pct), + MYSQL_SYSVAR(store_checksums), + MYSQL_SYSVAR(verify_checksums), + + MYSQL_SYSVAR(validate_tables), + MYSQL_SYSVAR(table_stats_sampling_pct), + nullptr +}; + + +static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD* thd) +{ + rocksdb::WriteOptions opt; + + opt.sync= THDVAR(thd, write_sync); + opt.disableWAL= THDVAR(thd, write_disable_wal); + opt.ignore_missing_column_families= + THDVAR(thd, write_ignore_missing_column_families); + + return opt; +} + +/////////////////////////////////////////////////////////////////////////////////////////// + +/** + @brief + Function we use in the creation of our hash to get key. 
+*/ + +uchar* Rdb_open_tables_map::get_hash_key( + Rdb_table_handler *table_handler, size_t *length, + my_bool not_used __attribute__((__unused__))) +{ + *length= table_handler->m_table_name_length; + return reinterpret_cast(table_handler->m_table_name); +} + + +/* + The following is needed as an argument for mysql_stage_register, + irrespectively of whether we're compiling with P_S or not. +*/ +PSI_stage_info stage_waiting_on_row_lock= { 0, "Waiting for row lock", 0}; + +#ifdef HAVE_PSI_INTERFACE +static PSI_thread_key rdb_background_psi_thread_key; +static PSI_thread_key rdb_drop_idx_psi_thread_key; + +static PSI_stage_info *all_rocksdb_stages[]= +{ + & stage_waiting_on_row_lock +}; + + +static my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, + rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, + rdb_collation_data_mutex_key, + rdb_mem_cmp_space_mutex_key, + key_mutex_tx_list, rdb_sysvars_psi_mutex_key; + +static PSI_mutex_info all_rocksdb_mutexes[]= +{ + { &rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, + { &rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, + { &rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, + { &rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, + { &rdb_mem_cmp_space_mutex_key, "collation space char data init", + PSI_FLAG_GLOBAL}, + { &key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, + { &rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, +}; + +static PSI_rwlock_key key_rwlock_collation_exception_list; +static PSI_rwlock_key key_rwlock_read_free_rpl_tables; +static PSI_rwlock_key key_rwlock_skip_unique_check_tables; + +static PSI_rwlock_info all_rocksdb_rwlocks[]= +{ + { &key_rwlock_collation_exception_list, "collation_exception_list", + PSI_FLAG_GLOBAL}, + { &key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, + { &key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", + PSI_FLAG_GLOBAL}, +}; + 
+PSI_cond_key rdb_signal_bg_psi_cond_key, rdb_signal_drop_idx_psi_cond_key; + +static PSI_cond_info all_rocksdb_conds[]= +{ + { &rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, + { &rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", + PSI_FLAG_GLOBAL}, +}; + +static PSI_thread_info all_rocksdb_threads[]= +{ + { &rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, + { &rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, +}; + +static void init_rocksdb_psi_keys() +{ + const char* category= "rocksdb"; + int count; + + if (PSI_server == nullptr) + return; + + count= array_elements(all_rocksdb_mutexes); + PSI_server->register_mutex(category, all_rocksdb_mutexes, count); + + count= array_elements(all_rocksdb_rwlocks); + PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count); + + count= array_elements(all_rocksdb_conds); + // TODO Disabling PFS for conditions due to the bug https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 + // PSI_server->register_cond(category, all_rocksdb_conds, count); + + count= array_elements(all_rocksdb_stages); + mysql_stage_register(category, all_rocksdb_stages, count); + + count= array_elements(all_rocksdb_threads); + mysql_thread_register(category, all_rocksdb_threads, count); +} +#endif + + +/* + Drop index thread's control +*/ + +static Rdb_drop_index_thread rdb_drop_idx_thread; + +static void rocksdb_drop_index_wakeup_thread( + my_core::THD* thd __attribute__((__unused__)), + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr __attribute__((__unused__)), + const void* save) +{ + if (*static_cast(save)) { + rdb_drop_idx_thread.signal(); + } +} + +static inline uint32_t rocksdb_perf_context_level(THD* thd) +{ + DBUG_ASSERT(thd != nullptr); + + int session_perf_context_level= THDVAR(thd, perf_context_level); + if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) { + return session_perf_context_level; + } + + /* + Fallback to global thdvar, 
if session specific one was not set to a valid + value. + */ + + int global_perf_context_level= THDVAR(nullptr, perf_context_level); + if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) { + return global_perf_context_level; + } + + return rocksdb::PerfLevel::kDisable; +} + +/* + Very short (functor-like) interface to be passed to + Rdb_transaction::walk_tx_list() +*/ + +interface Rdb_tx_list_walker +{ + virtual ~Rdb_tx_list_walker() {} + virtual void process_tran(const Rdb_transaction*) = 0; +}; + +/* + This is a helper class that is passed to RocksDB to get notifications when + a snapshot gets created. +*/ + +class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier +{ + Rdb_transaction* m_owning_tx; + + void SnapshotCreated(const rocksdb::Snapshot *snapshot) override; + + public: + explicit Rdb_snapshot_notifier(Rdb_transaction* owning_tx) + : m_owning_tx(owning_tx) {} + + // If the owning Rdb_transaction gets destructed we need to not reference + // it anymore. + void detach() + { + m_owning_tx = nullptr; + } +}; + +/* This is the base class for transactions when interacting with rocksdb. +*/ +class Rdb_transaction +{ + protected: + ulonglong m_write_count= 0; + ulonglong m_lock_count= 0; + + bool m_is_delayed_snapshot= false; + bool m_is_two_phase= false; + + THD* m_thd= nullptr; + + rocksdb::ReadOptions m_read_opts; + + static std::multiset s_tx_list; + static mysql_mutex_t s_tx_list_mutex; + + Rdb_io_perf* m_tbl_io_perf; + + bool m_tx_read_only= false; + + int m_timeout_sec; /* Cached value of @@rocksdb_lock_wait_timeout */ + + /* Maximum number of locks the transaction can have */ + ulonglong m_max_row_locks; + + bool m_is_tx_failed= false; + bool m_rollback_only= false; + + std::shared_ptr m_notifier; + + // This should be used only when updating binlog information. 
+ virtual rocksdb::WriteBatchBase* get_write_batch()= 0; + virtual bool commit_no_binlog()= 0; + virtual rocksdb::Iterator *get_iterator( + const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle* column_family)= 0; + + +public: + const char* m_mysql_log_file_name; + my_off_t m_mysql_log_offset; + const char* m_mysql_gtid; + String m_detailed_error; + int64_t m_snapshot_timestamp= 0; + bool m_ddl_transaction; + + static void init_mutex() + { + mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST); + } + + static void term_mutex() + { + DBUG_ASSERT(s_tx_list.size() == 0); + mysql_mutex_destroy(&s_tx_list_mutex); + } + + static void walk_tx_list(Rdb_tx_list_walker* walker) + { + mysql_mutex_lock(&s_tx_list_mutex); + for (auto it : s_tx_list) + walker->process_tran(it); + mysql_mutex_unlock(&s_tx_list_mutex); + } + + int set_status_error(THD *thd, const rocksdb::Status &s, + const std::shared_ptr& kd, + Rdb_tbl_def *tbl_def) + { + DBUG_ASSERT(!s.ok()); + + if (s.IsTimedOut()) + { + /* + SQL layer has weird expectations. If we return an error when + doing a read in DELETE IGNORE, it will ignore the error ("because it's + an IGNORE command!) but then will fail an assert, because "error code + was returned, but no error happened". Do what InnoDB's + convert_error_code_to_mysql() does: force a statement + rollback before returning HA_ERR_LOCK_WAIT_TIMEOUT: + */ + my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/); + m_detailed_error.copy(timeout_message("index", + tbl_def->full_tablename().c_str(), + kd->get_name().c_str())); + + return HA_ERR_LOCK_WAIT_TIMEOUT; + } + if (s.IsBusy()) + { + rocksdb_snapshot_conflict_errors++; + return HA_ERR_LOCK_DEADLOCK; + } + /* TODO: who returns HA_ERR_ROCKSDB_TOO_MANY_LOCKS now?? 
*/ + + my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str()); + return HA_ERR_INTERNAL_ERROR; + } + + THD* get_thd() const { return m_thd; } + + /* Used for tracking io_perf counters */ + void io_perf_start(Rdb_io_perf *io_perf) + { + /* + Since perf_context is tracked per thread, it is difficult and expensive + to maintain perf_context on a per table basis. Therefore, roll all + perf_context data into the first table used in a query. This works well + for single table queries and is probably good enough for queries that hit + multiple tables. + + perf_context stats gathering is started when the table lock is acquired + or when ha_rocksdb::start_stmt is called in case of LOCK TABLES. They + are recorded when the table lock is released, or when commit/rollback + is called on the transaction, whichever comes first. Table lock release + and commit/rollback can happen in different orders. In the case where + the lock is released before commit/rollback is called, an extra step to + gather stats during commit/rollback is needed. 
+ */ + if (m_tbl_io_perf == nullptr && + io_perf->start(rocksdb_perf_context_level(m_thd))) + { + m_tbl_io_perf= io_perf; + } + } + + void io_perf_end_and_record(void) + { + if (m_tbl_io_perf != nullptr) + { + m_tbl_io_perf->end_and_record(rocksdb_perf_context_level(m_thd)); + m_tbl_io_perf= nullptr; + } + } + + void io_perf_end_and_record(Rdb_io_perf *io_perf) + { + if (m_tbl_io_perf == io_perf) + { + io_perf_end_and_record(); + } + } + + void set_params(int timeout_sec_arg, int max_row_locks_arg) + { + m_timeout_sec= timeout_sec_arg; + m_max_row_locks= max_row_locks_arg; + set_lock_timeout(timeout_sec_arg); + } + + virtual void set_lock_timeout(int timeout_sec_arg)= 0; + + ulonglong get_write_count() const { return m_write_count; } + + ulonglong get_lock_count() const { return m_lock_count; } + + virtual void set_sync(bool sync)= 0; + + virtual void release_lock(rocksdb::ColumnFamilyHandle* column_family, + const std::string& rowkey)= 0; + + virtual bool prepare(const rocksdb::TransactionName& name)= 0; + + bool commit_or_rollback() + { + bool res; + if (m_is_tx_failed) + { + rollback(); + res= false; + } + else + res= commit(); + return res; + } + + bool commit() + { + if (get_write_count() == 0) + { + rollback(); + return false; + } + else if (m_rollback_only) + { + /* + Transactions marked as rollback_only are expected to be rolled back at + prepare(). But there are some exceptions like below that prepare() is + never called and commit() is called instead. + 1. Binlog is disabled + 2. No modification exists in binlog cache for the transaction (#195) + In both cases, rolling back transaction is safe. Nothing is written to + binlog. 
+ */ + my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + rollback(); + return true; + } + else + { + my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name, + &m_mysql_log_offset, &m_mysql_gtid); + binlog_manager.update(m_mysql_log_file_name, + m_mysql_log_offset, + m_mysql_gtid, get_write_batch()); + return commit_no_binlog(); + } + } + + virtual void rollback()= 0; + + void snapshot_created(const rocksdb::Snapshot *snapshot) + { + m_read_opts.snapshot = snapshot; + rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); + m_is_delayed_snapshot = false; + } + + virtual void acquire_snapshot(bool acquire_now)= 0; + virtual void release_snapshot()= 0; + + bool has_snapshot() const + { + return m_read_opts.snapshot != nullptr; + } + + private: + // The tables we are currently loading. In a partitioned table this can + // have more than one entry + std::vector m_curr_bulk_load; + + public: + int finish_bulk_load() + { + int rc= 0; + + std::vector::iterator it; + while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) + { + int rc2= (*it)->finalize_bulk_load(); + if (rc2 != 0 && rc == 0) + { + rc= rc2; + } + } + + DBUG_ASSERT(m_curr_bulk_load.size() == 0); + + return rc; + } + + void start_bulk_load(ha_rocksdb* bulk_load) + { + /* + If we already have an open bulk load of a table and the name doesn't + match the current one, close out the currently running one. This allows + multiple bulk loads to occur on a partitioned table, but then closes + them all out when we switch to another table. 
+ */ + if (!m_curr_bulk_load.empty() && + !bulk_load->same_table(*m_curr_bulk_load[0])) + { + auto res= finish_bulk_load(); + SHIP_ASSERT(res == 0); + } + + m_curr_bulk_load.push_back(bulk_load); + } + + void end_bulk_load(ha_rocksdb* bulk_load) + { + for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); + it++) + { + if (*it == bulk_load) + { + m_curr_bulk_load.erase(it); + return; + } + } + + // Should not reach here + SHIP_ASSERT(0); + } + + /* + Flush the data accumulated so far. This assumes we're doing a bulk insert. + + @detail + This should work like transaction commit, except that we don't + synchronize with the binlog (there is no API that would allow to have + binlog flush the changes accumulated so far and return its current + position) + + @todo + Add test coverage for what happens when somebody attempts to do bulk + inserts while inside a multi-statement transaction. + */ + bool flush_batch() + { + if (get_write_count() == 0) + return false; + + /* Commit the current transaction */ + if (commit_no_binlog()) + return true; + + /* Start another one */ + start_tx(); + return false; + } + + virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + const rocksdb::Slice& value)= 0; + virtual rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key)= 0; + virtual rocksdb::Status single_delete( + rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key)= 0; + + virtual bool has_modifications() const= 0; + + virtual rocksdb::WriteBatchBase* get_indexed_write_batch()= 0; + /* + Return a WriteBatch that one can write to. The writes will skip any + transaction locking. The writes will NOT be visible to the transaction. 
+ */ + rocksdb::WriteBatchBase* get_blind_write_batch() + { + return get_indexed_write_batch()->GetWriteBatch(); + } + + virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + std::string* value) const= 0; + virtual rocksdb::Status get_for_update( + rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, std::string* value)= 0; + + rocksdb::Iterator *get_iterator(rocksdb::ColumnFamilyHandle* column_family, + bool skip_bloom_filter, + bool fill_cache, + bool read_current= false, + bool create_snapshot= true) + { + // Make sure we are not doing both read_current (which implies we don't + // want a snapshot) and create_snapshot which makes sure we create + // a snapshot + DBUG_ASSERT(!read_current || !create_snapshot); + + if (create_snapshot) + acquire_snapshot(true); + + rocksdb::ReadOptions options= m_read_opts; + + if (skip_bloom_filter) + { + options.total_order_seek= true; + } + else + { + // With this option, Iterator::Valid() returns false if key + // is outside of the prefix bloom filter range set at Seek(). + // Must not be set to true if not using bloom filter. 
+ options.prefix_same_as_start= true; + } + options.fill_cache= fill_cache; + if (read_current) + { + options.snapshot= nullptr; + } + return get_iterator(options, column_family); + } + + virtual bool is_tx_started() const= 0; + virtual void start_tx()= 0; + virtual void start_stmt()= 0; + virtual void rollback_stmt()= 0; + + void set_tx_failed(bool failed_arg) { m_is_tx_failed= failed_arg; } + + bool can_prepare() const + { + if (m_rollback_only) + { + my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + return false; + } + return true; + } + + int rollback_to_savepoint(void *savepoint) + { + if (has_modifications()) + { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks currently does not support ROLLBACK TO " + "SAVEPOINT if modifying rows.", + MYF(0)); + m_rollback_only= true; + return 1; + } + return 0; + } + + /* + This is used by transactions started with "START TRANSACTION WITH " + "CONSISTENT [ROCKSDB] SNAPSHOT". When tx_read_only is turned on, + snapshot has to be created via DB::GetSnapshot(), not via Transaction + API. + */ + bool is_tx_read_only() const + { + return m_tx_read_only; + } + + bool is_two_phase() const + { + return m_is_two_phase; + } + + void set_tx_read_only(bool val) + { + m_tx_read_only= val; + } + + explicit Rdb_transaction(THD *thd): m_thd(thd), m_tbl_io_perf(nullptr) + { + mysql_mutex_lock(&s_tx_list_mutex); + s_tx_list.insert(this); + mysql_mutex_unlock(&s_tx_list_mutex); + } + + virtual ~Rdb_transaction() { + mysql_mutex_lock(&s_tx_list_mutex); + s_tx_list.erase(this); + mysql_mutex_unlock(&s_tx_list_mutex); + } +}; + +/* + This is a rocksdb transaction. Its members represent the current transaction, + which consists of: + - the snapshot + - the changes we've made but are not seeing yet. + + The changes are made to individual tables, which store them here and then + this object commits them on commit. 
+*/ +class Rdb_transaction_impl : public Rdb_transaction +{ + rocksdb::Transaction *m_rocksdb_tx= nullptr; + rocksdb::Transaction *m_rocksdb_reuse_tx= nullptr; + + public: + void set_lock_timeout(int timeout_sec_arg) override + { + if (m_rocksdb_tx) + m_rocksdb_tx->SetLockTimeout(m_timeout_sec * 1000); + } + + void set_sync(bool sync) override + { + m_rocksdb_tx->GetWriteOptions()->sync= sync; + } + + void release_lock(rocksdb::ColumnFamilyHandle* column_family, + const std::string &rowkey) override + { + if (!THDVAR(m_thd, lock_scanned_rows)) + { + m_rocksdb_tx->UndoGetForUpdate(column_family, rocksdb::Slice(rowkey)); + } + } + + private: + void release_tx(void) + { + // We are done with the current active transaction object. Preserve it + // for later reuse. + DBUG_ASSERT(m_rocksdb_reuse_tx == nullptr); + m_rocksdb_reuse_tx= m_rocksdb_tx; + m_rocksdb_tx= nullptr; + } + + bool prepare(const rocksdb::TransactionName& name) override + { + rocksdb::Status s; + s= m_rocksdb_tx->SetName(name); + if (!s.ok()) + { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + return false; + } + + s= m_rocksdb_tx->Prepare(); + if (!s.ok()) + { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + return false; + } + return true; + } + + bool commit_no_binlog() override + { + bool res= false; + release_snapshot(); + rocksdb::Status s= m_rocksdb_tx->Commit(); + if (!s.ok()) + { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res= true; + } + + /* Save the transaction object to be reused */ + release_tx(); + + m_write_count= 0; + m_lock_count= 0; + set_tx_read_only(false); + m_rollback_only= false; + return res; + } + + public: + void rollback() override + { + m_write_count= 0; + m_lock_count= 0; + m_ddl_transaction= false; + if (m_rocksdb_tx) + { + release_snapshot(); + /* This will also release all of the locks: */ + m_rocksdb_tx->Rollback(); + + /* Save the transaction object to be reused */ + release_tx(); + + set_tx_read_only(false); + m_rollback_only= false; + } + } + + void 
acquire_snapshot(bool acquire_now) override + { + if (m_read_opts.snapshot == nullptr) { + if (is_tx_read_only()) { + snapshot_created(rdb->GetSnapshot()); + } + else if (acquire_now) { + m_rocksdb_tx->SetSnapshot(); + snapshot_created(m_rocksdb_tx->GetSnapshot()); + } + else if (!m_is_delayed_snapshot) { + m_rocksdb_tx->SetSnapshotOnNextOperation(m_notifier); + m_is_delayed_snapshot = true; + } + } + } + + void release_snapshot() override + { + bool need_clear = m_is_delayed_snapshot; + + if (m_read_opts.snapshot != nullptr) + { + m_snapshot_timestamp = 0; + if (is_tx_read_only()) + { + rdb->ReleaseSnapshot(m_read_opts.snapshot); + need_clear = false; + } + else + { + need_clear = true; + } + m_read_opts.snapshot = nullptr; + } + + if (need_clear && m_rocksdb_tx != nullptr) + m_rocksdb_tx->ClearSnapshot(); + } + + bool has_snapshot() + { + return m_read_opts.snapshot != nullptr; + } + + const char *err_too_many_locks= + "Number of locks held by the transaction exceeded @@rocksdb_max_row_locks"; + + rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + const rocksdb::Slice& value) override + { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return m_rocksdb_tx->Put(column_family, key, value); + } + + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key) override + { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return m_rocksdb_tx->Delete(column_family, key); + } + + rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key) override + { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return 
rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return m_rocksdb_tx->SingleDelete(column_family, key); + } + + bool has_modifications() const override + { + return m_rocksdb_tx->GetWriteBatch() && + m_rocksdb_tx->GetWriteBatch()->GetWriteBatch() && + m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()->Count() > 0; + } + + rocksdb::WriteBatchBase* get_write_batch() override + { + if (is_two_phase()) + { + return m_rocksdb_tx->GetCommitTimeWriteBatch(); + } + return m_rocksdb_tx->GetWriteBatch()->GetWriteBatch(); + } + + /* + Return a WriteBatch that one can write to. The writes will skip any + transaction locking. The writes WILL be visible to the transaction. + */ + rocksdb::WriteBatchBase* get_indexed_write_batch() override + { + ++m_write_count; + return m_rocksdb_tx->GetWriteBatch(); + } + + rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + std::string* value) const override + { + return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); + } + + rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + std::string* value) override + { + if (++m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value); + } + + rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle* column_family) + override + { + return m_rocksdb_tx->GetIterator(options, column_family); + } + + bool is_tx_started() const override + { + return (m_rocksdb_tx != nullptr); + } + + void start_tx() override + { + rocksdb::TransactionOptions tx_opts; + rocksdb::WriteOptions write_opts; + tx_opts.set_snapshot= false; + tx_opts.lock_timeout= m_timeout_sec * 1000; + + write_opts.sync= THDVAR(m_thd, write_sync); + write_opts.disableWAL= THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families= + THDVAR(m_thd, 
write_ignore_missing_column_families); + m_is_two_phase= !rocksdb_disable_2pc; + + /* + If m_rocksdb_reuse_tx is null this will create a new transaction object. + Otherwise it will reuse the existing one. + */ + m_rocksdb_tx= rdb->BeginTransaction(write_opts, tx_opts, + m_rocksdb_reuse_tx); + m_rocksdb_reuse_tx= nullptr; + + m_read_opts= rocksdb::ReadOptions(); + + m_ddl_transaction= false; + } + + /* + Start a statement inside a multi-statement transaction. + + @todo: are we sure this is called once (and not several times) per + statement start? + + For hooking to start of statement that is its own transaction, see + ha_rocksdb::external_lock(). + */ + void start_stmt() override + { + // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation) + acquire_snapshot(false); + m_rocksdb_tx->SetSavePoint(); + } + + /* + This must be called when last statement is rolled back, but the transaction + continues + */ + void rollback_stmt() override + { + /* TODO: here we must release the locks taken since the start_stmt() call */ + if (m_rocksdb_tx) + { + const rocksdb::Snapshot *org_snapshot = m_rocksdb_tx->GetSnapshot(); + m_rocksdb_tx->RollbackToSavePoint(); + + const rocksdb::Snapshot *cur_snapshot = m_rocksdb_tx->GetSnapshot(); + if (org_snapshot != cur_snapshot) + { + if (org_snapshot != nullptr) + m_snapshot_timestamp = 0; + + m_read_opts.snapshot = cur_snapshot; + if (cur_snapshot != nullptr) + rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); + else + m_is_delayed_snapshot = true; + } + } + } + + explicit Rdb_transaction_impl(THD *thd) : + Rdb_transaction(thd), m_rocksdb_tx(nullptr) + { + // Create a notifier that can be called when a snapshot gets generated. + m_notifier = std::make_shared(this); + } + + virtual ~Rdb_transaction_impl() + { + rollback(); + + // Theoretically the notifier could outlive the Rdb_transaction_impl + // (because of the shared_ptr), so let it know it can't reference + // the transaction anymore. 
+ m_notifier->detach(); + + // Free any transaction memory that is still hanging around. + delete m_rocksdb_reuse_tx; + DBUG_ASSERT(m_rocksdb_tx == nullptr); + } +}; + +/* This is a rocksdb write batch. This class doesn't hold or wait on any + transaction locks (skips rocksdb transaction API) thus giving better + performance. The commit is done through rdb->GetBaseDB()->Commit(). + + Currently this is only used for replication threads which are guaranteed + to be non-conflicting. Any further usage of this class should completely + be thought thoroughly. +*/ +class Rdb_writebatch_impl : public Rdb_transaction +{ + rocksdb::WriteBatchWithIndex* m_batch; + rocksdb::WriteOptions write_opts; + // Called after commit/rollback. + void reset() + { + m_batch->Clear(); + m_read_opts = rocksdb::ReadOptions(); + m_ddl_transaction= false; + } + private: + bool prepare(const rocksdb::TransactionName& name) override + { + return true; + } + + bool commit_no_binlog() override + { + bool res= false; + release_snapshot(); + rocksdb::Status s= rdb->GetBaseDB()->Write(write_opts, + m_batch->GetWriteBatch()); + if (!s.ok()) + { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res= true; + } + reset(); + + m_write_count= 0; + set_tx_read_only(false); + m_rollback_only= false; + return res; + } + public: + void set_lock_timeout(int timeout_sec_arg) override + { + // Nothing to do here. + } + + void set_sync(bool sync) override + { + write_opts.sync= sync; + } + + void release_lock(rocksdb::ColumnFamilyHandle* column_family, + const std::string &rowkey) override + { + // Nothing to do here since we don't hold any row locks. 
+ } + + void rollback() override + { + m_write_count= 0; + m_lock_count= 0; + release_snapshot(); + + reset(); + set_tx_read_only(false); + m_rollback_only= false; + } + + void acquire_snapshot(bool acquire_now) override + { + if (m_read_opts.snapshot == nullptr) + snapshot_created(rdb->GetSnapshot()); + } + + void release_snapshot() override + { + if (m_read_opts.snapshot != nullptr) + { + rdb->ReleaseSnapshot(m_read_opts.snapshot); + m_read_opts.snapshot = nullptr; + } + } + + rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + const rocksdb::Slice& value) override + { + ++m_write_count; + m_batch->Put(column_family, key, value); + // Note Put/Delete in write batch doesn't return any error code. We simply + // return OK here. + return rocksdb::Status::OK(); + } + + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key) override + { + ++m_write_count; + m_batch->Delete(column_family, key); + return rocksdb::Status::OK(); + } + + rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key) override + { + ++m_write_count; + m_batch->SingleDelete(column_family, key); + return rocksdb::Status::OK(); + } + + bool has_modifications() const override + { + return m_batch->GetWriteBatch()->Count() > 0; + } + + rocksdb::WriteBatchBase* get_write_batch() override + { + return m_batch; + } + + rocksdb::WriteBatchBase* get_indexed_write_batch() override + { + ++m_write_count; + return m_batch; + } + + rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + std::string* value) const override + { + return m_batch->GetFromBatchAndDB( + rdb, m_read_opts, column_family, key, value); + } + + rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key, + std::string* value) override + { + return get(column_family, key, value); + } + + rocksdb::Iterator *get_iterator(const 
rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle* column_family) + override + { + auto it = rdb->NewIterator(options); + return m_batch->NewIteratorWithBase(it); + } + + bool is_tx_started() const override + { + return (m_batch != nullptr); + } + + void start_tx() override + { + reset(); + write_opts.sync= THDVAR(m_thd, write_sync); + write_opts.disableWAL= THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families= + THDVAR(m_thd, write_ignore_missing_column_families); + } + + void start_stmt() override + { + m_batch->SetSavePoint(); + } + + void rollback_stmt() override + { + if (m_batch) + m_batch->RollbackToSavePoint(); + } + + explicit Rdb_writebatch_impl(THD *thd) : + Rdb_transaction(thd), m_batch(nullptr) + { + m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), + 0 , true); + } + + virtual ~Rdb_writebatch_impl() + { + rollback(); + delete m_batch; + } +}; + +void Rdb_snapshot_notifier::SnapshotCreated(const rocksdb::Snapshot *snapshot) +{ + if (m_owning_tx != nullptr) + { + m_owning_tx->snapshot_created(snapshot); + } +} + +std::multiset Rdb_transaction::s_tx_list; +mysql_mutex_t Rdb_transaction::s_tx_list_mutex; + +static Rdb_transaction* &get_tx_from_thd(THD *thd) +{ + return *reinterpret_cast( + my_core::thd_ha_data(thd, rocksdb_hton)); +} + +namespace { + +class Rdb_perf_context_guard +{ + Rdb_io_perf m_io_perf; + THD *m_thd; + + public: + explicit Rdb_perf_context_guard(THD *thd) : m_thd(thd) + { + Rdb_transaction*& tx= get_tx_from_thd(m_thd); + /* + if perf_context information is already being recorded, this becomes a + no-op + */ + if (tx != nullptr) + { + tx->io_perf_start(&m_io_perf); + } + } + + ~Rdb_perf_context_guard() + { + Rdb_transaction*& tx= get_tx_from_thd(m_thd); + if (tx != nullptr) + { + tx->io_perf_end_and_record(); + } + } +}; + +} // anonymous namespace + +/* + TODO: maybe, call this in external_lock() and store in ha_rocksdb.. 
+*/ + +static Rdb_transaction *get_or_create_tx(THD *thd) +{ + Rdb_transaction*& tx= get_tx_from_thd(thd); + // TODO: this is called too many times.. O(#rows) + if (tx == nullptr) + { + if (rpl_skip_tx_api_var && thd->rli_slave) + tx= new Rdb_writebatch_impl(thd); + else + tx= new Rdb_transaction_impl(thd); + tx->set_params(THDVAR(thd, lock_wait_timeout), + THDVAR(thd, max_row_locks)); + tx->start_tx(); + } + else + { + tx->set_params(THDVAR(thd, lock_wait_timeout), + THDVAR(thd, max_row_locks)); + if (!tx->is_tx_started()) + { + tx->start_tx(); + } + } + + return tx; +} + + +static int rocksdb_close_connection(handlerton* hton, THD* thd) +{ + Rdb_transaction*& tx= get_tx_from_thd(thd); + if (tx != nullptr) + { + int rc= tx->finish_bulk_load(); + if (rc != 0) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error %d finalizing last SST file while " + "disconnecting", rc); + abort_with_stack_traces(); + } + + delete tx; + tx= nullptr; + } + return 0; +} + +/* + * Serializes an xid to a string so that it can + * be used as a rocksdb transaction name + */ +static std::string rdb_xid_to_string(const XID& src) +{ + DBUG_ASSERT(src.gtrid_length >= 0 && src.gtrid_length <= MAXGTRIDSIZE); + DBUG_ASSERT(src.bqual_length >= 0 && src.bqual_length <= MAXBQUALSIZE); + + std::string buf; + buf.reserve(RDB_XIDHDR_LEN + src.gtrid_length + src.bqual_length); + + /* + * expand formatID to fill 8 bytes if it doesn't already + * then reinterpret bit pattern as unsigned and store in network order + */ + uchar fidbuf[RDB_FORMATID_SZ]; + int64 signed_fid8= src.formatID; + uint64 raw_fid8= *reinterpret_cast(&signed_fid8); + rdb_netbuf_store_uint64(fidbuf, raw_fid8); + buf.append(reinterpret_cast(fidbuf), RDB_FORMATID_SZ); + + buf.push_back(src.gtrid_length); + buf.push_back(src.bqual_length); + buf.append(src.data, (src.gtrid_length) + (src.bqual_length)); + return buf; +} + + +/** + Called by hton->flush_logs after MySQL group commit prepares a set of + transactions. 
+*/ +static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)), + ulonglong target_lsn __attribute__((__unused__))) +{ + DBUG_ASSERT(rdb != nullptr); + rocksdb::Status s= rdb->SyncWAL(); + if (!s.ok()) { + return 1; + } + return 0; +} + +/** + For a slave, prepare() updates the slave_gtid_info table which tracks the + replication progress. +*/ +static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, + bool async) +{ + Rdb_transaction*& tx= get_tx_from_thd(thd); + if (!tx->can_prepare()) + { + return 1; + } + if (prepare_tx || + (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + /* We were instructed to prepare the whole transaction, or + this is an SQL statement end and autocommit is on */ + std::vector slave_gtid_info; + my_core::thd_slave_gtid_info(thd, &slave_gtid_info); + for (auto it : slave_gtid_info) { + rocksdb::WriteBatchBase* write_batch = tx->get_blind_write_batch(); + binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch); + } + + if (tx->is_two_phase()) { + if (thd->durability_property == HA_IGNORE_DURABILITY || async) { + tx->set_sync(false); + } + XID xid; + thd_get_xid(thd, reinterpret_cast(&xid)); + if (!tx->prepare(rdb_xid_to_string(xid))) { + return 1; + } + if (thd->durability_property == HA_IGNORE_DURABILITY) { + /** + we set the log sequence as '1' just to trigger hton->flush_logs + */ + thd_store_lsn(thd, 1, DB_TYPE_ROCKSDB); + } + } + + DEBUG_SYNC(thd, "rocksdb.prepared"); + } + + return 0; +} + +/** + do nothing for prepare/commit by xid + this is needed to avoid crashes in XA scenarios +*/ +static int rocksdb_commit_by_xid(handlerton* hton, XID* xid) +{ + auto name= rdb_xid_to_string(*xid); + rocksdb::Transaction *trx= rdb->GetTransactionByName(name); + if (trx == nullptr) { + return 1; + } + rocksdb::Status s= trx->Commit(); + if (!s.ok()) { + return 1; + } + delete trx; + return 0; +} + +static int rocksdb_rollback_by_xid(handlerton* hton 
__attribute__((__unused__)), + XID* xid) +{ + auto name= rdb_xid_to_string(*xid); + rocksdb::Transaction *trx= rdb->GetTransactionByName(name); + if (trx == nullptr) { + return 1; + } + rocksdb::Status s= trx->Rollback(); + if (!s.ok()) { + return 1; + } + delete trx; + return 0; +} + +/** + Rebuilds an XID from a serialized version stored in a string. +*/ +static void rdb_xid_from_string(const std::string& src, XID *dst) +{ + DBUG_ASSERT(dst != nullptr); + uint offset= 0; + uint64 raw_fid8= + rdb_netbuf_to_uint64(reinterpret_cast(src.data())); + int64 signed_fid8= *reinterpret_cast(&raw_fid8); + dst->formatID= signed_fid8; + offset += RDB_FORMATID_SZ; + dst->gtrid_length= src.at(offset); + offset += RDB_GTRID_SZ; + dst->bqual_length= src.at(offset); + offset += RDB_BQUAL_SZ; + + DBUG_ASSERT(dst->gtrid_length >= 0 && dst->gtrid_length <= MAXGTRIDSIZE); + DBUG_ASSERT(dst->bqual_length >= 0 && dst->bqual_length <= MAXBQUALSIZE); + + src.copy(dst->data, (dst->gtrid_length)+(dst->bqual_length), RDB_XIDHDR_LEN); +} + +/** + Reading last committed binary log info from RocksDB system row. + The info is needed for crash safe slave/master to work. 
+*/ +static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, + char* binlog_file, my_off_t* binlog_pos) +{ + if (binlog_file && binlog_pos) + { + char file_buf[FN_REFLEN+1]= {0}; + my_off_t pos; + char gtid_buf[FN_REFLEN+1]= {0}; + if (binlog_manager.read(file_buf, &pos, gtid_buf)) + { + if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos)) + { + memcpy(binlog_file, file_buf, FN_REFLEN + 1); + *binlog_pos= pos; + fprintf(stderr, "RocksDB: Last binlog file position %llu," + " file name %s\n", pos, file_buf); + if (*gtid_buf) + { + fprintf(stderr, "RocksDB: Last MySQL Gtid %s\n", gtid_buf); + } + } + } + } + + if (len == 0 || xid_list == nullptr) + { + return 0; + } + + std::vector trans_list; + rdb->GetAllPreparedTransactions(&trans_list); + + uint count= 0; + for (auto& trans : trans_list) + { + if (count >= len) + { + break; + } + auto name= trans->GetName(); + rdb_xid_from_string(name, &xid_list[count]); + count++; + } + return count; +} + +static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx, bool) +{ + DBUG_ENTER("rocksdb_commit"); + + /* this will trigger saving of perf_context information */ + Rdb_perf_context_guard guard(thd); + + /* note: h->external_lock(F_UNLCK) is called after this function is called) */ + Rdb_transaction*& tx= get_tx_from_thd(thd); + + if (tx != nullptr) + { + if (commit_tx || + (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) + { + /* + We get here + - For a COMMIT statement that finishes a multi-statement transaction + - For a statement that has its own transaction + */ + if (tx->commit()) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + else + { + /* + We get here when committing a statement within a transaction. + + We don't need to do anything here. tx->start_stmt() will notify + Rdb_transaction_impl that another statement has started. 
+ */ + tx->set_tx_failed(false); + } + + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) + { + // For READ_COMMITTED, we release any existing snapshot so that we will + // see any changes that occurred since the last statement. + tx->release_snapshot(); + } + } + DBUG_RETURN(0); +} + + +static int rocksdb_rollback(handlerton* hton, THD* thd, bool rollback_tx) +{ + Rdb_perf_context_guard guard(thd); + Rdb_transaction*& tx= get_tx_from_thd(thd); + + if (tx != nullptr) + { + if (rollback_tx) + { + /* + We get here, when + - ROLLBACK statement is issued. + + Discard the changes made by the transaction + */ + tx->rollback(); + } + else + { + /* + We get here when + - a statement with AUTOCOMMIT=1 is being rolled back (because of some + error) + - a statement inside a transaction is rolled back + */ + + tx->rollback_stmt(); + tx->set_tx_failed(true); + } + + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) + { + // For READ_COMMITTED, we release any existing snapshot so that we will + // see any changes that occurred since the last statement. + tx->release_snapshot(); + } + } + return 0; +} + +static bool print_stats(THD* thd, + std::string const& type, + std::string const& name, + std::string const& status, + stat_print_fn *stat_print) +{ + return stat_print(thd, type.c_str(), type.size(), name.c_str(), name.size(), + status.c_str(), status.size()); +} + +static std::string format_string( + const char *format, + ...) 
+{ + std::string res; + va_list args; + va_list args_copy; + + va_start(args, format); + va_copy(args_copy, args); + + size_t len = vsnprintf(nullptr, 0, format, args) + 1; + va_end(args); + + if (len == 0) { + res = std::string(""); + } + else { + char buff[len]; + (void) vsnprintf(buff, len, format, args_copy); + + res = std::string(buff); + } + + va_end(args_copy); + + return res; +} + +class Rdb_snapshot_status : public Rdb_tx_list_walker +{ + private: + std::string m_data; + + static std::string current_timestamp(void) + { + static const char *const format = "%d-%02d-%02d %02d:%02d:%02d"; + time_t currtime; + struct tm currtm; + + time(&currtime); + + localtime_r(&currtime, &currtm); + + return format_string(format, currtm.tm_year + 1900, currtm.tm_mon + 1, + currtm.tm_mday, currtm.tm_hour, currtm.tm_min, + currtm.tm_sec); + } + + static std::string get_header(void) + { + return + "\n============================================================\n" + + current_timestamp() + + " ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "============================================================\n" + "---------\n" + "SNAPSHOTS\n" + "---------\n" + "LIST OF SNAPSHOTS FOR EACH SESSION:\n"; + } + + static std::string get_footer(void) + { + return + "-----------------------------------------\n" + "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "=========================================\n"; + } + + public: + Rdb_snapshot_status() : m_data(get_header()) {} + + std::string getResult() { return m_data + get_footer(); } + + /* Implement Rdb_transaction interface */ + /* Create one row in the snapshot status table */ + void process_tran(const Rdb_transaction *tx) override + { + /* Calculate the duration the snapshot has existed */ + int64_t snapshot_timestamp = tx->m_snapshot_timestamp; + if (snapshot_timestamp != 0) + { + int64_t curr_time; + rdb->GetEnv()->GetCurrentTime(&curr_time); + + THD* thd = tx->get_thd(); + + m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" + "MySQL 
thread id %lu, OS thread handle %p\n" + "lock count %llu, write count %llu\n", + curr_time - snapshot_timestamp, + my_core::thd_get_thread_id(thd), thd, + tx->get_lock_count(), tx->get_write_count()); + } + } +}; + +/* Generate the snapshot status table */ +static bool rocksdb_show_snapshot_status(handlerton* hton, + THD* thd, + stat_print_fn* stat_print) +{ + Rdb_snapshot_status showStatus; + + Rdb_transaction::walk_tx_list(&showStatus); + + // Send the result data back to MySQL */ + return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(), + stat_print); +} + +/* + This is called for SHOW ENGINE ROCKSDB STATUS|LOGS|etc. + + For now, produce info about live files (which gives an imprecise idea about + what column families are there) +*/ + +static bool rocksdb_show_status(handlerton* hton, + THD* thd, + stat_print_fn* stat_print, + enum ha_stat_type stat_type) +{ + bool res= false; + if (stat_type == HA_ENGINE_STATUS) + { + std::string str; + + /* Per DB stats */ + if (rdb->GetProperty("rocksdb.dbstats", &str)) { + res |= print_stats(thd, "DBSTATS", "rocksdb", str, stat_print); + } + + /* Per column family stats */ + for (auto cf_name : cf_manager.get_cf_names()) + { + rocksdb::ColumnFamilyHandle* cfh; + bool is_automatic; + + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. 
+ */ + cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) + continue; + + if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str)) + continue; + + res |= print_stats(thd, "CF_COMPACTION", cf_name, str, stat_print); + } + + /* Memory Statistics */ + std::vector dbs; + std::unordered_set cache_set; + size_t internal_cache_count = 0; + size_t kDefaultInternalCacheSize = 8 * 1024 * 1024; + char buf[100]; + + dbs.push_back(rdb); + cache_set.insert(rocksdb_tbl_options.block_cache.get()); + for (const auto& cf_handle : cf_manager.get_all_cf()) + { + rocksdb::ColumnFamilyDescriptor cf_desc; + cf_handle->GetDescriptor(&cf_desc); + auto* table_factory = cf_desc.options.table_factory.get(); + if (table_factory != nullptr) + { + std::string tf_name = table_factory->Name(); + if (tf_name.find("BlockBasedTable") != std::string::npos) + { + const rocksdb::BlockBasedTableOptions* bbt_opt = + reinterpret_cast( + table_factory->GetOptions()); + if (bbt_opt != nullptr) + { + if (bbt_opt->block_cache.get() != nullptr) + { + cache_set.insert(bbt_opt->block_cache.get()); + } + else + { + internal_cache_count++; + } + cache_set.insert(bbt_opt->block_cache_compressed.get()); + } + } + } + } + + std::map temp_usage_by_type; + str.clear(); + rocksdb::MemoryUtil::GetApproximateMemoryUsageByType( + dbs, cache_set, &temp_usage_by_type); + snprintf(buf, sizeof(buf), "\nMemTable Total: %lu", + temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %lu", + temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nTable Readers Total: %lu", + temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nCache Total: %lu", + temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %lu", + internal_cache_count * 
kDefaultInternalCacheSize); + str.append(buf); + res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); + } + else if (stat_type == HA_ENGINE_TRX) + { + /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */ + res |= rocksdb_show_snapshot_status(hton, thd, stat_print); + } + + return res; +} + +static inline void rocksdb_register_tx(handlerton *hton, THD *thd, + Rdb_transaction *tx) +{ + trans_register_ha(thd, FALSE, rocksdb_hton); + if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + tx->start_stmt(); + trans_register_ha(thd, TRUE, rocksdb_hton); + } +} + +/* + Supporting START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT + + Features: + 1. Supporting START TRANSACTION WITH CONSISTENT SNAPSHOT + 2. Getting current binlog position in addition to #1. + + The second feature is done by START TRANSACTION WITH + CONSISTENT ROCKSDB SNAPSHOT. This is Facebook's extension, and + it works like existing START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT. + + - When not setting engine, START TRANSACTION WITH CONSISTENT SNAPSHOT + takes both InnoDB and RocksDB snapshots, and both InnoDB and RocksDB + participate in transaction. When executing COMMIT, both InnoDB and + RocksDB modifications are committed. Remember that XA is not supported yet, + so mixing engines is not recommended anyway. + + - When setting engine, START TRANSACTION WITH CONSISTENT.. takes + snapshot for the specified engine only. But it starts both + InnoDB and RocksDB transactions. 
+*/ +static int rocksdb_start_tx_and_assign_read_view( + handlerton* hton, /*!< in: RocksDB handlerton */ + THD* thd, /*!< in: MySQL thread handle of the + user for whom the transaction should + be committed */ + char* binlog_file, /* out: binlog file for last commit */ + ulonglong* binlog_pos, /* out: binlog pos for last commit */ + char** gtid_executed, /* out: Gtids logged until last commit */ + int* gtid_executed_length) /*out: Length of gtid_executed string */ +{ + Rdb_perf_context_guard guard(thd); + + ulong const tx_isolation = my_core::thd_tx_isolation(thd); + + if (tx_isolation != ISO_REPEATABLE_READ) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Only REPEATABLE READ isolation level is supported " + "for START TRANSACTION WITH CONSISTENT SNAPSHOT " + "in RocksDB Storage Engine.", MYF(0)); + return 1; + } + + if (binlog_file) + { + if (binlog_pos && mysql_bin_log_is_open()) + mysql_bin_log_lock_commits(); + else + return 1; + } + + Rdb_transaction* tx= get_or_create_tx(thd); + DBUG_ASSERT(!tx->has_snapshot()); + tx->set_tx_read_only(true); + rocksdb_register_tx(hton, thd, tx); + tx->acquire_snapshot(true); + + if (binlog_file) + mysql_bin_log_unlock_commits(binlog_file, binlog_pos, gtid_executed, + gtid_executed_length); + + return 0; +} + +/* Dummy SAVEPOINT support. This is needed for long running transactions + * like mysqldump (https://bugs.mysql.com/bug.php?id=71017). + * Current SAVEPOINT does not correctly handle ROLLBACK and does not return + * errors. This needs to be addressed in future versions (Issue#96). 
+ */ +static int rocksdb_savepoint(handlerton *hton, THD *thd, void *savepoint) +{ + return 0; +} + +static int rocksdb_rollback_to_savepoint(handlerton *hton, THD *thd, + void *savepoint) +{ + Rdb_transaction*& tx= get_tx_from_thd(thd); + return tx->rollback_to_savepoint(savepoint); +} + +static bool rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *hton, + THD *thd) +{ + return true; +} + +/* + This is called for INFORMATION_SCHEMA +*/ +static void rocksdb_update_table_stats( + /* per-table stats callback */ + void (*cb)(const char* db, const char* tbl, bool is_partition, + my_io_perf_t* r, my_io_perf_t* w, my_io_perf_t* r_blob, + my_io_perf_t* r_primary, my_io_perf_t* r_secondary, + page_stats_t *page_stats, comp_stats_t *comp_stats, + int n_lock_wait, int n_lock_wait_timeout, + const char* engine)) +{ + my_io_perf_t io_perf_read; + my_io_perf_t io_perf; + page_stats_t page_stats; + comp_stats_t comp_stats; + std::vector tablenames; + + /* + Most of these are for innodb, so setting them to 0. + TODO: possibly separate out primary vs. secondary index reads + */ + memset(&io_perf, 0, sizeof(io_perf)); + memset(&page_stats, 0, sizeof(page_stats)); + memset(&comp_stats, 0, sizeof(comp_stats)); + + tablenames= rdb_open_tables.get_table_names(); + + for (const auto& it : tablenames) + { + Rdb_table_handler *table_handler; + std::string str, dbname, tablename, partname; + char dbname_sys[NAME_LEN + 1]; + char tablename_sys[NAME_LEN + 1]; + bool is_partition; + + if (rdb_normalize_tablename(it, &str)) { + /* Function needs to return void because of the interface and we've + * detected an error which shouldn't happen. There's no way to let + * caller know that something failed. 
+ */ + SHIP_ASSERT(false); + return; + } + + if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) + { + continue; + } + + is_partition= (partname.size() != 0); + + table_handler= rdb_open_tables.get_table_handler(it.c_str()); + if (table_handler == nullptr) + { + continue; + } + + io_perf_read.bytes= table_handler->m_io_perf_read.bytes.load(); + io_perf_read.requests= table_handler->m_io_perf_read.requests.load(); + + /* + Convert from rocksdb timer to mysql timer. RocksDB values are + in nanoseconds, but table statistics expect the value to be + in my_timer format. + */ + io_perf_read.svc_time= my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time.load() / 1000); + io_perf_read.svc_time_max= my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time_max.load() / 1000); + io_perf_read.wait_time= my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time.load() / 1000); + io_perf_read.wait_time_max= my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time_max.load() / 1000); + io_perf_read.slow_ios= table_handler->m_io_perf_read.slow_ios.load(); + rdb_open_tables.release_table_handler(table_handler); + + /* + Table stats expects our database and table name to be in system encoding, + not filename format. Convert before calling callback. 
+ */ + my_core::filename_to_tablename(dbname.c_str(), dbname_sys, + sizeof(dbname_sys)); + my_core::filename_to_tablename(tablename.c_str(), tablename_sys, + sizeof(tablename_sys)); + (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read, &io_perf, + &io_perf, &io_perf, &io_perf, &page_stats, &comp_stats, 0, 0, + rocksdb_hton_name); + } +} + + +static rocksdb::Status check_rocksdb_options_compatibility( + const char *dbpath, + const rocksdb::Options& main_opts, + const std::vector& cf_descr) +{ + DBUG_ASSERT(rocksdb_datadir != nullptr); + + rocksdb::DBOptions loaded_db_opt; + std::vector loaded_cf_descs; + rocksdb::Status status = LoadLatestOptions(dbpath, + rocksdb::Env::Default(), &loaded_db_opt, + &loaded_cf_descs); + + // If we're starting from scratch and there are no options saved yet then this + // is a valid case. Therefore we can't compare the current set of options to + // anything. + if (status.IsNotFound()) { + return rocksdb::Status::OK(); + } + + if (!status.ok()) { + return status; + } + + if (loaded_cf_descs.size() != cf_descr.size()) { + return rocksdb::Status::NotSupported("Mismatched size of column family " \ + "descriptors."); + } + + // Please see RocksDB documentation for more context about why we need to set + // user-defined functions and pointer-typed options manually. 
+ for (size_t i = 0; i < loaded_cf_descs.size(); i++) { + loaded_cf_descs[i].options.compaction_filter = + cf_descr[i].options.compaction_filter; + loaded_cf_descs[i].options.compaction_filter_factory = + cf_descr[i].options.compaction_filter_factory; + loaded_cf_descs[i].options.comparator = cf_descr[i].options.comparator; + loaded_cf_descs[i].options.memtable_factory = + cf_descr[i].options.memtable_factory; + loaded_cf_descs[i].options.merge_operator = + cf_descr[i].options.merge_operator; + loaded_cf_descs[i].options.prefix_extractor = + cf_descr[i].options.prefix_extractor; + loaded_cf_descs[i].options.table_factory = + cf_descr[i].options.table_factory; + } + + // This is the essence of the function - determine if it's safe to open the + // database or not. + status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(), + main_opts, loaded_cf_descs); + + return status; +} + +/* + Storage Engine initialization function, invoked when plugin is loaded. +*/ + +static int rocksdb_init_func(void *p) +{ + DBUG_ENTER("rocksdb_init_func"); + + // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN. 
+ static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes."); + +#ifdef HAVE_PSI_INTERFACE + init_rocksdb_psi_keys(); +#endif + + rocksdb_hton= (handlerton *)p; + mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex, + MY_MUTEX_INIT_FAST); +#ifdef HAVE_PSI_INTERFACE + rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key, + rdb_signal_bg_psi_cond_key); + rdb_drop_idx_thread.init(rdb_signal_drop_idx_psi_mutex_key, + rdb_signal_drop_idx_psi_cond_key); +#else + rdb_bg_thread.init(); + rdb_drop_idx_thread.init(); +#endif + mysql_mutex_init(rdb_collation_data_mutex_key, &rdb_collation_data_mutex, + MY_MUTEX_INIT_FAST); + mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex, + MY_MUTEX_INIT_FAST); + +#if defined(HAVE_PSI_INTERFACE) + rdb_collation_exceptions = new Regex_list_handler( + key_rwlock_collation_exception_list); +#else + rdb_collation_exceptions = new Regex_list_handler(); +#endif + + mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex, + MY_MUTEX_INIT_FAST); + rdb_open_tables.init_hash(); + Rdb_transaction::init_mutex(); + + rocksdb_hton->state= SHOW_OPTION_YES; + rocksdb_hton->create= rocksdb_create_handler; + rocksdb_hton->close_connection= rocksdb_close_connection; + rocksdb_hton->prepare= rocksdb_prepare; + rocksdb_hton->commit_by_xid= rocksdb_commit_by_xid; + rocksdb_hton->rollback_by_xid= rocksdb_rollback_by_xid; + rocksdb_hton->recover= rocksdb_recover; + rocksdb_hton->commit= rocksdb_commit; + rocksdb_hton->rollback= rocksdb_rollback; + rocksdb_hton->db_type= DB_TYPE_ROCKSDB; + rocksdb_hton->show_status= rocksdb_show_status; + rocksdb_hton->start_consistent_snapshot= + rocksdb_start_tx_and_assign_read_view; + rocksdb_hton->savepoint_set= rocksdb_savepoint; + rocksdb_hton->savepoint_rollback= rocksdb_rollback_to_savepoint; + rocksdb_hton->savepoint_rollback_can_release_mdl= + rocksdb_rollback_to_savepoint_can_release_mdl; + rocksdb_hton->update_table_stats = rocksdb_update_table_stats; + 
rocksdb_hton->flush_logs= rocksdb_flush_wal; + + rocksdb_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | + HTON_SUPPORTS_EXTENDED_KEYS | + HTON_CAN_RECREATE; + + DBUG_ASSERT(!mysqld_embedded); + + rocksdb_stats= rocksdb::CreateDBStatistics(); + rocksdb_db_options.statistics= rocksdb_stats; + + if (rocksdb_rate_limiter_bytes_per_sec != 0) { + rocksdb_rate_limiter.reset(rocksdb::NewGenericRateLimiter( + rocksdb_rate_limiter_bytes_per_sec)); + rocksdb_db_options.rate_limiter= rocksdb_rate_limiter; + } + + std::shared_ptr myrocks_logger= std::make_shared(); + rocksdb::Status s= rocksdb::CreateLoggerFromOptions( + rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log); + if (s.ok()) { + myrocks_logger->SetRocksDBLogger(rocksdb_db_options.info_log); + } + + rocksdb_db_options.info_log= myrocks_logger; + myrocks_logger->SetInfoLogLevel( + static_cast(rocksdb_info_log_level)); + rocksdb_db_options.wal_dir= rocksdb_wal_dir; + + rocksdb_db_options.wal_recovery_mode= + static_cast(rocksdb_wal_recovery_mode); + + rocksdb_db_options.access_hint_on_compaction_start= + static_cast + (rocksdb_access_hint_on_compaction_start); + + if (rocksdb_db_options.allow_mmap_reads && + !rocksdb_db_options.allow_os_buffer) + { + // allow_mmap_reads implies allow_os_buffer and RocksDB will not open if + // mmap_reads is on and os_buffer is off. (NO_LINT_DEBUG) + sql_print_error("RocksDB: Can't disable allow_os_buffer " + "if allow_mmap_reads is enabled\n"); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + std::vector cf_names; + rocksdb::Status status; + status= rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir, + &cf_names); + if (!status.ok()) + { + /* + When we start on an empty datadir, ListColumnFamilies returns IOError, + and RocksDB doesn't provide any way to check what kind of error it was. + Checking system errno happens to work right now. 
+ */ + if (status.IsIOError() && errno == ENOENT) + { + sql_print_information("RocksDB: Got ENOENT when listing column families"); + sql_print_information("RocksDB: assuming that we're creating a new database"); + } + else + { + std::string err_text= status.ToString(); + sql_print_error("RocksDB: Error listing column families: %s", err_text.c_str()); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + } + else + sql_print_information("RocksDB: %ld column families found", cf_names.size()); + + std::vector cf_descr; + std::vector cf_handles; + + rocksdb_tbl_options.index_type= + (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type; + + if (!rocksdb_tbl_options.no_block_cache) { + rocksdb_tbl_options.block_cache= + rocksdb::NewLRUCache(rocksdb_block_cache_size); + } + // Using newer BlockBasedTable format version for better compression + // and better memory allocation. + // See: https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd + rocksdb_tbl_options.format_version= 2; + + if (rocksdb_collect_sst_properties) { + properties_collector_factory = std::make_shared + ( + &ddl_manager + ); + + rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr); + + mysql_mutex_lock(&rdb_sysvars_mutex); + + DBUG_ASSERT(rocksdb_table_stats_sampling_pct + <= RDB_TBL_STATS_SAMPLE_PCT_MAX); + properties_collector_factory->SetTableStatsSamplingPct( + rocksdb_table_stats_sampling_pct); + + mysql_mutex_unlock(&rdb_sysvars_mutex); + } + + if (!rocksdb_cf_options_map.init(ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT, + rocksdb_tbl_options, + properties_collector_factory, + rocksdb_default_cf_options, + rocksdb_override_cf_options)) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize CF options map."); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + /* + If there are no column families, we're creating the new database. + Create one column family named "default". 
+ */ + if (cf_names.size() == 0) + cf_names.push_back(DEFAULT_CF_NAME); + + std::vector compaction_enabled_cf_indices; + sql_print_information("RocksDB: Column Families at start:"); + for (size_t i = 0; i < cf_names.size(); ++i) + { + rocksdb::ColumnFamilyOptions opts; + rocksdb_cf_options_map.get_cf_options(cf_names[i], &opts); + + sql_print_information(" cf=%s", cf_names[i].c_str()); + sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); + sql_print_information(" target_file_size_base=%" PRIu64, + opts.target_file_size_base); + + /* + Temporarily disable compactions to prevent a race condition where + compaction starts before compaction filter is ready. + */ + if (!opts.disable_auto_compactions) + { + compaction_enabled_cf_indices.push_back(i); + opts.disable_auto_compactions = true; + } + cf_descr.push_back(rocksdb::ColumnFamilyDescriptor(cf_names[i], opts)); + } + + rocksdb::Options main_opts(rocksdb_db_options, + rocksdb_cf_options_map.get_defaults()); + + /* + Flashcache configuration: + When running on Flashcache, mysqld opens Flashcache device before + initializing storage engines, and setting file descriptor at + cachedev_fd global variable. + RocksDB has Flashcache-aware configuration. When this is enabled, + RocksDB adds background threads into Flashcache blacklists, which + makes sense for Flashcache use cases. 
+ */ + if (cachedev_enabled) + { + flashcache_aware_env= + rocksdb::NewFlashcacheAwareEnv(rocksdb::Env::Default(), + cachedev_fd); + if (flashcache_aware_env.get() == nullptr) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to open flashcache device at fd %d", + cachedev_fd); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + sql_print_information("RocksDB: Disabling flashcache on background " + "writer threads, fd %d", cachedev_fd); + main_opts.env= flashcache_aware_env.get(); + } + + main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes, + rocksdb::Env::Priority::HIGH); + main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions, + rocksdb::Env::Priority::LOW); + rocksdb::TransactionDBOptions tx_db_options; + tx_db_options.transaction_lock_timeout= 2; // 2 seconds + tx_db_options.custom_mutex_factory= std::make_shared(); + + status= check_rocksdb_options_compatibility(rocksdb_datadir, main_opts, + cf_descr); + + // We won't start if we'll determine that there's a chance of data corruption + // because of incompatible options. + if (!status.ok()) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: compatibility check against existing database " \ + "options failed. 
%s", status.ToString().c_str()); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + status= rocksdb::TransactionDB::Open(main_opts, tx_db_options, + rocksdb_datadir, cf_descr, + &cf_handles, &rdb); + + if (!status.ok()) + { + std::string err_text= status.ToString(); + sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str()); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + cf_manager.init(&rocksdb_cf_options_map, &cf_handles); + + if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize data dictionary."); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + if (binlog_manager.init(&dict_manager)) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize binlog manager."); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize DDL manager."); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + Rdb_sst_info::init(rdb); + + /* + Enable auto compaction, things needed for compaction filter are finished + initializing + */ + std::vector compaction_enabled_cf_handles; + compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size()); + for (auto index : compaction_enabled_cf_indices) + { + compaction_enabled_cf_handles.push_back(cf_handles[index]); + } + + status= rdb->EnableAutoCompaction(compaction_enabled_cf_handles); + + if (!status.ok()) + { + std::string err_text= status.ToString(); + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + auto err= rdb_bg_thread.create_thread( +#ifdef HAVE_PSI_INTERFACE + rdb_background_psi_thread_key +#endif + ); + if (err != 0) { + sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)", + err); + rdb_open_tables.free_hash(); + 
DBUG_RETURN(1); + } + + err= rdb_drop_idx_thread.create_thread( +#ifdef HAVE_PSI_INTERFACE + rdb_drop_idx_psi_thread_key +#endif + ); + if (err != 0) { + sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)", + err); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + rdb_set_collation_exception_list(rocksdb_strict_collation_exceptions); + + if (rocksdb_pause_background_work) { + rdb->PauseBackgroundWork(); + } + + sql_print_information("RocksDB instance opened"); + DBUG_RETURN(0); +} + +/* + Storage Engine deinitialization function, invoked when plugin is unloaded. +*/ + +static int rocksdb_done_func(void *p) +{ + int error= 0; + DBUG_ENTER("rocksdb_done_func"); + + // signal the drop index thread to stop + rdb_drop_idx_thread.signal(true); + + // Flush all memtables for not lose data, even if WAL is disabled. + rocksdb_flush_all_memtables(); + + // Stop all rocksdb background work + CancelAllBackgroundWork(rdb->GetBaseDB(), true); + + // Signal the background thread to stop and to persist all stats collected + // from background flushes and compactions. This will add more keys to a new + // memtable, but since the memtables were just flushed, it should not trigger + // a flush that can stall due to background threads being stopped. As long + // as these keys are stored in a WAL file, they can be retrieved on restart. + rdb_bg_thread.signal(true); + + // Wait for the background thread to finish. + auto err= rdb_bg_thread.join(); + if (err != 0) { + // We'll log the message and continue because we're shutting down and + // continuation is the optimal strategy. + // NO_LINT_DEBUG + sql_print_error("RocksDB: Couldn't stop the background thread: (errno=%d)", + err); + } + + // Wait for the drop index thread to finish. 
+ err= rdb_drop_idx_thread.join(); + if (err != 0) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", + err); + } + + if (rdb_open_tables.m_hash.records) + { + // Looks like we are getting unloaded and yet we have some open tables + // left behind. + error= 1; + } + + rdb_open_tables.free_hash(); + mysql_mutex_destroy(&rdb_open_tables.m_mutex); + mysql_mutex_destroy(&rdb_sysvars_mutex); + + delete rdb_collation_exceptions; + mysql_mutex_destroy(&rdb_collation_data_mutex); + mysql_mutex_destroy(&rdb_mem_cmp_space_mutex); + + Rdb_transaction::term_mutex(); + + for (auto& it : rdb_collation_data) + { + delete it; + it= nullptr; + } + + ddl_manager.cleanup(); + binlog_manager.cleanup(); + dict_manager.cleanup(); + cf_manager.cleanup(); + + delete rdb; + rdb= nullptr; + + // Disown the cache data since we're shutting down. + // This results in memory leaks but it improved the shutdown time. + // Don't disown when running under valgrind +#ifndef HAVE_purify + if (rocksdb_tbl_options.block_cache) + { + rocksdb_tbl_options.block_cache->DisownData(); + } +#endif /* HAVE_purify */ + + DBUG_RETURN(error); +} + + +/** + @brief + Example of simple lock controls. The "table_handler" it creates is a + structure we will pass to each ha_rocksdb handler. Do you have to have + one of these? Well, you have pieces that are used for locking, and + they are needed to function. +*/ + +Rdb_table_handler* Rdb_open_tables_map::get_table_handler( + const char *table_name) +{ + Rdb_table_handler *table_handler; + uint length; + char *tmp_name; + + DBUG_ASSERT(table_name != nullptr); + length=(uint) strlen(table_name); + + // First, look up the table in the hash map. + mysql_mutex_lock(&m_mutex); + if (!(table_handler= reinterpret_cast( + my_hash_search(&m_hash, reinterpret_cast(table_name), + length)))) + { + // Since we did not find it in the hash map, attempt to create and add it + // to the hash map. 
+ if (!(table_handler= reinterpret_cast( + my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &table_handler, sizeof(*table_handler), + &tmp_name, length+1, + NullS)))) + { + // Allocating a new Rdb_table_handler and a new table name failed. + mysql_mutex_unlock(&m_mutex); + return nullptr; + } + + table_handler->m_ref_count= 0; + table_handler->m_table_name_length= length; + table_handler->m_table_name= tmp_name; + strmov(table_handler->m_table_name, table_name); + + if (my_hash_insert(&m_hash, reinterpret_cast(table_handler))) + { + // Inserting into the hash map failed. + mysql_mutex_unlock(&m_mutex); + my_free(table_handler); + return nullptr; + } + + thr_lock_init(&table_handler->m_thr_lock); + my_io_perf_atomic_init(&table_handler->m_io_perf_read); + } + DBUG_ASSERT(table_handler->m_ref_count >= 0); + table_handler->m_ref_count++; + + mysql_mutex_unlock(&m_mutex); + + return table_handler; +} + + +std::vector rdb_get_open_table_names(void) +{ + return rdb_open_tables.get_table_names(); +} + + +std::vector Rdb_open_tables_map::get_table_names(void) const +{ + ulong i; + const Rdb_table_handler *table_handler; + std::vector names; + + mysql_mutex_lock(&m_mutex); + for (i = 0; + (table_handler= reinterpret_cast( + my_hash_const_element(&m_hash, i))); + i++) + { + DBUG_ASSERT(table_handler != nullptr); + names.push_back(table_handler->m_table_name); + } + DBUG_ASSERT(i == m_hash.records); + mysql_mutex_unlock(&m_mutex); + + return names; +} + + +void ha_rocksdb::load_auto_incr_value() +{ + int save_active_index= active_index; + active_index= table->s->next_number_index; + uint8 save_table_status= table->status; + + /* + load_auto_incr_value() may be called by statements that + do not execute implicit commits (i.e. SHOW CREATE TABLE). + index_last() creates a snapshot. When a snapshot is created + here, it has to be released as well. 
(GitHub issue#189) + */ + Rdb_transaction *tx= get_or_create_tx(table->in_use); + bool is_new_snapshot= !tx->has_snapshot(); + + // Do a lookup. We only need index column, so it should be index-only. + // (another reason to make it index-only is that table->read_set is + // not set appropriately and non-index-only lookup will not read the value) + bool save_keyread_only= m_keyread_only; + m_keyread_only= true; + + if (!index_last(table->record[0])) + update_auto_incr_val(); + + m_keyread_only= save_keyread_only; + if (is_new_snapshot) + { + tx->release_snapshot(); + } + + table->status= save_table_status; + active_index= save_active_index; + + /* + Do what ha_rocksdb::index_end() does. + (Why don't we use index_init/index_end? class handler defines index_init + as private, for some reason). + */ + release_scan_iterator(); +} + +/* Get PK value from table->record[0]. */ +/* + TODO(alexyang): No existing support for auto_increment on non-pk columns, see + end of ha_rocksdb::create. Also see opened issue here: + https://github.com/facebook/mysql-5.6/issues/153 +*/ +void ha_rocksdb::update_auto_incr_val() +{ + Field *field; + longlong new_val; + field= table->key_info[table->s->next_number_index].key_part[0].field; + + my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + new_val= field->val_int() + 1; + dbug_tmp_restore_column_map(table->read_set, old_map); + + longlong auto_incr_val= m_tbl_def->m_auto_incr_val; + while (auto_incr_val < new_val && + !m_tbl_def->m_auto_incr_val.compare_exchange_weak( + auto_incr_val, new_val)) + { + // Do nothing - just loop until auto_incr_val is >= new_val or + // we successfully set it + } +} + +int ha_rocksdb::load_hidden_pk_value() +{ + int save_active_index= active_index; + active_index= m_tbl_def->m_key_count - 1; + uint8 save_table_status= table->status; + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + bool is_new_snapshot= !tx->has_snapshot(); + + // Do a lookup. 
+ if (!index_last(table->record[0])) + { + /* + Decode PK field from the key + */ + longlong hidden_pk_id= 0; + if (read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + { + if (is_new_snapshot) + { + tx->release_snapshot(); + } + return HA_ERR_INTERNAL_ERROR; + } + + hidden_pk_id++; + longlong old = m_tbl_def->m_hidden_pk_val; + while (old < hidden_pk_id && + !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) + {} + } + + if (is_new_snapshot) + { + tx->release_snapshot(); + } + + table->status= save_table_status; + active_index= save_active_index; + + release_scan_iterator(); + + return 0; +} + +/* Get PK value from m_tbl_def->m_hidden_pk_info. */ +longlong ha_rocksdb::update_hidden_pk_val() +{ + DBUG_ASSERT(has_hidden_pk(table)); + longlong new_val; + new_val= m_tbl_def->m_hidden_pk_val++; + return new_val; +} + +/* Get the id of the hidden pk id from m_last_rowkey */ +int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id) +{ + DBUG_ASSERT(hidden_pk_id != nullptr); + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(has_hidden_pk(table)); + + rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); + + // Get hidden primary key from old key slice + Rdb_string_reader reader(&rowkey_slice); + if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE))) + return 1; + + const int length= Field_longlong::PACK_LENGTH; + const uchar *from= reinterpret_cast(reader.read(length)); + if (from == nullptr) + { + return 1; /* Mem-comparable image doesn't have enough bytes */ + } + + *hidden_pk_id= rdb_netbuf_read_uint64(&from); + return 0; +} + + +/** + @brief + Free lock controls. We call this whenever we close a table. If the table had + the last reference to the table_handler, then we free the memory associated + with it. 
+*/ + +void Rdb_open_tables_map::release_table_handler( + Rdb_table_handler *table_handler) +{ + mysql_mutex_lock(&m_mutex); + + DBUG_ASSERT(table_handler->m_ref_count > 0); + if (!--table_handler->m_ref_count) + { + // Last rereference was released. Tear down the hash entry. + auto ret __attribute__((__unused__)) = + my_hash_delete(&m_hash, reinterpret_cast(table_handler)); + DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted + my_core::thr_lock_delete(&table_handler->m_thr_lock); + my_free(table_handler); + } + + mysql_mutex_unlock(&m_mutex); +} + + +static handler* rocksdb_create_handler(my_core::handlerton *hton, + my_core::TABLE_SHARE *table_arg, + my_core::MEM_ROOT *mem_root) +{ + return new (mem_root) ha_rocksdb(hton, table_arg); +} + + +ha_rocksdb::ha_rocksdb(my_core::handlerton *hton, + my_core::TABLE_SHARE *table_arg) + : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr), + m_scan_it_skips_bloom(false), m_tbl_def(nullptr), + m_pk_descr(nullptr), m_key_descr_arr(nullptr), + m_pk_can_be_decoded(false), + m_maybe_unpack_info(false), + m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr), + m_sk_packed_tuple(nullptr), + m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), + m_sk_packed_tuple_old(nullptr), + m_pack_buffer(nullptr), m_lock_rows(RDB_LOCK_NONE), + m_keyread_only(FALSE), m_encoder_arr(nullptr), + m_row_checksums_checked(0), + m_in_rpl_delete_rows(false), m_in_rpl_update_rows(false), + m_force_skip_unique_check(false) +{} + + +static const char *ha_rocksdb_exts[] = { + NullS +}; + + +const char **ha_rocksdb::bas_ext() const +{ + return ha_rocksdb_exts; +} + +bool ha_rocksdb::same_table(const ha_rocksdb& other) const +{ + return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); +} + +bool ha_rocksdb::init_with_fields() +{ + uint pk= table_share->primary_key; + if (pk != MAX_KEY) + { + uint key_parts= table_share->key_info[pk].user_defined_key_parts; + check_keyread_allowed(pk /*PK*/, 
key_parts-1, true); + } + else + m_pk_can_be_decoded= false; + + cached_table_flags= table_flags(); + return false; /* Ok */ +} + + +/** + Convert record from table->record[0] form into a form that can be written + into rocksdb. + + @param pk_packed_slice Packed PK tuple. We need it in order to compute + and store its CRC. + @param packed_rec OUT Data slice with record data. +*/ + +void ha_rocksdb::convert_record_to_storage_format( + const rocksdb::Slice& pk_packed_slice, + Rdb_string_writer *pk_unpack_info, + rocksdb::Slice *packed_rec) +{ + DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info); + m_storage_record.length(0); + + /* All NULL bits are initially 0 */ + m_storage_record.fill(m_null_bytes_in_rec, 0); + + // If a primary key may have non-empty unpack_info for certain values, + // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block + // itself was prepared in Rdb_key_def::pack_record. + if (m_maybe_unpack_info && + m_pk_descr->m_kv_format_version >= + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + { + m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), + pk_unpack_info->get_current_pos()); + } + + for (uint i=0; i < table->s->fields; i++) + { + /* Don't pack decodable PK key parts */ + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) + { + continue; + } + + Field *field= table->field[i]; + if (m_encoder_arr[i].maybe_null()) + { + char *data= (char*)m_storage_record.ptr(); + if (field->is_null()) + { + data[m_encoder_arr[i].m_null_offset]|= m_encoder_arr[i].m_null_mask; + /* Don't write anything for NULL values */ + continue; + } + } + + if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB) + { + my_core::Field_blob *blob= (my_core::Field_blob*)field; + /* Get the number of bytes needed to store length*/ + uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + + /* Store the length of the value */ + m_storage_record.append(reinterpret_cast(blob->ptr), length_bytes); + + /* Store the blob 
value itself */ + char *data_ptr; + memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar**)); + m_storage_record.append(data_ptr, blob->get_length()); + } + else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) + { + Field_varstring* field_var= (Field_varstring*)field; + uint data_len; + /* field_var->length_bytes is 1 or 2 */ + if (field_var->length_bytes == 1) + { + data_len= field_var->ptr[0]; + } + else + { + DBUG_ASSERT(field_var->length_bytes==2); + data_len= uint2korr(field_var->ptr); + } + m_storage_record.append(reinterpret_cast(field_var->ptr), + field_var->length_bytes + data_len); + } + else + { + /* Copy the field data */ + uint len= field->pack_length_in_rec(); + m_storage_record.append(reinterpret_cast(field->ptr), len); + } + } + + if (should_store_checksums()) + { + uint32_t key_crc32= my_core::crc32(0, + rdb_slice_to_uchar_ptr(&pk_packed_slice), + pk_packed_slice.size()); + uint32_t val_crc32= my_core::crc32( + 0, rdb_mysql_str_to_uchar_str(&m_storage_record), + m_storage_record.length()); + uchar key_crc_buf[RDB_CHECKSUM_SIZE]; + uchar val_crc_buf[RDB_CHECKSUM_SIZE]; + rdb_netbuf_store_uint32(key_crc_buf, key_crc32); + rdb_netbuf_store_uint32(val_crc_buf, val_crc32); + m_storage_record.append((const char*)&RDB_CHECKSUM_DATA_TAG, 1); + m_storage_record.append((const char*)key_crc_buf, RDB_CHECKSUM_SIZE); + m_storage_record.append((const char*)val_crc_buf, RDB_CHECKSUM_SIZE); + } + + *packed_rec= rocksdb::Slice(m_storage_record.ptr(), + m_storage_record.length()); +} + + +/* + @brief + Setup which fields will be unpacked when reading rows + + @detail + Two special cases when we still unpack all fields: + - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE). + - When @@rocksdb_verify_checksums is ON (In this mode, we need to read all + fields to find whether there is a row checksum at the end. We could skip + the fields instead of decoding them, but currently we do decoding.) 
+ + @seealso + ha_rocksdb::setup_field_converters() + ha_rocksdb::convert_record_from_storage_format() +*/ +void ha_rocksdb::setup_read_decoders() +{ + m_decoders_vect.clear(); + + int last_useful= 0; + int skip_size= 0; + + for (uint i= 0; i < table->s->fields; i++) + { + // We only need the decoder if the whole record is stored. + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) + { + continue; + } + + if (m_lock_rows == RDB_LOCK_WRITE || m_verify_checksums || + bitmap_is_set(table->read_set, table->field[i]->field_index)) + { + // We will need to decode this field + m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size}); + last_useful= m_decoders_vect.size(); + skip_size= 0; + } + else + { + if (m_encoder_arr[i].uses_variable_len_encoding() || + m_encoder_arr[i].maybe_null()) + { + // For variable-length field, we need to read the data and skip it + m_decoders_vect.push_back({&m_encoder_arr[i], false, skip_size}); + skip_size= 0; + } + else + { + // Fixed-width field can be skipped without looking at it. + // Add appropriate skip_size to the next field. + skip_size += m_encoder_arr[i].m_pack_length_in_rec; + } + } + } + + // It could be that the last few elements are varchars that just do + // skipping. Remove them. + m_decoders_vect.erase(m_decoders_vect.begin() + last_useful, + m_decoders_vect.end()); +} + + +#ifndef NDEBUG +void dbug_append_garbage_at_end(std::string &on_disk_rec) +{ + on_disk_rec.append("abc"); +} + +void dbug_truncate_record(std::string &on_disk_rec) +{ + on_disk_rec.resize(0); +} + +void dbug_modify_rec_varchar12(std::string &on_disk_rec) +{ + std::string res; + // The record is NULL-byte followed by VARCHAR(10). + // Put the NULL-byte + res.append("\0", 1); + // Then, add a valid VARCHAR(12) value. 
+ res.append("\xC", 1); + res.append("123456789ab", 12); + + on_disk_rec.assign(res); +} + + +void dbug_modify_key_varchar8(String &on_disk_rec) +{ + std::string res; + // The key starts with index number + res.append(on_disk_rec.ptr(), Rdb_key_def::INDEX_NUMBER_SIZE); + + // Then, a mem-comparable form of a varchar(8) value. + res.append("ABCDE\0\0\0\xFC", 9); + on_disk_rec.length(0); + on_disk_rec.append(res.data(), res.size()); +} + +#endif + + +int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, + uchar * buf) +{ + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", + dbug_append_garbage_at_end(m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2", + dbug_truncate_record(m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3", + dbug_modify_rec_varchar12(m_retrieved_record);); + + rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(), + m_retrieved_record.size()); + return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); +} + + +/* + @brief + Unpack the record in this->m_retrieved_record and this->m_last_rowkey from + storage format into buf (which can be table->record[0] or table->record[1]). + + @param key Table record's key in mem-comparable form. + @param buf Store record in table->record[0] format here + + @detail + If the table has blobs, the unpacked data in buf may keep pointers to the + data in this->m_retrieved_record. + + The key is only needed to check its checksum value (the checksum is in + m_retrieved_record). + + @seealso + ha_rocksdb::setup_read_decoders() Sets up data structures which tell which + columns to decode. 
+ + @return + 0 OK + other Error inpacking the data +*/ + +int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, + const rocksdb::Slice *value, + uchar * buf) +{ + DBUG_ASSERT(key != nullptr); + DBUG_ASSERT(buf != nullptr); + + Rdb_string_reader reader(value); + my_ptrdiff_t ptr_diff= buf - table->record[0]; + + /* + Decode PK fields from the key + */ + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_read1", + dbug_modify_key_varchar8(m_last_rowkey);); + + const rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), + m_last_rowkey.length()); + const char *unpack_info= nullptr; + uint16 unpack_info_len= 0; + rocksdb::Slice unpack_slice; + + /* Other fields are decoded from the value */ + const char *null_bytes= nullptr; + if (m_null_bytes_in_rec && !(null_bytes= reader.read(m_null_bytes_in_rec))) + { + return HA_ERR_INTERNAL_ERROR; + } + + if (m_maybe_unpack_info && m_pk_descr->m_kv_format_version >= + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + { + unpack_info= reader.read(RDB_UNPACK_HEADER_SIZE); + + if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG) + { + return HA_ERR_INTERNAL_ERROR; + } + + unpack_info_len= rdb_netbuf_to_uint16( + reinterpret_cast(unpack_info + 1)); + unpack_slice= rocksdb::Slice(unpack_info, unpack_info_len); + + reader.read(unpack_info_len - RDB_UNPACK_HEADER_SIZE); + } + + if (m_pk_descr->unpack_record(table, buf, &rowkey_slice, + unpack_info ? 
&unpack_slice : nullptr, + false /* verify_checksum */)) + { + return HA_ERR_INTERNAL_ERROR; + } + + for (auto it= m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) + { + const Rdb_field_encoder* const field_dec= it->m_field_enc; + const bool decode= it->m_decode; + bool isNull = field_dec->maybe_null() && + ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0); + + Field *field= table->field[field_dec->m_field_index]; + + /* Skip the bytes we need to skip */ + if (it->m_skip && !reader.read(it->m_skip)) + return HA_ERR_INTERNAL_ERROR; + + if (isNull) + { + if (decode) + { + /* This sets the NULL-bit of this record */ + field->set_null(ptr_diff); + /* + Besides that, set the field value to default value. CHECKSUM TABLE + depends on this. + */ + uint field_offset= field->ptr - table->record[0]; + memcpy(buf + field_offset, + table->s->default_values + field_offset, + field->pack_length()); + } + continue; + } + else + { + if (decode) + field->set_notnull(ptr_diff); + } + + if (field_dec->m_field_type == MYSQL_TYPE_BLOB) + { + my_core::Field_blob *blob= (my_core::Field_blob*)field; + /* Get the number of bytes needed to store length*/ + uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + + blob->move_field_offset(ptr_diff); + + const char *data_len_str; + if (!(data_len_str= reader.read(length_bytes))) + { + blob->move_field_offset(-ptr_diff); + return HA_ERR_INTERNAL_ERROR; + } + + memcpy(blob->ptr, data_len_str, length_bytes); + + uint32 data_len= blob->get_length((uchar*)data_len_str, length_bytes, + table->s->db_low_byte_first); + const char *blob_ptr; + if (!(blob_ptr= reader.read(data_len))) + { + blob->move_field_offset(-ptr_diff); + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) + { + // set 8-byte pointer to 0, like innodb does (relevant for 32-bit + // platforms) + memset(blob->ptr + length_bytes, 0, 8); + memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar**)); + blob->move_field_offset(-ptr_diff); + } + 
} + else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) + { + Field_varstring* field_var= (Field_varstring*)field; + const char *data_len_str; + if (!(data_len_str= reader.read(field_var->length_bytes))) + return HA_ERR_INTERNAL_ERROR; + + uint data_len; + /* field_var->length_bytes is 1 or 2 */ + if (field_var->length_bytes == 1) + { + data_len= (uchar)data_len_str[0]; + } + else + { + DBUG_ASSERT(field_var->length_bytes == 2); + data_len= uint2korr(data_len_str); + } + if (data_len > field->field_length) + { + /* The data on disk is longer than table DDL allows? */ + return HA_ERR_INTERNAL_ERROR; + } + if (!reader.read(data_len)) + return HA_ERR_INTERNAL_ERROR; + + if (decode) + { + memcpy(field_var->ptr + ptr_diff, data_len_str, + field_var->length_bytes + data_len); + } + } + else + { + const char *data_bytes; + uint len= field_dec->m_pack_length_in_rec; + if (len > 0) + { + if ((data_bytes= reader.read(len)) == nullptr) + { + return HA_ERR_INTERNAL_ERROR; + } + if (decode) + memcpy(field->ptr + ptr_diff, data_bytes, len); + } + } + } + + if (m_verify_checksums) + { + if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE && + reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) + { + uint32_t stored_key_chksum= + rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE)); + uint32_t stored_val_chksum= + rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE)); + + uint32_t computed_key_chksum= + my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size()); + uint32_t computed_val_chksum= + my_core::crc32(0, rdb_slice_to_uchar_ptr(value), + value->size() - RDB_CHECKSUM_CHUNK_SIZE); + + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum1", + stored_key_chksum++;); + + if (stored_key_chksum != computed_key_chksum) + { + m_pk_descr->report_checksum_mismatch(true, key->data(), key->size()); + return HA_ERR_INTERNAL_ERROR; + } + + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2", + stored_val_chksum++;); + if (stored_val_chksum != computed_val_chksum) 
+ { + m_pk_descr->report_checksum_mismatch(false, value->data(), + value->size()); + return HA_ERR_INTERNAL_ERROR; + } + + m_row_checksums_checked++; + } + if (reader.remaining_bytes()) + return HA_ERR_INTERNAL_ERROR; + } + + return 0; +} + + +void ha_rocksdb::get_storage_type(Rdb_field_encoder *encoder, uint kp) +{ + Field *field= table->key_info[table->s->primary_key].key_part[kp].field; + + if (field->real_type() == MYSQL_TYPE_NEWDECIMAL) + { + // Index-only is supported for DECIMAL columns. + // A DECIMAL value can be restored from its mem-comparable form. + // This works for both the old data format and the new data format. + if (m_pk_descr->m_kv_format_version >= + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + { + // New format, don't store Decimal value in the row. + encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; + } + else + { + // Old format. Pretend there's no way to unpack the decimal from + // its mem-comparable form. + encoder->m_storage_type= Rdb_field_encoder::STORE_ALL; + } + return; + } + + if (m_pk_descr->m_kv_format_version >= + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + { + // STORE_SOME uses unpack_info, so our key must be of version + // higher than PRIMARY_FORMAT_VERSION_UPDATE1 to use this + // feature. + if (m_pk_descr->has_unpack_info(kp)) + { + DBUG_ASSERT(m_pk_descr->can_unpack(kp)); + encoder->m_storage_type= Rdb_field_encoder::STORE_SOME; + m_maybe_unpack_info= true; + } + else if (m_pk_descr->can_unpack(kp)) + { + encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; + } + } + else + { + // For old versions, we can only store none if there is no + // unpack_info (and it's unpackable). + if (m_pk_descr->can_unpack(kp) && !m_pk_descr->has_unpack_info(kp)) + { + encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; + } + } +} + +/* + Setup data needed to convert table->record[] to and from record storage format. 
+ + @seealso + ha_rocksdb::convert_record_to_storage_format, + ha_rocksdb::convert_record_from_storage_format +*/ + +void ha_rocksdb::setup_field_converters() +{ + uint i; + uint null_bytes= 0; + uchar cur_null_mask= 0x1; + + DBUG_ASSERT(m_encoder_arr == nullptr); + m_encoder_arr= static_cast( + my_malloc(table->s->fields * sizeof(Rdb_field_encoder), MYF(0))); + if (m_encoder_arr == nullptr) + { + return; + } + + for (i= 0; i < table->s->fields; i++) + { + Field *field= table->field[i]; + m_encoder_arr[i].m_storage_type= Rdb_field_encoder::STORE_ALL; + + /* + Check if this field is + - a part of primary key, and + - it can be decoded back from its key image. + If both hold, we don't need to store this field in the value part of + RocksDB's key-value pair. + + If hidden pk exists, we skip this check since the field will never be + part of the hidden pk. + */ + if (!has_hidden_pk(table) && + field->part_of_key.is_set(table->s->primary_key)) + { + KEY *pk_info= &table->key_info[table->s->primary_key]; + for (uint kp= 0; kp < pk_info->user_defined_key_parts; kp++) + { + /* key_part->fieldnr is counted from 1 */ + if (field->field_index + 1 == pk_info->key_part[kp].fieldnr) + { + get_storage_type(&m_encoder_arr[i], kp); + break; + } + } + } + + m_encoder_arr[i].m_field_type= field->real_type(); + m_encoder_arr[i].m_field_index= i; + m_encoder_arr[i].m_pack_length_in_rec= field->pack_length_in_rec(); + + if (field->real_maybe_null()) + { + m_encoder_arr[i].m_null_mask= cur_null_mask; + m_encoder_arr[i].m_null_offset= null_bytes; + if (cur_null_mask == 0x80) + { + cur_null_mask= 0x1; + null_bytes++; + } + else + cur_null_mask= cur_null_mask << 1; + } + else + { + m_encoder_arr[i].m_null_mask= 0; + } + } + + /* Count the last, unfinished NULL-bits byte */ + if (cur_null_mask != 0x1) + null_bytes++; + + m_null_bytes_in_rec= null_bytes; +} + +int ha_rocksdb::alloc_key_buffers(const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) +{ + 
DBUG_ENTER("ha_rocksdb::alloc_key_buffers"); + DBUG_ASSERT(m_pk_tuple == nullptr); + + std::shared_ptr* kd_arr= tbl_def_arg->m_key_descr_arr; + + uint key_len= 0; + m_pk_descr= kd_arr[pk_index(table_arg, tbl_def_arg)]; + if (has_hidden_pk(table_arg)) + { + m_pk_key_parts= 1; + } + else + { + m_pk_key_parts= + table->key_info[table->s->primary_key].user_defined_key_parts; + key_len= table->key_info[table->s->primary_key].key_length; + } + + // move this into get_table_handler() ?? + m_pk_descr->setup(table_arg, tbl_def_arg); + + m_pk_tuple= reinterpret_cast(my_malloc(key_len, MYF(0))); + if (m_pk_tuple == nullptr) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + uint pack_key_len= m_pk_descr->max_storage_fmt_length(); + m_pk_packed_tuple= reinterpret_cast(my_malloc(pack_key_len, MYF(0))); + if (m_pk_packed_tuple == nullptr) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */ + uint max_packed_sk_len= pack_key_len; + for (uint i= 0; i < table_arg->s->keys; i++) + { + if (i == table_arg->s->primary_key) /* Primary key was processed above */ + continue; + + // TODO: move this into get_table_handler() ?? 
+ kd_arr[i]->setup(table_arg, tbl_def_arg); + + uint packed_len= kd_arr[i]->max_storage_fmt_length(); + if (packed_len > max_packed_sk_len) + { + max_packed_sk_len= packed_len; + } + } + + if (!(m_sk_packed_tuple= + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_match_prefix_buf= + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_packed_tuple_old= + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_end_key_packed_tuple= + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !((m_pack_buffer= + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))))) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + DBUG_RETURN(0); +} + +void ha_rocksdb::free_key_buffers() +{ + my_free(m_pk_tuple); + m_pk_tuple= nullptr; + + my_free(m_pk_packed_tuple); + m_pk_packed_tuple= nullptr; + + my_free(m_sk_packed_tuple); + m_sk_packed_tuple= nullptr; + + my_free(m_sk_match_prefix_buf); + m_sk_match_prefix_buf= nullptr; + + my_free(m_sk_packed_tuple_old); + m_sk_packed_tuple_old= nullptr; + + my_free(m_end_key_packed_tuple); + m_end_key_packed_tuple= nullptr; + + my_free(m_pack_buffer); + m_pack_buffer= nullptr; +} + +void ha_rocksdb::set_use_read_free_rpl(const char* whitelist) +{ + DBUG_ASSERT(whitelist != nullptr); + +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler regex_handler(key_rwlock_read_free_rpl_tables); +#else + Regex_list_handler regex_handler; +#endif + + if (!regex_handler.set_patterns(whitelist)) + { + warn_about_bad_patterns(®ex_handler, "read_free_rpl_tables"); + } + + m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename()); +} + +void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) +{ + DBUG_ASSERT(whitelist != nullptr); + +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables); +#else + Regex_list_handler regex_handler; +#endif + + if (!regex_handler.set_patterns(whitelist)) + { + warn_about_bad_patterns(®ex_handler, 
"skip_unique_check_tables"); + } + + m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename()); +} + +int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_rocksdb::open"); + + if (m_table_handler != nullptr) + { + rdb_open_tables.release_table_handler(m_table_handler); + } + m_table_handler= rdb_open_tables.get_table_handler(name); + if (m_table_handler == nullptr) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + my_core::thr_lock_data_init(&m_table_handler->m_thr_lock, &m_db_lock, + nullptr); + m_io_perf.init(&m_table_handler->m_table_perf_context, + &m_table_handler->m_io_perf_read, &stats); + + /* + note: m_pk_descr may be non-NULL here, when using partitions. It seems, + it's ok to discard it + */ + std::string fullname; + if (rdb_normalize_tablename(name, &fullname)) + { + return HA_ERR_INTERNAL_ERROR; + } + + m_tbl_def= ddl_manager.find(fullname); + if (m_tbl_def == nullptr) + { + my_error(ER_INTERNAL_ERROR, MYF(0), + "Attempt to open a table that is not present in RocksDB-SE data " + "dictionary"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + m_lock_rows= RDB_LOCK_NONE; + + m_key_descr_arr= m_tbl_def->m_key_descr_arr; + + /* + Full table scan actually uses primary key + (UPDATE needs to know this, otherwise it will go into infinite loop on + queries like "UPDATE tbl SET pk=pk+100") + */ + key_used_on_scan= table->s->primary_key; + + int err; + if ((err = alloc_key_buffers(table, m_tbl_def))) + { + DBUG_RETURN(err); + } + + setup_field_converters(); + + info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); + + /* + The following load_XXX code calls row decode functions, and they do + that without having done ::external_lock() or index_init()/rnd_init(). 
+ (Note: this also means we're doing a read when there was no + setup_field_converters() call) + + Initialize the necessary variables for them: + */ + m_verify_checksums= false; + + /* TODO: move the following to where TABLE_SHARE is opened: */ + if (table->found_next_number_field) + load_auto_incr_value(); + + if (has_hidden_pk(table) && load_hidden_pk_value()) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + /* Index block size in MyRocks: used by MySQL in query optimization */ + stats.block_size = rocksdb_tbl_options.block_size; + + /* Determine at open whether we can use Read Free Replication or not */ + set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables)); + + /* Determine at open whether we should skip unique checks for this table */ + set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables)); + + DBUG_RETURN(0); +} + + +int ha_rocksdb::close(void) +{ + DBUG_ENTER("ha_rocksdb::close"); + + m_pk_descr= nullptr; + m_key_descr_arr= nullptr; + + free_key_buffers(); + + my_free(m_encoder_arr); + m_encoder_arr= nullptr; + + rdb_open_tables.release_table_handler(m_table_handler); + m_table_handler= nullptr; + + DBUG_RETURN(0); +} + +bool ha_rocksdb::get_error_message(const int error, String *buf) +{ + static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST, + "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST"); + static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST, + "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST"); + + DBUG_ASSERT(error > 0); + DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST); + DBUG_ASSERT(buf != nullptr); + + Rdb_transaction* tx = get_tx_from_thd(ha_thd()); + bool temp_error = false; + + switch (error) { + case HA_ERR_ROCKSDB_PK_REQUIRED: + buf->append("Table must have a PRIMARY KEY."); + break; + case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED: + buf->append("Unique indexes are not supported."); + break; + case HA_ERR_ROCKSDB_TOO_MANY_LOCKS: + buf->append("Number of locks held reached @@rocksdb_max_row_locks."); + break; + case HA_ERR_LOCK_WAIT_TIMEOUT: + DBUG_ASSERT(tx != 
nullptr); + buf->append(tx->m_detailed_error); + temp_error = true; + break; + default: + // We can be called with the values which are < HA_ERR_FIRST because most + // MySQL internal functions will just return 1 in case of an error. + break; + } + + return temp_error; +} + +/* MyRocks supports only the following collations for indexed columns */ +static const std::set RDB_INDEX_COLLATIONS= + {&my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin}; + + +static bool rdb_is_index_collation_supported(const my_core::Field *field) +{ + my_core::enum_field_types type= field->real_type(); + /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */ + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING || + type == MYSQL_TYPE_BLOB) + { + return RDB_INDEX_COLLATIONS.find(field->charset()) != + RDB_INDEX_COLLATIONS.end(); + } + return true; +} + +/* + Create structures needed for storing data in rocksdb. This is called when the + table is created. The structures will be shared by all TABLE* objects. + + @param + table_arg Table with definition + db_table "dbname.tablename" + len strlen of the above + tbl_def_arg tbl_def whose key_descr is being created/populated + old_tbl_def_arg tbl_def from which keys are being copied over from + (for use during inplace alter) + + @return + 0 - Ok + other - error, either given table ddl is not supported by rocksdb or OOM. +*/ +int ha_rocksdb::create_key_defs(const TABLE *table_arg, + Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg /* = nullptr */, + const Rdb_tbl_def *old_tbl_def_arg + /* = nullptr */) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + + DBUG_ENTER("ha_rocksdb::create_key_defs"); + + uint i; + + /* + These need to be one greater than MAX_INDEXES since the user can create + MAX_INDEXES secondary keys and no primary key which would cause us + to generate a hidden one. 
+ */ + std::array cfs; + + /* + NOTE: All new column families must be created before new index numbers are + allocated to each key definition. See below for more details. + http://github.com/MySQLOnRocksDB/mysql-5.6/issues/86#issuecomment-138515501 + */ + if (create_cfs(table_arg, tbl_def_arg, &cfs)) + { + DBUG_RETURN(1); + }; + + if (!old_tbl_def_arg) + { + /* + old_tbl_def doesn't exist. this means we are in the process of creating + a new table. + + Get the index numbers (this will update the next_index_number) + and create Rdb_key_def structures. + */ + for (i= 0; i < tbl_def_arg->m_key_count; i++) + { + if (create_key_def(table_arg, i, tbl_def_arg, + &m_key_descr_arr[i], cfs[i])) + { + DBUG_RETURN(1); + } + } + } + else + { + /* + old_tbl_def exists. This means we are creating a new tbl_def as part of + in-place alter table. Copy over existing keys from the old_tbl_def and + generate the necessary new key definitions if any. + */ + if (create_inplace_key_defs(table_arg, tbl_def_arg, old_table_arg, + old_tbl_def_arg, cfs)) + { + DBUG_RETURN(1); + } + } + + DBUG_RETURN(0); +} + +/* + Checks index parameters and creates column families needed for storing data + in rocksdb if necessary. + + @param in + table_arg Table with definition + db_table Table name + tbl_def_arg Table def structure being populated + + @param out + cfs CF info for each key definition in 'key_info' order + + @return + 0 - Ok + other - error +*/ +int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, + std::array* cfs) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + + DBUG_ENTER("ha_rocksdb::create_cfs"); + + char tablename_sys[NAME_LEN + 1]; + + my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(), + tablename_sys, sizeof(tablename_sys)); + + /* + The first loop checks the index parameters and creates + column families if necessary. 
+ */ + for (uint i= 0; i < tbl_def_arg->m_key_count; i++) + { + rocksdb::ColumnFamilyHandle* cf_handle; + + if (rocksdb_strict_collation_check && + !is_hidden_pk(i, table_arg, tbl_def_arg) && + tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) + { + for (uint part= 0; part < table_arg->key_info[i].actual_key_parts; part++) + { + if (!rdb_is_index_collation_supported( + table_arg->key_info[i].key_part[part].field) && + !rdb_collation_exceptions->matches(tablename_sys)) + { + std::string collation_err; + for (auto coll : RDB_INDEX_COLLATIONS) + { + if (collation_err != "") + { + collation_err += ", "; + } + collation_err += coll->name; + } + my_printf_error(ER_UNKNOWN_ERROR, + "Unsupported collation on string indexed " + "column %s.%s Use binary collation (%s).", MYF(0), + tbl_def_arg->full_tablename().c_str(), + table_arg->key_info[i].key_part[part].field->field_name, + collation_err.c_str()); + DBUG_RETURN(1); + } + } + } + + /* + index comment has Column Family name. If there was no comment, we get + NULL, and it means use the default column family. 
+ */ + const char *comment = get_key_comment(i, table_arg, tbl_def_arg); + const char *key_name = get_key_name(i, table_arg, tbl_def_arg); + + if (looks_like_per_index_cf_typo(comment)) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "column family name looks like a typo of $per_index_cf"); + DBUG_RETURN(1); + } + /* Prevent create from using the system column family */ + if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), + "column family not valid for storing index data"); + DBUG_RETURN(1); + } + bool is_auto_cf_flag; + cf_handle = cf_manager.get_or_create_cf(rdb, comment, + tbl_def_arg->full_tablename(), + key_name, &is_auto_cf_flag); + if (!cf_handle) + DBUG_RETURN(1); + + auto& cf = (*cfs)[i]; + cf.cf_handle = cf_handle; + cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(comment); + cf.is_auto_cf = is_auto_cf_flag; + } + + DBUG_RETURN(0); +} + +/* + Create key definition needed for storing data in rocksdb during ADD index + inplace operations. + + @param in + table_arg Table with definition + tbl_def_arg New table def structure being populated + old_tbl_def_arg Old(current) table def structure + cfs Struct array which contains column family information + + @return + 0 - Ok + other - error, either given table ddl is not supported by rocksdb or OOM. 
+*/ +int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg, + Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg, + const Rdb_tbl_def *old_tbl_def_arg, + const std::array& cfs) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + DBUG_ASSERT(old_tbl_def_arg != nullptr); + + DBUG_ENTER("create_key_def"); + + std::shared_ptr* old_key_descr= + old_tbl_def_arg->m_key_descr_arr; + std::shared_ptr* new_key_descr= + tbl_def_arg->m_key_descr_arr; + std::unordered_map old_key_pos = + get_old_key_positions(table_arg, tbl_def_arg, old_table_arg, + old_tbl_def_arg); + + uint i; + for (i= 0; i < tbl_def_arg->m_key_count; i++) + { + auto it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg)); + if (it != old_key_pos.end()) + { + /* + Found matching index in old table definition, so copy it over to the + new one created. + */ + const std::shared_ptr& okd= + old_key_descr[it->second]; + + uint16 index_dict_version= 0; + uchar index_type= 0; + uint16 kv_version= 0; + GL_INDEX_ID gl_index_id= okd->get_gl_index_id(); + if (!dict_manager.get_index_info(gl_index_id, &index_dict_version, + &index_type, &kv_version)) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Could not get index information " + "for Index Number (%u,%u), table %s", + gl_index_id.cf_id, gl_index_id.index_id, + old_tbl_def_arg->full_tablename().c_str()); + DBUG_RETURN(1); + } + + /* + We can't use the copy constructor because we need to update the + keynr within the pack_info for each field and the keyno of the keydef + itself. 
+ */ + new_key_descr[i]= std::make_shared( + okd->get_index_number(), + i, + okd->get_cf(), + index_dict_version, + index_type, + kv_version, + okd->m_is_reverse_cf, + okd->m_is_auto_cf, + okd->m_name.c_str(), + dict_manager.get_stats(gl_index_id)); + } + else if (create_key_def(table_arg, i, tbl_def_arg, + &new_key_descr[i], cfs[i])) + { + DBUG_RETURN(1); + } + + DBUG_ASSERT(new_key_descr[i] != nullptr); + new_key_descr[i]->setup(table_arg, tbl_def_arg); + } + + DBUG_RETURN(0); +} + +std::unordered_map ha_rocksdb::get_old_key_positions( + const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg, + const TABLE* old_table_arg, + const Rdb_tbl_def* old_tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(old_table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + DBUG_ASSERT(old_tbl_def_arg != nullptr); + + DBUG_ENTER("get_old_key_positions"); + + std::shared_ptr* old_key_descr= + old_tbl_def_arg->m_key_descr_arr; + std::unordered_map old_key_pos; + std::unordered_map new_key_pos; + uint i; + + for (i= 0; i < tbl_def_arg->m_key_count; i++) + { + new_key_pos[get_key_name(i, table_arg, tbl_def_arg)] = i; + } + + for (i= 0; i < old_tbl_def_arg->m_key_count; i++) + { + if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg)) + { + old_key_pos[old_key_descr[i]->m_name] = i; + continue; + } + + /* + In case of matching key name, need to check key parts of keys as well, + in case a simultaneous drop + add is performed, where the key name is the + same but the key parts are different. 
+ + Example: + CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB; + ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; + */ + const KEY* old_key = &old_table_arg->key_info[i]; + auto it = new_key_pos.find(old_key->name); + if (it == new_key_pos.end()) + { + continue; + } + + KEY* new_key = &table_arg->key_info[it->second]; + + if (!compare_key_parts(old_key, new_key)) + { + old_key_pos[old_key->name] = i; + } + } + + DBUG_RETURN(old_key_pos); +} + +/* Check two keys to ensure that key parts within keys match */ +int ha_rocksdb::compare_key_parts(const KEY* old_key, const KEY* new_key) +{ + DBUG_ASSERT(old_key != nullptr); + DBUG_ASSERT(new_key != nullptr); + + DBUG_ENTER("compare_key_parts"); + + /* Skip if key parts do not match, as it is a different key */ + if (new_key->user_defined_key_parts != old_key->user_defined_key_parts) + { + DBUG_RETURN(1); + } + + /* Check to see that key parts themselves match */ + for (uint i= 0; i < old_key->user_defined_key_parts; i++) + { + if (strcmp(old_key->key_part[i].field->field_name, + new_key->key_part[i].field->field_name) != 0) + { + DBUG_RETURN(1); + } + } + + DBUG_RETURN(0); +} + +/* + Create key definition needed for storing data in rocksdb. + This can be called either during CREATE table or doing ADD index operations. + + @param in + table_arg Table with definition + i Position of index being created inside table_arg->key_info + tbl_def_arg Table def structure being populated + cf_info Struct which contains column family information + + @param out + new_key_def Newly created index definition. + + @return + 0 - Ok + other - error, either given table ddl is not supported by rocksdb or OOM. 
+*/ +int ha_rocksdb::create_key_def(const TABLE *table_arg, uint i, + const Rdb_tbl_def* tbl_def_arg, + std::shared_ptr* new_key_def, + const struct key_def_cf_info& cf_info) +{ + DBUG_ENTER("create_key_def"); + DBUG_ASSERT(new_key_def != nullptr); + DBUG_ASSERT(*new_key_def == nullptr); + + uint index_id= ddl_manager.get_and_update_next_number(&dict_manager); + uint16_t index_dict_version= Rdb_key_def::INDEX_INFO_VERSION_LATEST; + uchar index_type; + uint16_t kv_version; + + if (is_hidden_pk(i, table_arg, tbl_def_arg)) + { + index_type= Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY; + kv_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + } + else if (i == table_arg->s->primary_key) + { + index_type= Rdb_key_def::INDEX_TYPE_PRIMARY; + uint16 pk_latest_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL", + {pk_latest_version= + Rdb_key_def::PRIMARY_FORMAT_VERSION_INITIAL; + }); + kv_version= pk_latest_version; + } + else + { + index_type= Rdb_key_def::INDEX_TYPE_SECONDARY; + uint16 sk_latest_version= Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL", + {sk_latest_version= + Rdb_key_def::SECONDARY_FORMAT_VERSION_INITIAL; + }); + kv_version= sk_latest_version; + } + + const char *key_name = get_key_name(i, table_arg, m_tbl_def); + *new_key_def= std::make_shared( + index_id, i, cf_info.cf_handle, index_dict_version, index_type, + kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name); + + DBUG_RETURN(0); +} + +int rdb_normalize_tablename(const std::string& tablename, + std::string* strbuf) +{ + DBUG_ASSERT(strbuf != nullptr); + + if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/') + { + DBUG_ASSERT(0); // We were not passed table name? + return HA_ERR_INTERNAL_ERROR; + } + + size_t pos = tablename.find_first_of('/', 2); + if (pos == std::string::npos) + { + DBUG_ASSERT(0); // We were not passed table name? 
+ return HA_ERR_INTERNAL_ERROR; + } + + *strbuf = tablename.substr(2, pos - 2) + "." + tablename.substr(pos + 1); + + return 0; +} + +/* + Check to see if the user's original statement includes foreign key + references +*/ +bool ha_rocksdb::contains_foreign_key(THD* thd) +{ + bool success; + const char* str = thd_query_string(thd)->str; + + DBUG_ASSERT(str != nullptr); + + while (*str != '\0') + { + // Scan from our current pos looking for 'FOREIGN' + str = rdb_find_in_string(str, "FOREIGN", &success); + if (!success) + { + return false; + } + + // Skip past the found "FOREIGN' + str = rdb_check_next_token(&my_charset_bin, str, "FOREIGN", &success); + DBUG_ASSERT(success); + + if (!my_isspace(&my_charset_bin, *str)) + { + return false; + } + + // See if the next token is 'KEY' + str = rdb_check_next_token(&my_charset_bin, str, "KEY", &success); + if (!success) + { + continue; + } + + // See if the next token is '(' + str = rdb_check_next_token(&my_charset_bin, str, "(", &success); + if (!success) + { + // There is an optional index id after 'FOREIGN KEY', skip it + str = rdb_skip_id(&my_charset_bin, str); + + // Now check for '(' again + str = rdb_check_next_token(&my_charset_bin, str, "(", &success); + } + + // If we have found 'FOREIGN KEY [] (' we can be confident we have + // a foreign key clause. + return success; + } + + // We never found a valid foreign key clause + return false; +} + +/** + @brief + splits the normalized table name of .#P# into + the , and components. + + @param dbbuf returns database name/table_schema + @param tablebuf returns tablename + @param partitionbuf returns partition suffix if there is one + @return 0 on success, non-zero on failure to split +*/ +int rdb_split_normalized_tablename(const std::string& fullname, + std::string* db, + std::string* table, + std::string* partition) +{ + DBUG_ASSERT(!fullname.empty()); + +#define RDB_PARTITION_STR "#P#" + + /* Normalize returns dbname.tablename. 
*/ + size_t dotpos = fullname.find('.'); + + /* Invalid table name? */ + if (dotpos == std::string::npos) + { + return HA_ERR_INTERNAL_ERROR; + } + + // Table must have a database name associated with it. + DBUG_ASSERT(dotpos > 0); + + if (db != nullptr) + { + *db = fullname.substr(0, dotpos); + } + + dotpos++; + + size_t partpos = fullname.find(RDB_PARTITION_STR, dotpos, + strlen(RDB_PARTITION_STR)); + + if (partpos != std::string::npos) + { + DBUG_ASSERT(partpos >= dotpos); + + if (table != nullptr) + { + *table = fullname.substr(dotpos, partpos - dotpos); + } + + if (partition != nullptr) + { + *partition = fullname.substr(partpos + strlen(RDB_PARTITION_STR)); + } + } + else if (table != nullptr) + { + *table = fullname.substr(dotpos); + } + + return 0; +} + + +/** + @brief + create() is called to create a database. The variable name will have the name + of the table. + + @details + When create() is called you do not need to worry about + opening the table. Also, the .frm file will have already been + created so adjusting create_info is not necessary. You can overwrite + the .frm file at this point if you wish to change the table + definition, but there are no methods currently provided for doing + so. + + Called from handle.cc by ha_create_table(). + + @see + ha_create_table() in handle.cc +*/ + +int ha_rocksdb::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + int res; + std::string str; + DBUG_ENTER("ha_rocksdb::create"); + + THD *thd = my_core::thd_get_current_thd(); + + if (get_table_if_exists(name)) { + if (thd->lex->sql_command == SQLCOM_TRUNCATE) { + if (delete_table(name)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } else { + if (rdb_normalize_tablename(name, &str)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + my_printf_error(ER_UNKNOWN_ERROR, + "Table '%s' does not exist, but metadata information exists inside " + "MyRocks. This is a sign of data inconsistency. 
Please check if " + "'%s.frm' exists, and try to restore it if it does not exist.", + MYF(0), str.c_str(), name); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + /* + Construct dbname.tablename ourselves, because parititioning + passes strings like "./test/t14#P#p0" for individual partitions, + while table_arg->s->table_name has none of that. + */ + if (rdb_normalize_tablename(name, &str)) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (contains_foreign_key(thd)) + { + my_printf_error(ER_NOT_SUPPORTED_YET, "MyRocks does not currently support " + "foreign key constraints", MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* + TODO(alexyang): Temporarily disable unique indexes support when there is no + Primary Key + */ + for (uint keyno= 0; keyno < table_arg->s->keys; keyno++) + { + if ((table_arg->key_info[keyno].flags & HA_NOSAME) && + has_hidden_pk(table_arg)) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Unique index support is disabled when the table has no primary key. ", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + bool write_err= false; + std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch *batch= wb.get(); + + /* Create table/key descriptions and put them into the data dictionary */ + m_tbl_def= new Rdb_tbl_def(str); + + uint n_keys= table_arg->s->keys; + + /* + If no primary key found, create a hidden PK and place it inside table + definition + */ + if (has_hidden_pk(table_arg)) + { + n_keys += 1; + } + + m_key_descr_arr= new std::shared_ptr[n_keys]; + m_tbl_def->m_key_count= n_keys; + m_tbl_def->m_key_descr_arr= m_key_descr_arr; + + if ((res= create_key_defs(table_arg, m_tbl_def))) + { + goto error; + } + + m_pk_descr= m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; + + dict_manager.lock(); + write_err= ddl_manager.put_and_write(m_tbl_def, batch) + || dict_manager.commit(batch); + dict_manager.unlock(); + + if (write_err) + { + goto error; + } + + if (create_info->auto_increment_value) + m_tbl_def->m_auto_incr_val= 
create_info->auto_increment_value; + + /* + We only support auto_increment at start of the PRIMARY KEY. + */ + //Field *field; + //if ((field= table_arg->next_number_field)) + /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work + if ((field= table_arg->found_next_number_field)) + { + int pk= table_arg->s->primary_key; + Field *pk_field= table_arg->key_info[pk].key_part[0].field; + if (field->field_index != pk_field->field_index) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + */ + DBUG_RETURN(0); + +error: + /* Delete what we have allocated so far */ + delete m_tbl_def; + m_tbl_def= nullptr; + m_key_descr_arr= nullptr; + + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); +} + + +/** + @note + This function is used only when the table has not yet been opened, and + keyread_allowed bitmap doesn't have the correct values yet. + + See comment in ha_rocksdb::index_flags() for details. +*/ + +bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) const +{ + bool res= true; + KEY *key_info= &table_share->key_info[inx]; + + Rdb_field_packing dummy1; + res= dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, + key_info->key_part[part].length); + + if (res && all_parts) + { + for (uint i= 0; i < part; i++) + { + Field *field; + if ((field= key_info->key_part[i].field)) + { + Rdb_field_packing dummy; + if (!dummy.setup(nullptr, field, inx, i, key_info->key_part[i].length)) + { + /* Cannot do index-only reads for this column */ + res= false; + break; + } + } + } + } + + uint pk= table_share->primary_key; + if (inx == pk && all_parts && + part + 1 == table_share->key_info[pk].user_defined_key_parts) + { + m_pk_can_be_decoded= res; + } + + return res; +} + +int ha_rocksdb::read_key_exact(const std::shared_ptr& kd, + rocksdb::Iterator *iter, + bool full_key_match, + const rocksdb::Slice& key_slice) const +{ + DBUG_ASSERT(kd != nullptr); + DBUG_ASSERT(iter != nullptr); + + /* + We are looking for the first record such that + index_tuple= 
lookup_tuple. + lookup_tuple may be a prefix of the index. + */ + if (kd->m_is_reverse_cf) + { + if (!full_key_match) + { + if (!iter->Valid()) + iter->SeekToLast(); + else + iter->Prev(); + } + } + + if (!iter->Valid() || !kd->value_matches_prefix(iter->key(), key_slice)) + { + /* + Got a record that is not equal to the lookup value, or even a record + from another table.index. + */ + return HA_ERR_KEY_NOT_FOUND; + } + return 0; +} + +int ha_rocksdb::read_before_key(const std::shared_ptr& kd, + bool full_key_match, + const rocksdb::Slice& key_slice) +{ + DBUG_ASSERT(kd != nullptr); + + /* + We are looking for record with the biggest t.key such that + t.key < lookup_tuple. + */ + if (kd->m_is_reverse_cf) + { + if (m_scan_it->Valid() && full_key_match && + kd->value_matches_prefix(m_scan_it->key(), key_slice)) + { + /* We are using full key and we've hit an exact match */ + m_scan_it->Next(); + } + } + else + { + if (m_scan_it->Valid()) + m_scan_it->Prev(); + else + m_scan_it->SeekToLast(); + } + + return m_scan_it->Valid() ? 0 : HA_ERR_KEY_NOT_FOUND; +} + +int ha_rocksdb::read_after_key(const std::shared_ptr& kd, + bool full_key_match, + const rocksdb::Slice& key_slice) +{ + DBUG_ASSERT(kd != nullptr); + + /* + We are looking for the first record such that + + index_tuple $GT lookup_tuple + + with HA_READ_AFTER_KEY, $GT = '>', + with HA_READ_KEY_OR_NEXT, $GT = '>=' + */ + if (kd->m_is_reverse_cf) + { + if (!m_scan_it->Valid()) + { + m_scan_it->SeekToLast(); + } + else + { + /* + We should step back + - when not using full extended key + - when using full extended key and when we've got an exact match + */ + if (!full_key_match || + !kd->value_matches_prefix(m_scan_it->key(), key_slice)) + { + m_scan_it->Prev(); + } + } + } + + return m_scan_it->Valid() ? 
0 : HA_ERR_KEY_NOT_FOUND; +} + +int ha_rocksdb::position_to_correct_key( + const std::shared_ptr& kd, + enum ha_rkey_function find_flag, + bool full_key_match, + const uchar* key, + key_part_map keypart_map, + const rocksdb::Slice& key_slice, + bool* move_forward) +{ + int rc= 0; + + *move_forward= true; + + switch (find_flag) { + case HA_READ_KEY_EXACT: + rc= read_key_exact(kd, m_scan_it, full_key_match, key_slice); + break; + case HA_READ_BEFORE_KEY: + *move_forward= false; + rc= read_before_key(kd, full_key_match, key_slice); + if (rc == 0 && !kd->covers_key(m_scan_it->key())) + { + /* The record we've got is not from this index */ + rc= HA_ERR_KEY_NOT_FOUND; + } + break; + case HA_READ_AFTER_KEY: + case HA_READ_KEY_OR_NEXT: + rc= read_after_key(kd, full_key_match, key_slice); + if (rc == 0 && !kd->covers_key(m_scan_it->key())) + { + /* The record we've got is not from this index */ + rc= HA_ERR_KEY_NOT_FOUND; + } + break; + case HA_READ_KEY_OR_PREV: + case HA_READ_PREFIX: + /* This flag is not used by the SQL layer, so we don't support it yet. */ + rc= HA_ERR_UNSUPPORTED; + break; + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + *move_forward= false; + /* + Find the last record with the specified index prefix lookup. + - HA_READ_PREFIX_LAST requires that the record has the + prefix=lookup (if there are no such records, + HA_ERR_KEY_NOT_FOUND should be returned). + - HA_READ_PREFIX_LAST_OR_PREV has no such requirement. If there are no + records with prefix=lookup, we should return the last record + before that. 
+ */ + rc= read_before_key(kd, full_key_match, key_slice); + if (rc == 0) + { + const rocksdb::Slice& rkey= m_scan_it->key(); + if (!kd->covers_key(rkey)) + { + /* The record we've got is not from this index */ + rc= HA_ERR_KEY_NOT_FOUND; + } + else if (find_flag == HA_READ_PREFIX_LAST) + { + uint size = kd->pack_index_tuple(table, m_pack_buffer, + m_sk_packed_tuple, key, + keypart_map); + rocksdb::Slice lookup_tuple( + reinterpret_cast(m_sk_packed_tuple), size); + + // We need to compare the key we've got with the original search prefix. + if (!kd->value_matches_prefix(rkey, lookup_tuple)) + { + rc= HA_ERR_KEY_NOT_FOUND; + } + } + } + break; + default: + DBUG_ASSERT(0); + break; + } + + return rc; +} + +int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr& kd, + enum ha_rkey_function find_flag, + const rocksdb::Slice& slice, + int bytes_changed_by_succ, + const key_range *end_key, + uint* end_key_packed_size) +{ + if (find_flag == HA_READ_KEY_EXACT) + return slice.size(); + + if (find_flag == HA_READ_PREFIX_LAST) + { + /* + We have made the kd->successor(m_sk_packed_tuple) call above. + + The slice is at least Rdb_key_def::INDEX_NUMBER_SIZE bytes long. + */ + return slice.size() - bytes_changed_by_succ; + } + + if (end_key) + { + *end_key_packed_size= kd->pack_index_tuple(table, m_pack_buffer, + m_end_key_packed_tuple, + end_key->key, + end_key->keypart_map); + + /* + Calculating length of the equal conditions here. 4 byte index id is + included. 
+ Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) + WHERE id1=1 AND id2=1 AND id3>=2 => eq_cond_len= 4+8+4= 16 + WHERE id1=1 AND id2>=1 AND id3>=2 => eq_cond_len= 4+8= 12 + Example2: id1 VARCHAR(30), id2 INT, PRIMARY KEY (id1, id2) + WHERE id1 = 'AAA' and id2 < 3; => eq_cond_len=13 (varchar used 9 bytes) + */ + rocksdb::Slice end_slice(reinterpret_cast(m_end_key_packed_tuple), + *end_key_packed_size); + return slice.difference_offset(end_slice); + } + + /* + On range scan without any end key condition, there is no + eq cond, and eq cond length is the same as index_id size (4 bytes). + Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) + WHERE id1>=1 AND id2 >= 2 and id2 <= 5 => eq_cond_len= 4 + */ + return Rdb_key_def::INDEX_NUMBER_SIZE; +} + +int ha_rocksdb::read_row_from_primary_key(uchar *buf) +{ + DBUG_ASSERT(buf != nullptr); + + int rc; + const rocksdb::Slice& rkey= m_scan_it->key(); + uint pk_size= rkey.size(); + const char *pk_data= rkey.data(); + + memcpy(m_pk_packed_tuple, pk_data, pk_size); + m_last_rowkey.copy(pk_data, pk_size, &my_charset_bin); + + if (m_lock_rows != RDB_LOCK_NONE) + { + /* We need to put a lock and re-read */ + rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + } + else + { + /* Unpack from the row we've read */ + const rocksdb::Slice& value = m_scan_it->value(); + rc= convert_record_from_storage_format(&rkey, &value, buf); + } + + return rc; +} + +int ha_rocksdb::read_row_from_secondary_key(uchar *buf, + const std::shared_ptr& kd, bool move_forward) +{ + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(kd != nullptr); + + int rc= 0; + uint pk_size; + + if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) + { + /* Get the key columns and primary key value */ + const rocksdb::Slice& rkey= m_scan_it->key(); + pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey, + m_pk_packed_tuple); + const rocksdb::Slice& value= m_scan_it->value(); + if (pk_size == 
RDB_INVALID_KEY_LEN) + { + rc= HA_ERR_INTERNAL_ERROR; + } + else + { + rc= try_keyonly_read_from_sk(buf, kd, rkey, value, pk_size); + } + } + else + { + if (kd->m_is_reverse_cf) + move_forward= !move_forward; + + rc= find_icp_matching_index_rec(move_forward, buf); + if (!rc) + { + const rocksdb::Slice& rkey= m_scan_it->key(); + pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey, + m_pk_packed_tuple); + if (pk_size == RDB_INVALID_KEY_LEN) + { + rc= HA_ERR_INTERNAL_ERROR; + } + else + { + rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + } + } + } + + if (!rc) + { + m_last_rowkey.copy((const char*)m_pk_packed_tuple, pk_size, + &my_charset_bin); + } + + return rc; +} + +/** + @note + Some data types require unpack_info in order to do index-only reads. + + However, support for this may have been added after the database was built. + This means that we need to support the case where unpack_info is not + available. In this case, we should just do a primary key lookup. +*/ +int ha_rocksdb::try_keyonly_read_from_sk(uchar* buf, + const std::shared_ptr& kd, + const rocksdb::Slice& key, + const rocksdb::Slice& value, + const uint rowid_size) +{ + int res= kd->unpack_record(table, buf, &key, &value, m_verify_checksums); + uint size= rowid_size; + if (res == UNPACK_INFO_MISSING) + { + // If we can't unpack, then do a PK lookup. + if (rowid_size == RDB_INVALID_KEY_LEN) + { + size= kd->get_primary_key_tuple( + table, m_pk_descr, &key, m_pk_packed_tuple); + } + return get_row_by_rowid(buf, m_pk_packed_tuple, size); + } + else if (res == UNPACK_FAILURE) + { + return HA_ERR_INTERNAL_ERROR; + } + DBUG_ASSERT(res == UNPACK_SUCCESS); + return res; +} + + +/** + @note + The problem with this function is that SQL layer calls it, when + - the table has not been yet opened (no ::open() call done) + - this->table_share already exists, but it is in the process of being + filled, so some of fields are still NULL. 
+ - In particular, table_share->key_info[inx].key_part[] is filled only up + to part #part. Subsequent key parts are not yet filled. + + To complicate things further, SQL layer will call index_flags() with + all_parts=TRUE. Essentially, we're asked to provide flags for reading + keyparts whose datatype is not yet known. + + We walk around this problem by using check_keyread_allowed(), which uses + table_share object and is careful not to step on unitialized data. + + When we get a call with all_parts=TRUE, we try to analyze all parts but + ignore those that have key_part->field==nullptr (these are not initialized + yet). +*/ + +ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const +{ + ulong base_flags= HA_READ_NEXT | // doesn't seem to be used + HA_READ_ORDER | + HA_READ_RANGE | + HA_READ_PREV; + + if (check_keyread_allowed(inx, part, all_parts)) + base_flags |= HA_KEYREAD_ONLY; + + if (inx == table_share->primary_key) + { + /* + Index-only reads on primary key are the same as table scan for us. Still, + we need to explicitly "allow" them, otherwise SQL layer will miss some + plans. + */ + base_flags |= HA_KEYREAD_ONLY; + } + else + { + /* + We can Index Condition Pushdown any key except the primary. With primary + key, we get (pk, record) pair immediately, there is no place to put the + ICP check. + */ + base_flags |= HA_DO_INDEX_COND_PUSHDOWN; + } + + return base_flags; +} + + +/** + @brief + Read next index tuple through the secondary index. + + @details + m_scan_it points at the index key-value pair that we should read the (pk,row) + pair for. 
+*/ +int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) +{ + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(table != nullptr); + + stats.rows_requested++; + + /* Use STATUS_NOT_FOUND when record not found or some error occurred */ + table->status= STATUS_NOT_FOUND; + + if (m_scan_it->Valid()) + { + rocksdb::Slice key= m_scan_it->key(); + + /* Check if we've ran out of records of this index */ + if (m_key_descr_arr[keyno]->covers_key(key)) + { + int rc; + + //TODO: We could here check if we have ran out of range we're scanning + uint size= m_key_descr_arr[keyno]->get_primary_key_tuple( + table, m_pk_descr, &key, m_pk_packed_tuple); + if (size == RDB_INVALID_KEY_LEN) + { + return HA_ERR_INTERNAL_ERROR; + } + + m_last_rowkey.copy((const char*)m_pk_packed_tuple, size, &my_charset_bin); + + if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && + !has_hidden_pk(table)) + { + rc= try_keyonly_read_from_sk(buf, m_key_descr_arr[keyno], + key, m_scan_it->value(), size); + } + else + { + rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); + } + + if (!rc) + { + table->status= 0; + stats.rows_read++; + stats.rows_index_next++; + update_row_stats(ROWS_READ); + } + return rc; + } + } + return HA_ERR_END_OF_FILE; +} + + +/* + ha_rocksdb::read_range_first overrides handler::read_range_first. + The only difference from handler::read_range_first is that + ha_rocksdb::read_range_first passes end_key to + ha_rocksdb::index_read_map_impl function. 
+*/ +int ha_rocksdb::read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range_arg, bool sorted) +{ + int result; + DBUG_ENTER("ha_rocksdb::read_range_first"); + + eq_range= eq_range_arg; + set_end_range(end_key, RANGE_SCAN_ASC); + + range_key_part= table->key_info[active_index].key_part; + + if (!start_key) // Read first record + result= ha_index_first(table->record[0]); + else + { + if (is_using_prohibited_gap_locks(table, + is_using_full_unique_key( + active_index, + start_key->keypart_map, + start_key->flag))) + { + DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); + } + + MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, + { result= index_read_map_impl(table->record[0], + start_key->key, + start_key->keypart_map, + start_key->flag, + end_key); }) + } + if (result) + DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) + ? HA_ERR_END_OF_FILE + : result); + + if (compare_key(end_range) <= 0) + { + DBUG_RETURN(0); + } + else + { + /* + The last read row does not fall in the range. So request + storage engine to release row lock if possible. + */ + unlock_row(); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } +} + + +int ha_rocksdb::index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) +{ + return index_read_map_impl(buf, key, keypart_map, find_flag, nullptr); +} + +/* + See storage/rocksdb/rocksdb-range-access.txt for description of how MySQL + index navigation commands are converted into RocksDB lookup commands. + + This function takes end_key as an argument, and it is set on range scan. + MyRocks needs to decide whether prefix bloom filter can be used or not. + To decide to use prefix bloom filter or not, calculating equal condition length + is needed. On equal lookups (find_flag == HA_READ_KEY_EXACT), equal + condition length is the same as rocksdb::Slice.size() of the start key. + On range scan, equal condition length is MIN(start_key, end_key) of the + rocksdb::Slice expression. 
+*/ +int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag, + const key_range *end_key) +{ + int rc= 0; + DBUG_ENTER("ha_rocksdb::index_read_map"); + + ha_statistic_increment(&SSV::ha_read_key_count); + const std::shared_ptr<Rdb_key_def>& kd= m_key_descr_arr[active_index]; + uint actual_key_parts= kd->get_key_parts(); + bool using_full_key= is_using_full_key(keypart_map, actual_key_parts); + + if (!end_key) + end_key= end_range; + + /* By default, we don't need the retrieved records to match the prefix */ + m_sk_match_prefix= nullptr; + stats.rows_requested++; + + if (active_index == table->s->primary_key && + find_flag == HA_READ_KEY_EXACT && using_full_key) + { + /* + Equality lookup over primary key, using full tuple. + This is a special case, use DB::Get. + */ + uint size= kd->pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple, + key, keypart_map); + rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); + if (!rc) + { + stats.rows_read++; + stats.rows_index_first++; + update_row_stats(ROWS_READ); + } + DBUG_RETURN(rc); + } + + /* + Unique secondary index performs lookups without the extended key fields + */ + uint packed_size; + if (active_index != table->s->primary_key && + table->key_info[active_index].flags & HA_NOSAME && + find_flag == HA_READ_KEY_EXACT && using_full_key) + { + key_part_map tmp_map= (key_part_map(1) << + table->key_info[active_index].user_defined_key_parts) - 1; + packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, tmp_map); + if (table->key_info[active_index].user_defined_key_parts != + kd->get_key_parts()) + using_full_key= false; + } + else + { + packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, keypart_map); + } + + if ((pushed_idx_cond && pushed_idx_cond_keyno == active_index) && + (find_flag == HA_READ_KEY_EXACT || find_flag == HA_READ_PREFIX_LAST)) + { + /* + We are doing a point index lookup, and ICP is
enabled. It is possible + that this call will be followed by ha_rocksdb->index_next_same() call. + + Do what InnoDB does: save the lookup tuple now. We will need it in + index_next_same/find_icp_matching_index_rec in order to stop scanning + as soon as index record doesn't match the lookup tuple. + + When not using ICP, handler::index_next_same() will make sure that rows + that don't match the lookup prefix are not returned. + */ + m_sk_match_prefix= m_sk_match_prefix_buf; + m_sk_match_length= packed_size; + memcpy(m_sk_match_prefix, m_sk_packed_tuple, packed_size); + } + + int bytes_changed_by_succ= 0; + if (find_flag == HA_READ_PREFIX_LAST_OR_PREV || + find_flag == HA_READ_PREFIX_LAST || + find_flag == HA_READ_AFTER_KEY) + { + /* See below */ + bytes_changed_by_succ= kd->successor(m_sk_packed_tuple, packed_size); + } + + rocksdb::Slice slice( + reinterpret_cast<const char*>(m_sk_packed_tuple), packed_size); + + uint end_key_packed_size= 0; + uint eq_cond_len= calc_eq_cond_len(kd, find_flag, slice, + bytes_changed_by_succ, end_key, + &end_key_packed_size); + + bool use_all_keys= false; + if (find_flag == HA_READ_KEY_EXACT && + my_count_bits(keypart_map) == kd->get_key_parts()) + use_all_keys= true; + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + bool is_new_snapshot= !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e. it did not exist prior to this) + for (;;) + { + /* + This will open the iterator and position it at a record that's equal or + greater than the lookup tuple.
+ */ + setup_scan_iterator(kd, &slice, use_all_keys, is_ascending(kd, find_flag), + eq_cond_len); + + /* + Once we are positioned on from above, move to the position we really + want: See storage/rocksdb/rocksdb-range-access.txt + */ + bool move_forward; + rc = position_to_correct_key(kd, find_flag, using_full_key, key, + keypart_map, slice, &move_forward); + + if (rc) + { + /* This status is returned on any error */ + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(rc); + } + + m_skip_scan_it_next_call= false; + + /* + Now get the data for the row into 'buf'. If we were using a primary key + then we have all the rows we need. For a secondary key we now need to + lookup the primary key. + */ + if (active_index == table->s->primary_key) + rc= read_row_from_primary_key(buf); + else + rc= read_row_from_secondary_key(buf, kd, move_forward); + + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; /* Exit the loop */ + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (rc) + { + /* the only possible error condition is record-not-found */ + table->status= STATUS_NOT_FOUND; + } + else + { + table->status= 0; + stats.rows_read++; + stats.rows_index_first++; + update_row_stats(ROWS_READ); + } + DBUG_RETURN(rc); +} + + +/* + @brief + Scan the secondary index until we find an index record that satisfies ICP + + @param move_forward TRUE <=> move m_scan_it forward + FALSE <=> move m_scan_it backward + @param buf Record buffer (must be the same buffer that + pushed index condition points to, in practice + it is table->record[0]) + + @detail + Move the current iterator m_scan_it until we get an index tuple that + satisfies the pushed Index Condition. + (if there is no pushed index condition, return right away) + + @return + 0 - Index tuple satisfies ICP, can do index read. 
+ other - error code +*/ + +int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf) +{ + DBUG_ASSERT(buf != nullptr); + + if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) + { + const std::shared_ptr<Rdb_key_def>& kd= m_key_descr_arr[active_index]; + DBUG_ASSERT(kd != nullptr); + + while (1) + { + if (!m_scan_it->Valid()) + { + table->status= STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + rocksdb::Slice rkey= m_scan_it->key(); + + if (!kd->covers_key(rkey)) + { + table->status= STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + + if (m_sk_match_prefix) + { + rocksdb::Slice prefix((const char*)m_sk_match_prefix, + m_sk_match_length); + if (!kd->value_matches_prefix(rkey, prefix)) + { + table->status= STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + } + + rocksdb::Slice value= m_scan_it->value(); + if (try_keyonly_read_from_sk(buf, kd, rkey, value, RDB_INVALID_KEY_LEN)) + { + return HA_ERR_INTERNAL_ERROR; + } + + enum icp_result icp_status= check_index_cond(); + if (icp_status == ICP_NO_MATCH) + { + if (move_forward) + m_scan_it->Next(); + else + m_scan_it->Prev(); + continue; /* Get the next (or prev) index tuple */ + } + else if (icp_status == ICP_OUT_OF_RANGE) + { + /* We have walked out of range we are scanning */ + table->status= STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + else /* icp_status == ICP_MATCH */ + { + /* Index Condition is satisfied. We have rc==0, proceed to fetch the row. */ + break; + } + /* + TODO: should we have this here, or RocksDB handles this internally?
+ if (my_core::thd_killed(current_thd)) + { + rc= HA_ERR_INTERNAL_ERROR; // doesn't matter + break; + } + */ + } + } + return 0; +} + + +int ha_rocksdb::index_read_last_map(uchar * buf, const uchar * key, + key_part_map keypart_map) +{ + return index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST); +} + + +int ha_rocksdb::check(THD* thd, HA_CHECK_OPT* check_opt) +{ + uint pk= pk_index(table, m_tbl_def); + String rowkey_copy; + String sec_key_copy; + const char* table_name = table->s->table_name.str; + + // Only when debugging: don't use snapshot when reading + // Rdb_transaction *tx= get_or_create_tx(table->in_use); + // tx->snapshot= nullptr; + + bool save_verify_checksums= m_verify_checksums; + m_verify_checksums= true; + + /* For each secondary index, check that we can get a PK value from it */ + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: Checking table %s", table_name, + table_name); + ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true + ha_rows row_checksums= ha_rows(-1); + bool first_index= true; + + for (uint keyno= 0; keyno < table->s->keys; keyno++) + { + if (keyno != pk) + { + extra(HA_EXTRA_KEYREAD); + ha_index_init(keyno, true); + ha_rows rows= 0; + ha_rows checksums= 0; + if (first_index) + row_checksums_at_start= m_row_checksums_checked; + int res; + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: Checking index %s", table_name, + table->key_info[keyno].name); + while (1) + { + if (!rows) + res= index_first(table->record[0]); + else + res= index_next(table->record[0]); + + if (res == HA_ERR_END_OF_FILE) + break; + if (res) + { + // error + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. 
row %lld: index scan error %d", + table_name, rows, res); + goto error; + } + rocksdb::Slice key= m_scan_it->key(); + sec_key_copy.copy(key.data(), key.size(), &my_charset_bin); + rowkey_copy.copy(m_last_rowkey.ptr(), m_last_rowkey.length(), + &my_charset_bin); + + if (m_key_descr_arr[keyno]-> + unpack_info_has_checksum(m_scan_it->value())) + { + checksums++; + } + + if ((res= get_row_by_rowid(table->record[0], rowkey_copy.ptr(), + rowkey_copy.length()))) + { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. row %lld: " + "failed to fetch row by rowid", + table_name, rows); + goto error; + } + + longlong hidden_pk_id= 0; + if (has_hidden_pk(table) && + read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + goto error; + + /* Check if we get the same PK value */ + uint packed_size= m_pk_descr->pack_record( + table, m_pack_buffer, table->record[0], m_pk_packed_tuple, nullptr, + false, hidden_pk_id); + if (packed_size != rowkey_copy.length() || + memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size)) + { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. row %lld: PK value mismatch", + table_name, rows); + goto print_and_error; + } + + /* Check if we get the same secondary key value */ + packed_size= m_key_descr_arr[keyno]->pack_record( + table, m_pack_buffer, table->record[0], m_sk_packed_tuple, + &m_sk_tails, false, hidden_pk_id); + if (packed_size != sec_key_copy.length() || + memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size)) + { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. 
row %lld: " + "secondary index value mismatch", + table_name, rows); + goto print_and_error; + } + rows++; + continue; + +print_and_error: + { + std::string buf; + buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), 1000); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, + buf.c_str()); + + buf= rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), + 1000); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: record: %s", table_name, + buf.c_str()); + + buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), 1000); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: index: %s", table_name, + buf.c_str()); + + goto error; + } + } + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: ... %lld index entries checked " + "(%lld had checksums)", table_name, rows, + checksums); + + if (first_index) + { + row_checksums= m_row_checksums_checked - row_checksums_at_start; + first_index= false; + } + ha_index_end(); + } + } + if (row_checksums != ha_rows(-1)) + { + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: %lld table records had checksums", + table_name, row_checksums); + } + extra(HA_EXTRA_NO_KEYREAD); + + m_verify_checksums= save_verify_checksums; + /* + TODO: we should check also for PK records that are missing in the secondary + indexes. + For that, need to walk through the PK and check that every PK record has a + proper counterpart in each secondary index. + */ + return HA_ADMIN_OK; +error: + m_verify_checksums= save_verify_checksums; + ha_index_or_rnd_end(); + extra(HA_EXTRA_NO_KEYREAD); + return HA_ADMIN_CORRUPT; +} + + +static void dbug_dump_str(FILE *out, const char *str, int len) +{ + fprintf(out, "\""); + for (int i= 0; i < len; i++) + { + if (str[i] > 32) + fprintf(out, "%c", str[i]); + else + fprintf(out, "\\%d", str[i]); + } + fprintf(out, "\""); +} + + +/* + Debugging help: dump the whole database into a human-readable file. 
+ Usage: + dbug_dump_database(rdb); +*/ + +void dbug_dump_database(rocksdb::DB *db) +{ + FILE *out= fopen("/tmp/rocksdb.dump", "wt"); + if (!out) + return; + + rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions()); + for (it->SeekToFirst(); it->Valid(); it->Next()) { + rocksdb::Slice key= it->key(); + rocksdb::Slice val= it->value(); + dbug_dump_str(out, key.data(), key.size()); + fprintf(out, " -> "); + dbug_dump_str(out, val.data(), val.size()); + fprintf(out, "\n"); + } + + delete it; + fclose(out); +} + + +/* + Given a rowid (i.e. packed PK) as a parameter, get the record. + + @return + 0 - Ok + other - HA_ERR error code. +*/ + +int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, + const uint rowid_size) +{ + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(rowid != nullptr); + DBUG_ASSERT(table != nullptr); + + int rc; + DBUG_ENTER("ha_rocksdb::get_row_by_rowid"); + + rocksdb::Slice key_slice(rowid, rowid_size); + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid"); + DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid", + { + THD *thd= ha_thd(); + const char act[]= + "now signal Reached " + "wait_for signal.rocksdb.get_row_by_rowid_let_running"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(thd, + STRING_WITH_LEN(act))); + };); + + bool found; + rocksdb::Status s; + + if (m_lock_rows == RDB_LOCK_NONE) + { + tx->acquire_snapshot(true); + s= tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + } + else + s= tx->get_for_update(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + + if (!s.IsNotFound() && !s.ok()) + { + DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def)); + } + found= !s.IsNotFound(); + + table->status= STATUS_NOT_FOUND; + if (found) + { + m_last_rowkey.copy((const char*)rowid, rowid_size, &my_charset_bin); + rc= convert_record_from_storage_format(&key_slice, buf); + if (!rc) + 
table->status= 0; + } + else + { + /* + Note: we don't need to unlock the row. It is intentional that we keep + locks on rows that don't exist. + */ + rc= HA_ERR_KEY_NOT_FOUND; + } + + DBUG_RETURN(rc); +} + + +int ha_rocksdb::index_next(uchar *buf) +{ + bool moves_forward= true; + ha_statistic_increment(&SSV::ha_read_next_count); + if (m_key_descr_arr[active_index]->m_is_reverse_cf) + { + moves_forward= false; + } + + int rc= index_next_with_direction(buf, moves_forward); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + return rc; +} + + +int ha_rocksdb::index_prev(uchar *buf) +{ + bool moves_forward= false; + ha_statistic_increment(&SSV::ha_read_prev_count); + if (m_key_descr_arr[active_index]->m_is_reverse_cf) + { + moves_forward= true; + } + + int rc= index_next_with_direction(buf, moves_forward); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + return rc; +} + + +int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward) +{ + int rc; + DBUG_ENTER("ha_rocksdb::index_next"); + + if (active_index == pk_index(table, m_tbl_def)) + { + rc= rnd_next_with_direction(buf, move_forward); + } + else + { + if (m_skip_scan_it_next_call) + { + m_skip_scan_it_next_call= false; + } + else + { + if (move_forward) + m_scan_it->Next(); /* this call cannot fail */ + else + m_scan_it->Prev(); + } + rc= find_icp_matching_index_rec(move_forward, buf); + if (!rc) + rc= secondary_index_read(active_index, buf); + } + + DBUG_RETURN(rc); +} + + +int ha_rocksdb::index_first(uchar *buf) +{ + m_sk_match_prefix= nullptr; + ha_statistic_increment(&SSV::ha_read_first_count); + int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? + index_last_intern(buf) : index_first_intern(buf); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + return rc; +} + + +int ha_rocksdb::index_last(uchar *buf) +{ + m_sk_match_prefix= nullptr; + ha_statistic_increment(&SSV::ha_read_last_count); + int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? 
+ index_first_intern(buf) : index_last_intern(buf); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + return rc; +} + + +/* + Start scanning from the "first" value. + + The 'first' here means "the first from start of the key space". + For reverse-ordered key spaces, the first value will be the biggest, 'zzzz'. + + A picture of a forward-ordered keyspace (remember, the keys have form + 'indexnr-keyval'. Suppose the index we are at has number n) + + (n-1) - ... + ( n ) <--- 1. (n) doesn't exist in the db but it would be here. + ( n ) - aaa <--- 2. Seek("n") will put us here on the first index + ( n ) - bbb record. + ( n ) - cc + + So, need to do: Seek(n); + + A backward-ordered keyspace: + + (n+1) - bbb + (n+1) - aaa + (n+1) <--- (n+1) doesn't exist in the db but would be here. + ( n ) - ccc <--- 1. We need to be here. + ( n ) - bbb + ( n ) - aaa + ( n ) + + So, need to: Seek(n+1); + +*/ + +int ha_rocksdb::index_first_intern(uchar *buf) +{ + DBUG_ASSERT(buf != nullptr); + + uchar *key; + uint key_size; + int rc; + DBUG_ENTER("ha_rocksdb::index_first"); + + if (is_pk(active_index, table, m_tbl_def)) + { + key= m_pk_packed_tuple; + } + else + { + key= m_sk_packed_tuple; + } + + DBUG_ASSERT(key != nullptr); + + const std::shared_ptr<Rdb_key_def>& kd= m_key_descr_arr[active_index]; + if (kd->m_is_reverse_cf) + { + kd->get_supremum_key(key, &key_size); + } + else + { + kd->get_infimum_key(key, &key_size); + } + + rocksdb::Slice index_key((const char*)key, key_size); + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + bool is_new_snapshot= !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e.
it did not exist prior to this) + for (;;) + { + setup_scan_iterator(kd, &index_key, false, !kd->m_is_reverse_cf, + Rdb_key_def::INDEX_NUMBER_SIZE); + m_skip_scan_it_next_call= true; + + rc= index_next_with_direction(buf, true); + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; // exit the loop + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (!rc) + { + /* + index_next is always incremented on success, so decrement if it is + index_first instead + */ + stats.rows_index_first++; + stats.rows_index_next--; + } + DBUG_RETURN(rc); +} + + +/** + @details + Start scanning from the "last" value + + The 'last' here means "the last from start of the key space". + For reverse-ordered key spaces, we will actually read the smallest value. + + An picture of a forward-ordered keyspace (remember, the keys have form + 'indexnr-keyval'. Suppose the we are at a key that has number n) + + (n-1)-something + ( n )-aaa + ( n )-bbb + ( n )-ccc <----------- Need to seek to here. + (n+1) <---- Doesn't exist, but would be here. + (n+1)-smth, or no value at all + + RocksDB's Iterator::Seek($val) seeks to "at $val or first value that's + greater". We can't see to "(n)-ccc" directly, because we don't know what + is the value of 'ccc' (the biggest record with prefix (n)). Instead, we seek + to "(n+1)", which is the least possible value that's greater than any value + in index #n. Then we step one record back. + + So, need to: it->Seek(n+1) || it->SeekToLast(); it->Prev(); + + A backward-ordered keyspace: + + (n+1)-something + ( n ) - ccc + ( n ) - bbb + ( n ) - aaa <---------------- (*) Need to seek here. + ( n ) <--- Doesn't exist, but would be here. 
+ (n-1)-smth, or no value at all + + So, need to: it->Seek(n) || it->SeekToLast(); it->Prev(); +*/ + +int ha_rocksdb::index_last_intern(uchar *buf) +{ + DBUG_ASSERT(buf != nullptr); + + uchar *key; + uint key_size; + int rc; + DBUG_ENTER("ha_rocksdb::index_last"); + + if (is_pk(active_index, table, m_tbl_def)) + { + key= m_pk_packed_tuple; + } + else + { + key= m_sk_packed_tuple; + } + + DBUG_ASSERT(key != nullptr); + + const std::shared_ptr& kd= m_key_descr_arr[active_index]; + if (kd->m_is_reverse_cf) + { + kd->get_infimum_key(key, &key_size); + } + else + { + kd->get_supremum_key(key, &key_size); + } + + rocksdb::Slice index_key((const char*)key, key_size); + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + bool is_new_snapshot= !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e. it did not exist prior to this) + for (;;) + { + setup_scan_iterator(kd, &index_key, false, kd->m_is_reverse_cf, + Rdb_key_def::INDEX_NUMBER_SIZE); + + if (!m_scan_it->Valid()) + { + /* + RocksDB keyspace has no records that follow our index. It could be, our + index is the last in the keyspace. Navigate to last record in the + keyspace. + */ + m_scan_it->SeekToLast(); + } + else + { + /* + Iterator is pointing at some record right after the end of the index. + Move one record back. 
+ */ + m_scan_it->Prev(); + } + m_skip_scan_it_next_call= false; + + if (is_pk(active_index, table, m_tbl_def)) + { + m_skip_scan_it_next_call= true; + rc= rnd_next_with_direction(buf, false); + } + else + { + rc= find_icp_matching_index_rec(false /*move_forward*/, buf); + if (!rc) + rc= secondary_index_read(active_index, buf); + } + + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; /* exit the loop */ + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (!rc) + { + /* + index_next is always incremented on success, so decrement if it is + index_first instead + */ + stats.rows_index_first++; + stats.rows_index_next--; + } + DBUG_RETURN(rc); +} + + +void ha_rocksdb::unlock_row() +{ + if (m_lock_rows != RDB_LOCK_NONE) + { + Rdb_transaction *tx= get_or_create_tx(table->in_use); + tx->release_lock(m_pk_descr->get_cf(), std::string(m_last_rowkey.ptr(), + m_last_rowkey.length())); + } +} + +/* + Returning true if SingleDelete can be used. + - Secondary Indexes can always use SingleDelete. + - If the index is PRIMARY KEY, and if all of the columns of the table + are covered by the PRIMARY KEY, SingleDelete can be used. 
+*/ +bool ha_rocksdb::can_use_single_delete(uint index) +{ + return (index != pk_index(table, m_tbl_def) || + (!has_hidden_pk(table) && + table->key_info[index].actual_key_parts == table->s->fields)); +} + +bool ha_rocksdb::skip_unique_check() +{ + /* + We want to skip unique checks if: + 1) bulk_load is on + 2) this table is in the whitelist of tables to skip and the replication + lag has reached a large enough value (see unique_check_lag_threshold + and unique_check_lage_reset_threshold) + 3) the user set rocksdb_skip_unique_check + */ + return THDVAR(table->in_use, bulk_load) || + (m_force_skip_unique_check && m_skip_unique_check) || + THDVAR(table->in_use, skip_unique_check); +} + +void ha_rocksdb::set_force_skip_unique_check(bool skip) +{ + m_force_skip_unique_check= skip; +} + +bool ha_rocksdb::commit_in_the_middle() +{ + return THDVAR(table->in_use, bulk_load) || + THDVAR(table->in_use, commit_in_the_middle); +} + +/* + Executing bulk commit if it should. + @retval true if bulk commit failed + @retval false if bulk commit was skipped or succeeded +*/ +bool ha_rocksdb::do_bulk_commit(Rdb_transaction *tx) +{ + DBUG_ASSERT(tx != nullptr); + return commit_in_the_middle() && + tx->get_write_count() >= THDVAR(table->in_use, bulk_load_size) && + tx->flush_batch(); +} + +/* + If table was created without primary key, SQL layer represents the primary + key number as MAX_INDEXES. Hence, this function returns true if the table + does not contain a primary key. (In which case we generate a hidden + 'auto-incremented' pk.) +*/ +bool ha_rocksdb::has_hidden_pk(const TABLE* table) +{ + DBUG_ASSERT(table != nullptr); + return Rdb_key_def::table_has_hidden_pk(table); +} + +/* + Returns true if given index number is a hidden_pk. + - This is used when a table is created with no primary key. 
+*/ +bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return (table_arg->s->primary_key == MAX_INDEXES && + index == tbl_def_arg->m_key_count - 1); +} + +/* Returns index of primary key */ +uint ha_rocksdb::pk_index(const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return table_arg->s->primary_key == MAX_INDEXES ? + tbl_def_arg->m_key_count - 1 : + table_arg->s->primary_key; +} + +/* Returns true if given index number is a primary key */ +bool ha_rocksdb::is_pk(const uint index, const TABLE* table_arg, + const Rdb_tbl_def *tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return index == table_arg->s->primary_key || + is_hidden_pk(index, table_arg, tbl_def_arg); +} + +const char* ha_rocksdb::get_key_name(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + + if (is_hidden_pk(index, table_arg, tbl_def_arg)) + { + return HIDDEN_PK_NAME; + } + + return table_arg->key_info[index].name; +} + +const char* ha_rocksdb::get_key_comment(const uint index, + const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) +{ + DBUG_ASSERT(table_arg != nullptr); + + if (is_hidden_pk(index, table_arg, tbl_def_arg)) + { + return nullptr; + } + + return table_arg->key_info[index].comment.str; +} + +int ha_rocksdb::write_row(uchar *buf) +{ + DBUG_ENTER("ha_rocksdb::write_row"); + DBUG_ASSERT(buf == table->record[0]); + DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); + ha_statistic_increment(&SSV::ha_write_count); + /* + Note: "buf == table->record[0]" is copied from innodb. 
I am not aware of + any use cases where this condition is not true. + */ + if (table->next_number_field && buf == table->record[0]) + { + int err; + if ((err= update_auto_increment())) + { + DBUG_RETURN(err); + } + } + + int rv= update_write_row(nullptr, buf, skip_unique_check()); + + if (rv == 0) + { + stats.rows_inserted++; + update_row_stats(ROWS_INSERTED); + } + DBUG_RETURN(rv); +} + + +/** + Constructing m_last_rowkey (MyRocks key expression) from + before_update|delete image (MySQL row expression). + m_last_rowkey is normally set during lookup phase, such as + rnd_next_with_direction() and rnd_pos(). With Read Free Replication, + these read functions are skipped and update_rows(), delete_rows() are + called without setting m_last_rowkey. This function sets m_last_rowkey + for Read Free Replication. +*/ +void ha_rocksdb::set_last_rowkey(const uchar *old_data) +{ + if (old_data && use_read_free_rpl()) + { + int old_pk_size= m_pk_descr->pack_record( + table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false); + m_last_rowkey.copy((const char*)m_pk_packed_tuple, old_pk_size, + &my_charset_bin); + } +} + +int ha_rocksdb::get_pk_for_update(struct update_row_info* row_info) +{ + int size; + + /* + Get new row key for any insert, and any update where the pk is not hidden. + Row key for updates with hidden pk is handled below. 
+ */ + if (!has_hidden_pk(table)) + { + row_info->hidden_pk_id= 0; + + row_info->new_pk_unpack_info= &m_pk_unpack_info; + + size= m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, + m_pk_packed_tuple, + row_info->new_pk_unpack_info, + false); + } + else if (row_info->old_data == nullptr) + { + row_info->hidden_pk_id= update_hidden_pk_val(); + size= m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple); + } + else + { + /* + If hidden primary key, rowkey for new record will always be the same as + before + */ + size= row_info->old_pk_slice.size(); + memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size); + if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id)) + { + return HA_ERR_INTERNAL_ERROR; + } + } + + row_info->new_pk_slice= rocksdb::Slice((const char*)m_pk_packed_tuple, size); + + return 0; +} + +int ha_rocksdb::check_and_lock_unique_pk(uint key_id, + const struct update_row_info& row_info, + bool* found, bool* pk_changed) +{ + DBUG_ASSERT(found != nullptr); + DBUG_ASSERT(pk_changed != nullptr); + + *pk_changed= false; + + /* + For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs + always require locking. + */ + if (row_info.old_pk_slice.size() > 0) + { + /* + If the keys are the same, then no lock is needed + */ + if (!Rdb_pk_comparator::bytewise_compare(row_info.new_pk_slice, + row_info.old_pk_slice)) + { + *found= false; + return 0; + } + + *pk_changed= true; + } + + /* + Perform a read to determine if a duplicate entry exists. For primary + keys, a point lookup will be sufficient. + + note: we intentionally don't set options.snapshot here. We want to read + the latest committed data. + */ + + /* + To prevent race conditions like below, it is necessary to + take a lock for a target row. get_for_update() holds a gap lock if + target key does not exist, so below conditions should never + happen. 
+ + 1) T1 Get(empty) -> T2 Get(empty) -> T1 Put(insert) -> T1 commit + -> T2 Put(overwrite) -> T2 commit + 2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty) + -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite) + */ + rocksdb::Status s= row_info.tx->get_for_update(m_pk_descr->get_cf(), + row_info.new_pk_slice, + &m_retrieved_record); + if (!s.ok() && !s.IsNotFound()) + { + return row_info.tx->set_status_error(table->in_use, s, + m_key_descr_arr[key_id], m_tbl_def); + } + + *found= !s.IsNotFound(); + return 0; +} + +int ha_rocksdb::check_and_lock_sk(uint key_id, + const struct update_row_info& row_info, + bool* found) const +{ + DBUG_ASSERT(found != nullptr); + *found= false; + + /* + Can skip checking this key if none of the key fields have changed. + */ + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) + { + return 0; + } + + KEY* key_info= nullptr; + uint n_null_fields= 0; + uint user_defined_key_parts= 1; + + key_info= &table->key_info[key_id]; + user_defined_key_parts= key_info->user_defined_key_parts; + /* + If there are no uniqueness requirements, there's no need to obtain a + lock for this key. + */ + if (!(key_info->flags & HA_NOSAME)) + { + return 0; + } + + const std::shared_ptr& kd= m_key_descr_arr[key_id]; + + /* + Calculate the new key for obtaining the lock + + For unique secondary indexes, the key used for locking does not + include the extended fields. + */ + int size= kd->pack_record(table, m_pack_buffer, row_info.new_data, + m_sk_packed_tuple, nullptr, false, 0, + user_defined_key_parts, &n_null_fields); + if (n_null_fields > 0) + { + /* + If any fields are marked as NULL this will never match another row as + to NULL never matches anything else including another NULL. + */ + return 0; + } + + rocksdb::Slice new_slice= rocksdb::Slice((const char*)m_sk_packed_tuple, + size); + + /* + For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs + always require locking. 
+ */ + if (row_info.old_data != nullptr) + { + size= kd->pack_record(table, m_pack_buffer, row_info.old_data, + m_sk_packed_tuple_old, nullptr, false, + row_info.hidden_pk_id, + user_defined_key_parts); + rocksdb::Slice old_slice= rocksdb::Slice( + (const char*)m_sk_packed_tuple_old, size); + + /* + For updates, if the keys are the same, then no lock is needed + + Also check to see if the key has any fields set to NULL. If it does, then + this key is unique since NULL is not equal to each other, so no lock is + needed. + */ + if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice)) + { + return 0; + } + } + + /* + Perform a read to determine if a duplicate entry exists - since this is + a secondary indexes a range scan is needed. + + note: we intentionally don't set options.snapshot here. We want to read + the latest committed data. + */ + + bool all_parts_used= (user_defined_key_parts == kd->get_key_parts()); + + /* + This iterator seems expensive since we need to allocate and free + memory for each unique index. + + If this needs to be optimized, for keys without NULL fields, the + extended primary key fields can be migrated to the value portion of the + key. This enables using Get() instead of Seek() as in the primary key + case. + + The bloom filter may need to be disabled for this lookup. 
+ */ + bool total_order_seek= + !can_use_bloom_filter(ha_thd(), kd, new_slice, all_parts_used, + is_ascending(m_key_descr_arr[key_id], + HA_READ_KEY_EXACT)); + bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); + + /* + psergey-todo: we just need to take lock, lookups not needed: + */ + std::string dummy_value; + rocksdb::Status s= row_info.tx->get_for_update(kd->get_cf(), new_slice, + &dummy_value); + if (!s.ok() && !s.IsNotFound()) + { + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + } + + rocksdb::Iterator *iter= row_info.tx->get_iterator(kd->get_cf(), + total_order_seek, + fill_cache, + true /* read current data */, + false /* acquire snapshot */); + /* + Need to scan the transaction to see if there is a duplicate key. + Also need to scan RocksDB and verify the key has not been deleted + in the transaction. + */ + iter->Seek(new_slice); + *found= !read_key_exact(kd, iter, all_parts_used, new_slice); + delete iter; + + return 0; +} + +int ha_rocksdb::check_uniqueness_and_lock( + const struct update_row_info& row_info, + bool* pk_changed) +{ + /* + Go through each index and determine if the index has uniqueness + requirements. If it does, then try to obtain a row lock on the new values. + Once all locks have been obtained, then perform the changes needed to + update/insert the row. + */ + for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++) + { + bool found; + int rc; + + if (is_pk(key_id, table, m_tbl_def)) + { + rc= check_and_lock_unique_pk(key_id, row_info, &found, pk_changed); + } + else + { + rc= check_and_lock_sk(key_id, row_info, &found); + } + + if (rc != 0) + { + return rc; + } + + if (found) + { + /* There is a row with this key already, so error out. 
*/ + errkey= key_id; + m_dupp_errkey= errkey; + return HA_ERR_FOUND_DUPP_KEY; + } + } + + return 0; +} + +int ha_rocksdb::bulk_load_key(Rdb_transaction* tx, + const std::shared_ptr& kd, + const rocksdb::Slice& key, + const rocksdb::Slice& value) +{ + rocksdb::ColumnFamilyHandle* cf= kd->get_cf(); + DBUG_ASSERT(cf != nullptr); + + if (m_sst_info == nullptr) + { + m_sst_info= + std::make_shared(rdb, m_table_handler->m_table_name, + kd->get_name(), cf, rocksdb_db_options); + tx->start_bulk_load(this); + } + + DBUG_ASSERT(m_sst_info != nullptr); + + int rc= m_sst_info->put(key, value); + if (rc != 0) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Failed to add a key to sst file writer(%s)", MYF(0), + m_sst_info->error_message().c_str()); + rc= HA_ERR_INTERNAL_ERROR; + } + + return rc; +} + +int ha_rocksdb::finalize_bulk_load() +{ + int rc= 0; + + if (m_sst_info != nullptr) + { + rc= m_sst_info->commit(); + if (rc != 0) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Failed to commit bulk loaded sst file to the " + "data store (%s)", + MYF(0), m_sst_info->error_message().c_str()); + rc= HA_ERR_INTERNAL_ERROR; + } + + m_sst_info= nullptr; + + Rdb_transaction *tx= get_or_create_tx(ha_thd()); + if (tx != nullptr) + { + tx->end_bulk_load(this); + } + } + + return rc; +} + +int ha_rocksdb::update_pk(const std::shared_ptr& kd, + const struct update_row_info& row_info, + bool pk_changed) +{ + uint key_id= kd->get_keyno(); + bool hidden_pk= is_hidden_pk(key_id, table, m_tbl_def); + if (!hidden_pk && pk_changed) + { + /* + The old key needs to be deleted. 
+ */ + rocksdb::Status s= delete_or_singledelete(key_id, row_info.tx, kd->get_cf(), + row_info.old_pk_slice); + if (!s.ok()) + { + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + } + } + + if (table->next_number_field) + { + update_auto_incr_val(); + } + + rocksdb::Slice value_slice; + convert_record_to_storage_format(row_info.new_pk_slice, + row_info.new_pk_unpack_info, + &value_slice); + + int rc= 0; + auto cf= m_pk_descr->get_cf(); + if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) && + !hidden_pk) + { + /* + Write the primary key directly to an SST file using an SstFileWriter + */ + rc= bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, + value_slice); + } + else if (row_info.skip_unique_check) + { + /* + It is responsibility of the user to make sure that the data being + inserted doesn't violate any unique keys. + */ + row_info.tx->get_blind_write_batch()->Put(cf, row_info.new_pk_slice, + value_slice); + } + else if (row_info.tx->m_ddl_transaction) + { + /* + DDL statement must check for unique key conflicts. For example: + ALTER TABLE tbl DROP PRIMARY KEY, ADD PRIMARY KEY(non_unique_column) + */ + row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice, + value_slice); + } + else + { + auto s= row_info.tx->put(cf, row_info.new_pk_slice, value_slice); + if (!s.ok()) + { + if (s.IsBusy()) + { + errkey= table->s->primary_key; + m_dupp_errkey= errkey; + rc = HA_ERR_FOUND_DUPP_KEY; + } + else + { + rc = row_info.tx->set_status_error(table->in_use, s, m_pk_descr, + m_tbl_def); + } + } + } + + return rc; +} + +int ha_rocksdb::update_sk(const TABLE* table_arg, + const std::shared_ptr& kd, + const struct update_row_info& row_info) +{ + int new_packed_size; + int old_packed_size; + + rocksdb::Slice new_key_slice; + rocksdb::Slice new_value_slice; + rocksdb::Slice old_key_slice; + + uint key_id = kd->get_keyno(); + /* + Can skip updating this key if none of the key fields have changed. 
+ */ + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) + { + return 0; + } + + bool store_checksums= should_store_checksums(); + + new_packed_size= kd->pack_record(table_arg, m_pack_buffer, row_info.new_data, + m_sk_packed_tuple, &m_sk_tails, + store_checksums, + row_info.hidden_pk_id); + + if (row_info.old_data != nullptr) + { + // The old value + old_packed_size= kd->pack_record(table_arg, m_pack_buffer, + row_info.old_data, + m_sk_packed_tuple_old, &m_sk_tails_old, + store_checksums, + row_info.hidden_pk_id); + + /* + Check if we are going to write the same value. This can happen when + one does + UPDATE tbl SET col='foo' + and we are looking at the row that already has col='foo'. + + We also need to compare the unpack info. Suppose, the collation is + case-insensitive, and unpack info contains information about whether + the letters were uppercase and lowercase. Then, both 'foo' and 'FOO' + will have the same key value, but different data in unpack_info. + + (note: anyone changing bytewise_compare should take this code into + account) + */ + if (old_packed_size == new_packed_size && + m_sk_tails_old.get_current_pos() == m_sk_tails.get_current_pos() && + memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple, + old_packed_size) == 0 && + memcmp(m_sk_tails_old.ptr(), m_sk_tails.ptr(), + m_sk_tails.get_current_pos()) == 0) + { + return 0; + } + + /* + Deleting entries from secondary index should skip locking, but + be visible to the transaction. + (also note that DDL statements do not delete rows, so this is not a DDL + statement) + */ + old_key_slice= rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple_old), + old_packed_size); + + row_info.tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(), + old_key_slice); + } + + /* + We're writing a new entry for secondary key. 
We can skip locking; we + should write to + - WriteBatchWithIndex normally (so that transaction sees the new row) + - non-indexed WriteBatch, when we don't need to see the new row: + = when doing a DDL operation and writing to a non-unique index, or + = when doing a bulk load + */ + rocksdb::WriteBatchBase *write_batch; + if ((row_info.tx->m_ddl_transaction && + !(table_arg->key_info[key_id].flags & HA_NOSAME)) || + row_info.skip_unique_check) + { + write_batch= row_info.tx->get_blind_write_batch(); + } + else + { + write_batch= row_info.tx->get_indexed_write_batch(); + } + + new_key_slice= rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + new_value_slice= rocksdb::Slice( + reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); + + write_batch->Put(kd->get_cf(), new_key_slice, new_value_slice); + + return 0; +} + +int ha_rocksdb::update_indexes(const struct update_row_info& row_info, + bool pk_changed) +{ + int rc; + + for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++) + { + const std::shared_ptr& kd= m_key_descr_arr[key_id]; + if (is_pk(key_id, table, m_tbl_def)) + { + rc= update_pk(kd, row_info, pk_changed); + } + else + { + rc= update_sk(table, kd, row_info); + } + + if (rc != 0) + { + return rc; + } + } + + return 0; +} + +int ha_rocksdb::update_write_row(const uchar *old_data, + const uchar *new_data, + const bool skip_unique_check) +{ + bool pk_changed= false; + struct update_row_info row_info; + + row_info.old_data= old_data; + row_info.new_data= new_data; + row_info.skip_unique_check= skip_unique_check; + row_info.new_pk_unpack_info= nullptr; + + DBUG_ENTER("ha_rocksdb::write_row_common"); + + set_last_rowkey(old_data); + + row_info.tx= get_or_create_tx(table->in_use); + + if (old_data != nullptr) + { + row_info.old_pk_slice= rocksdb::Slice(m_last_rowkey.ptr(), + m_last_rowkey.length()); + + /* Determine which indexes need updating. 
*/ + calc_updated_indexes(); + } + + /* + Get the new row key into row_info.new_pk_slice + */ + int rc= get_pk_for_update(&row_info); + if (rc != 0) + { + DBUG_RETURN(rc); + } + + if (!skip_unique_check) + { + /* + Check to see if we are going to have failures because of unique + keys. Also lock the appropriate key values. + */ + rc= check_uniqueness_and_lock(row_info, &pk_changed); + if (rc != 0) + { + DBUG_RETURN(rc); + } + } + + DEBUG_SYNC(ha_thd(), "rocksdb.update_write_row_after_unique_check"); + + /* + At this point, all locks have been obtained, and all checks for duplicate + keys have been performed. No further errors can be allowed to occur from + here because updates to the transaction will be made and those updates + cannot be easily removed without rolling back the entire transaction. + */ + rc= update_indexes(row_info, pk_changed); + if (rc != 0) + { + DBUG_RETURN(rc); + } + + if (do_bulk_commit(row_info.tx)) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + DBUG_RETURN(0); +} + + +/* + Open a cursor and position it at the passed record +*/ + +void ha_rocksdb::setup_scan_iterator( + const std::shared_ptr& kd, + rocksdb::Slice *slice, + const bool use_all_keys, + const bool is_ascending, + const uint eq_cond_len) +{ + DBUG_ASSERT(kd != nullptr); + DBUG_ASSERT(slice != nullptr); + DBUG_ASSERT(slice->size() >= eq_cond_len); + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + + bool skip_bloom= true; + + rocksdb::Slice eq_cond(slice->data(), eq_cond_len); + if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) + { + skip_bloom= false; + } + + /* + In some cases, setup_scan_iterator() is called multiple times from + the same query but bloom filter can not always be used. + Suppose the following query example. id2 is VARCHAR(30) and PRIMARY KEY (id1, id2). 
+ select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); + In this case, setup_scan_iterator() is called twice, the first time is for + (id1, id2)=(100, '00000000000000000000') and the second time is for (100, '100'). + If prefix bloom filter length is 24 bytes, prefix bloom filter can be used for the + first condition but not for the second condition. + If bloom filter condition is changed, currently it is necessary to destroy and + re-create Iterator. + */ + if (m_scan_it_skips_bloom != skip_bloom) + { + release_scan_iterator(); + } + + /* + SQL layer can call rnd_init() multiple times in a row. + In that case, re-use the iterator, but re-position it at the table start. + */ + if (!m_scan_it) + { + bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); + m_scan_it= tx->get_iterator(kd->get_cf(), skip_bloom, fill_cache); + m_scan_it_skips_bloom= skip_bloom; + } + /* + Seek() will "Position at the first key in the source that at or past target". + The operation cannot fail. + */ + m_scan_it->Seek(*slice); +} + + +void ha_rocksdb::setup_iterator_for_rnd_scan() +{ + uint key_size; + + if (m_pk_descr->m_is_reverse_cf) + m_pk_descr->get_supremum_key(m_pk_packed_tuple, &key_size); + else + m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size); + + rocksdb::Slice table_key((const char*)m_pk_packed_tuple, key_size); + + setup_scan_iterator(m_pk_descr, &table_key); + m_skip_scan_it_next_call= true; +} + + +int ha_rocksdb::rnd_init(bool scan) +{ + DBUG_ENTER("ha_rocksdb::rnd_init"); + + Rdb_transaction *tx= get_or_create_tx(table->in_use); + setup_read_decoders(); + + if (scan) + { + m_rnd_scan_is_new_snapshot= !tx->has_snapshot(); + setup_iterator_for_rnd_scan(); + } + else + { + /* We don't need any preparations for rnd_pos() calls. */ + } + + // If m_lock_rows is on then we will be doing a get_for_update when accessing + // the index, so don't acquire the snapshot right away. Otherwise acquire + // the snapshot immediately. 
+ tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); + + DBUG_RETURN(0); +} + + +int ha_rocksdb::rnd_next(uchar *buf) +{ + int rc; + ha_statistic_increment(&SSV::ha_read_rnd_next_count); + for (;;) + { + rc= rnd_next_with_direction(buf, true); + if (rc != HA_ERR_LOCK_DEADLOCK || !m_rnd_scan_is_new_snapshot) + break; /* exit the loop */ + + // release the snapshot and iterator and then regenerate them + Rdb_transaction *tx= get_or_create_tx(table->in_use); + tx->release_snapshot(); + release_scan_iterator(); + setup_iterator_for_rnd_scan(); + } + + m_rnd_scan_is_new_snapshot= false; + + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + return rc; +} + + +/* + See also secondary_index_read(). +*/ +int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward) +{ + int rc; + DBUG_ENTER("ha_rocksdb::rnd_next"); + + table->status= STATUS_NOT_FOUND; + stats.rows_requested++; + + if (!m_scan_it || !m_scan_it->Valid()) + { + /* + We can get here when SQL layer has called + + h->index_init(PRIMARY); + h->index_read_map(full index tuple, HA_READ_KEY_EXACT); + + In this case, we should return EOF. + */ + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + if (m_skip_scan_it_next_call) + { + m_skip_scan_it_next_call= false; + } + else + { + if (move_forward) + m_scan_it->Next(); /* this call cannot fail */ + else + m_scan_it->Prev(); /* this call cannot fail */ + } + + if (m_scan_it->Valid()) + { + /* check if we're out of this table */ + rocksdb::Slice key= m_scan_it->key(); + if (!m_pk_descr->covers_key(key)) + { + rc= HA_ERR_END_OF_FILE; + } + else + { + if (m_lock_rows != RDB_LOCK_NONE) + { + /* + Lock the row we've just read. + + Now we call get_for_update which will 1) Take a lock and 2) Will fail + if the row was deleted since the snapshot was taken. 
+ */ + Rdb_transaction *tx= get_or_create_tx(table->in_use); + DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); + rocksdb::Status s= tx->get_for_update(m_pk_descr->get_cf(), key, + &m_retrieved_record); + if (!s.ok()) + { + DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, + m_tbl_def)); + } + + // If we called get_for_update() use the value from that call not from + // the iterator as it may be stale since we don't have a snapshot + // when m_lock_rows is not RDB_LOCK_NONE. + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc= convert_record_from_storage_format(&key, buf); + } + else + { + // Use the value from the iterator + rocksdb::Slice value= m_scan_it->value(); + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc= convert_record_from_storage_format(&key, &value, buf); + } + + table->status= 0; + } + } + else + rc= HA_ERR_END_OF_FILE; + + if (!rc) { + stats.rows_read++; + stats.rows_index_next++; + update_row_stats(ROWS_READ); + } + + DBUG_RETURN(rc); +} + + +int ha_rocksdb::rnd_end() +{ + DBUG_ENTER("ha_rocksdb::rnd_end"); + + release_scan_iterator(); + + DBUG_RETURN(0); +} + +int ha_rocksdb::index_init(uint idx, bool sorted) +{ + Rdb_transaction *tx= get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + setup_read_decoders(); + + // If m_lock_rows is not RDB_LOCK_NONE then we will be doing a get_for_update + // when accessing the index, so don't acquire the snapshot right away. + // Otherwise acquire the snapshot immediately. + tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); + + active_index= idx; + + return 0; +} + +int ha_rocksdb::index_end() +{ + release_scan_iterator(); + + active_index= MAX_KEY; + in_range_check_pushed_down= FALSE; + return 0; +} + + +int ha_rocksdb::delete_all_rows(ha_rows* nrows) +{ + DBUG_ENTER("ha_rocksdb::delete_all_rows"); + /* + We will have to delete records one-by-one anyway. Let the SQL layer do + it. 
+ */ + DBUG_RETURN(HA_ERR_WRONG_COMMAND); +} + +int ha_rocksdb::truncate() +{ + m_tbl_def->m_auto_incr_val= 1; + m_tbl_def->m_hidden_pk_val= 1; + remove_rows(m_tbl_def); + return 0; +} + + +/* + Delete the row we've last read. The row is also passed as parameter. + + @detail + The caller guarantees table buf points to the row that was just read. + The row is either table->record[0] or table->record[1]. + (Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)" +*/ +int ha_rocksdb::delete_row(const uchar *buf) +{ + DBUG_ENTER("ha_rocksdb::delete_row"); + + ha_statistic_increment(&SSV::ha_delete_count); + set_last_rowkey(buf); + + rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); + Rdb_transaction *tx= get_or_create_tx(table->in_use); + + uint index= pk_index(table, m_tbl_def); + rocksdb::Status s= delete_or_singledelete(index, tx, + m_pk_descr->get_cf(), + key_slice); + if (!s.ok()) + { + DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def)); + } + + longlong hidden_pk_id= 0; + if (m_tbl_def->m_key_count > 1 && + has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + // Delete the record for every secondary index + for (uint i= 0; i < m_tbl_def->m_key_count; i++) + { + if (!is_pk(i, table, m_tbl_def)) + { + int packed_size; + const std::shared_ptr& kd= m_key_descr_arr[i]; + packed_size= kd->pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, + nullptr, false, hidden_pk_id); + rocksdb::Slice secondary_key_slice( + reinterpret_cast(m_sk_packed_tuple), packed_size); + /* Deleting on secondary key doesn't need any locks: */ + tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(), + secondary_key_slice); + } + } + + if (do_bulk_commit(tx)) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + stats.rows_deleted++; + update_row_stats(ROWS_DELETED); + + DBUG_RETURN(0); +} + + +rocksdb::Status +ha_rocksdb::delete_or_singledelete(uint index, + Rdb_transaction 
*tx, + rocksdb::ColumnFamilyHandle* column_family, + const rocksdb::Slice& key) +{ + if (can_use_single_delete(index)) + return tx->single_delete(column_family, key); + return tx->delete_key(column_family, key); +} + + +void ha_rocksdb::update_stats(void) +{ + DBUG_ENTER("ha_rocksdb::update_stats"); + + stats.records= 0; + stats.index_file_length= 0ul; + stats.data_file_length= 0ul; + stats.mean_rec_length= 0; + for (uint i= 0; i < m_tbl_def->m_key_count; i++) + { + if (is_pk(i, table, m_tbl_def)) + { + stats.data_file_length= m_pk_descr->m_stats.m_actual_disk_size; + stats.records = m_pk_descr->m_stats.m_rows; + } + else + { + stats.index_file_length+= m_key_descr_arr[i]->m_stats.m_actual_disk_size; + } + } + DBUG_VOID_RETURN; +} + +int ha_rocksdb::info(uint flag) +{ + DBUG_ENTER("ha_rocksdb::info"); + + if (!table) + return 1; + + if (flag & HA_STATUS_VARIABLE) + { + /* + Test only to simulate corrupted stats + */ + DBUG_EXECUTE_IF("myrocks_simulate_negative_stats", + m_pk_descr->m_stats.m_actual_disk_size = + -m_pk_descr->m_stats.m_actual_disk_size;); + + update_stats(); + + /* + If any stats are negative due to bad cached stats, re-run analyze table + and re-retrieve the stats. 
+ */ + if (static_cast(stats.data_file_length) < 0 || + static_cast(stats.index_file_length) < 0 || + static_cast(stats.records) < 0) + { + if (analyze(nullptr, nullptr)) { + DBUG_RETURN(1); + } + + update_stats(); + } + + if (stats.records == 0) + { + // most likely, the table is in memtable + // try to deduce from GetApproximateSizes + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; + auto r= get_range(pk_index(table, m_tbl_def), buf); + uint64_t sz= 0; + rdb->GetApproximateSizes( + m_pk_descr->get_cf(), + &r, 1, + &sz, true); + stats.records = sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + stats.data_file_length = sz; + + if (rocksdb_debug_optimizer_n_rows > 0) + stats.records = rocksdb_debug_optimizer_n_rows; + } + + if (stats.records != 0) + stats.mean_rec_length = stats.data_file_length / stats.records; + } + if (flag & HA_STATUS_CONST) + { + ref_length= m_pk_descr->max_storage_fmt_length(); + + // TODO: Needs to reimplement after having real index statistics + for (uint i= 0; i < m_tbl_def->m_key_count; i++) + { + if (is_hidden_pk(i, table, m_tbl_def)) + { + continue; + } + KEY* k= &table->key_info[i]; + for (uint j = 0; j < k->actual_key_parts; j++) + { + const Rdb_index_stats& k_stats= m_key_descr_arr[i]->m_stats; + uint x = k_stats.m_distinct_keys_per_prefix.size() > j && + k_stats.m_distinct_keys_per_prefix[j] > 0 ? + k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j] : + 0; + if (x > stats.records) + x = stats.records; + if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) || + rocksdb_debug_optimizer_n_rows > 0) + { + // Fake cardinality implementation. For example, (idx1, idx2, idx3) index + // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1. + // rec_per_key for the whole index is 1, and multiplied by 2^n if + // n suffix columns of the index are not used. 
+ x = 1 << (k->actual_key_parts-j-1); + } + k->rec_per_key[j]= x; + } + } + } + + if (flag & HA_STATUS_ERRKEY) + { + /* + Currently we support only primary keys so we know which key had a + uniqueness violation. + */ + errkey= m_dupp_errkey; + dup_ref= m_pk_tuple; // TODO(?): this should store packed PK. + } + + if (flag & HA_STATUS_AUTO) + { + stats.auto_increment_value= m_tbl_def->m_auto_incr_val; + } + + DBUG_RETURN(0); +} + +void ha_rocksdb::position(const uchar *record) +{ + DBUG_ENTER("ha_rocksdb::position"); + + longlong hidden_pk_id= 0; + if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + DBUG_ASSERT(false); // should never reach here + + /* + Get packed primary key value from the record. + + (Note: m_last_rowkey has the packed PK of last-read-row, which allows to + handle most cases, but there is an exception to this: when slave applies + RBR events, it fills the record and then calls position(); rnd_pos()) + + Also note that we *can* rely on PK column values being available. This is + because table_flags() includes HA_PRIMARY_KEY_REQUIRED_FOR_POSITION bit. + When that is true, table->prepare_for_position() adds PK columns into the + read set (this may potentially disable index-only access when PK column + cannot be restored from its mem-comparable form in the secondary indexes). + */ + uint packed_size= m_pk_descr->pack_record(table, m_pack_buffer, record, + ref, nullptr, false, hidden_pk_id); + + /* + It could be that mem-comparable form of PK occupies less than ref_length + bytes. Fill the remainder with zeros. 
+ */ + if (ref_length > packed_size) + memset(ref + packed_size, 0, ref_length - packed_size); + + DBUG_VOID_RETURN; +} + + +int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos) +{ + int rc; + size_t len; + DBUG_ENTER("ha_rocksdb::rnd_pos"); + + ha_statistic_increment(&SSV::ha_read_rnd_count); + stats.rows_requested++; + len= m_pk_descr->key_length(table, rocksdb::Slice((const char*)pos, + ref_length)); + if (len == size_t(-1)) + { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); /* Data corruption? */ + } + + rc= get_row_by_rowid(buf, pos, len); + + if (!rc) + { + stats.rows_read++; + update_row_stats(ROWS_READ); + } + + DBUG_RETURN(rc); +} + + +/* + @brief + Calculate (if needed) the bitmap of indexes that are modified by the + current query + + @detail + The calculation done by checking index definitions against the table->write_set + column bitmap. +*/ + +void ha_rocksdb::calc_updated_indexes() +{ + if (!m_update_scope_is_valid) + { + m_update_scope_is_valid= true; + m_update_scope.clear_all(); + + for (uint keynr= 0; keynr < table->s->keys; keynr++) + { + const std::shared_ptr& kd= m_key_descr_arr[keynr]; + /* Walk over all key parts, including the "extended key" suffix */ + uint key_parts= kd->get_key_parts(); + for (uint kp= 0; kp < key_parts; kp++) + { + if (has_hidden_pk(table) && kp + 1 == key_parts) + break; + + Field *field= kd->get_table_field_for_part_no(table, kp); + if (bitmap_is_set(table->write_set, field->field_index)) + { + m_update_scope.set_bit(keynr); + break; + } + } + } + } +} + + +int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) +{ + DBUG_ENTER("ha_rocksdb::update_row"); + DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); + /* + old_data points to record we're updating. 
It is the same as the record + we've just read (for multi-table UPDATE, too, because SQL layer will make + an rnd_pos() call to re-read the record before calling update_row()) + */ + DBUG_ASSERT(new_data == table->record[0]); + + ha_statistic_increment(&SSV::ha_update_count); + int rv; + rv= update_write_row(old_data, new_data, false); + + if (rv == 0) + { + stats.rows_updated++; + update_row_stats(ROWS_UPDATED); + } + DBUG_RETURN(rv); +} + + +/* The following function was copied from ha_blackhole::store_lock: */ +THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + DBUG_ENTER("ha_rocksdb::store_lock"); + + bool in_lock_tables = my_core::thd_in_lock_tables(thd); + + /* First, make a decision about MyRocks's internal locking */ + if (lock_type >= TL_WRITE_ALLOW_WRITE) + { + m_lock_rows = RDB_LOCK_WRITE; + } + else if (lock_type == TL_READ_WITH_SHARED_LOCKS) + { + m_lock_rows = RDB_LOCK_READ; + } + else + { + m_lock_rows = RDB_LOCK_NONE; + if (THDVAR(thd, lock_scanned_rows)) + { + /* + The following logic was copied directly from + ha_innobase::store_lock_with_x_type() in + storage/innobase/handler/ha_innodb.cc and causes MyRocks to leave + locks in place on rows that are in a table that is not beint updated. 
+ */ + uint sql_command = my_core::thd_sql_command(thd); + if ((lock_type == TL_READ && in_lock_tables) || + (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || + can_hold_read_locks_on_select(thd, lock_type)) + { + ulong tx_isolation = my_core::thd_tx_isolation(thd); + if (sql_command != SQLCOM_CHECKSUM && + ((my_core::thd_test_options(thd, OPTION_BIN_LOG) && + tx_isolation > ISO_READ_COMMITTED) || + tx_isolation == ISO_SERIALIZABLE || + (lock_type != TL_READ && lock_type != TL_READ_NO_INSERT) || + (sql_command != SQLCOM_INSERT_SELECT && + sql_command != SQLCOM_REPLACE_SELECT && + sql_command != SQLCOM_UPDATE && + sql_command != SQLCOM_DELETE && + sql_command != SQLCOM_CREATE_TABLE))) + { + m_lock_rows = RDB_LOCK_READ; + } + } + } + } + + /* Then, tell the SQL layer what kind of locking it should use: */ + if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK) + { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && + !in_lock_tables && !my_core::thd_tablespace_op(thd)) + { + lock_type = TL_WRITE_ALLOW_WRITE; + } + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. 
+ */ + + if (lock_type == TL_READ_NO_INSERT && !in_lock_tables) + { + lock_type = TL_READ; + } + + m_db_lock.type= lock_type; + } + *to++= &m_db_lock; + DBUG_RETURN(to); +} + +void ha_rocksdb::read_thd_vars(THD *thd) +{ + m_store_checksums= THDVAR(thd, store_checksums); + m_verify_checksums= THDVAR(thd, verify_checksums); + m_checksums_pct= THDVAR(thd, checksums_pct); +} + +int ha_rocksdb::external_lock(THD *thd, int lock_type) +{ + int res= 0; + DBUG_ENTER("ha_rocksdb::external_lock"); + + int binlog_format= my_core::thd_binlog_format(thd); + bool unsafe_for_binlog= THDVAR(ha_thd(), unsafe_for_binlog); + if (lock_type == F_WRLCK && !thd->rli_slave && + !unsafe_for_binlog && + binlog_format != BINLOG_FORMAT_ROW && + binlog_format != BINLOG_FORMAT_UNSPEC && + my_core::thd_binlog_filter_ok(thd)) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Can't execute updates on master with binlog_format != ROW.", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (lock_type == F_UNLCK) + { + Rdb_transaction* tx= get_or_create_tx(thd); + + tx->io_perf_end_and_record(&m_io_perf); + if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + /* + Do like InnoDB: when we get here, it's time to commit a + single-statement transaction. + + If the statement involved multiple tables, this code will be executed + for each of them, but that's ok because non-first tx->commit() calls + will be no-ops. + */ + res= tx->commit_or_rollback(); + } + } + else + { + if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED || + my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) + { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks supports only READ COMMITTED and " + "REPEATABLE READ isolation levels. " + "Please change from current isolation " + "level %s", + MYF(0), + tx_isolation_names[my_core::thd_tx_isolation(thd)]); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + /* + It's nice to do the following on start of every statement. 
The problem + is, handler->start_stmt() is not called for INSERTs. + So, we put this code here. + */ + Rdb_transaction* tx= get_or_create_tx(thd); + read_thd_vars(thd); + + m_update_scope_is_valid= false; + + if (skip_unique_check()) + { + if ((thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->sql_command == SQLCOM_LOAD || + thd->lex->sql_command == SQLCOM_REPLACE) && + (thd->lex->duplicates == DUP_REPLACE || + thd->lex->duplicates == DUP_UPDATE)) + { + my_printf_error(ER_UNKNOWN_ERROR, + "When unique checking is disabled in MyRocks, INSERT," + "UPDATE,LOAD statements with clauses that update or " + "replace the key (i.e. " + "INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not " + "allowed. Query: %s", + MYF(0), thd->query()); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + if (lock_type == F_WRLCK) + { + if (tx->is_tx_read_only()) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Can't execute updates when you started a transaction " + "with START TRANSACTION WITH CONSISTENT [ROCKSDB] " + "SNAPSHOT.", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* + SQL layer signals us to take a write lock. It does so when starting DML + statement. We should put locks on the rows we're reading. + + Note: sometimes, external_lock() can be called without a prior + ::store_lock call. That's why we need to set lock_* members here, too. + */ + m_lock_rows= RDB_LOCK_WRITE; + + if (thd->lex->sql_command == SQLCOM_CREATE_INDEX || + thd->lex->sql_command == SQLCOM_DROP_INDEX || + thd->lex->sql_command == SQLCOM_ALTER_TABLE) + { + tx->m_ddl_transaction= true; + } + } + rocksdb_register_tx(rocksdb_hton, thd, tx); + tx->io_perf_start(&m_io_perf); + } + + DBUG_RETURN(res); +} + + +/** + @note + A quote from ha_innobase::start_stmt(): + + MySQL calls this function at the start of each SQL statement inside LOCK + TABLES. Inside LOCK TABLES the ::external_lock method does not work to + mark SQL statement borders. 
+ +*/ + +int ha_rocksdb::start_stmt(THD *thd, thr_lock_type lock_type) +{ + Rdb_transaction* tx= get_or_create_tx(thd); + read_thd_vars(thd); + rocksdb_register_tx(ht, thd, tx); + tx->io_perf_start(&m_io_perf); + return 0; +} + +rocksdb::Range get_range( + uint32_t i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], int offset1, + int offset2) +{ + uchar* buf_begin= buf; + uchar* buf_end= buf + Rdb_key_def::INDEX_NUMBER_SIZE; + rdb_netbuf_store_index(buf_begin, i + offset1); + rdb_netbuf_store_index(buf_end, i + offset2); + + return rocksdb::Range( + rocksdb::Slice((const char*) buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE), + rocksdb::Slice((const char*) buf_end, Rdb_key_def::INDEX_NUMBER_SIZE)); +} + +static rocksdb::Range get_range( + const std::shared_ptr& kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], + int offset1, int offset2) +{ + return get_range(kd->get_index_number(), buf, offset1, offset2); +} + +rocksdb::Range get_range(const std::shared_ptr& kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) +{ + if (kd->m_is_reverse_cf) + { + return myrocks::get_range(kd, buf, 1, 0); + } + else + { + return myrocks::get_range(kd, buf, 0, 1); + } +} + +rocksdb::Range ha_rocksdb::get_range( + int i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) const +{ + return myrocks::get_range(m_key_descr_arr[i], buf); +} + + +/* + Drop index thread's main logic +*/ + +void Rdb_drop_index_thread::run() +{ + mysql_mutex_lock(&m_signal_mutex); + + for (;;) { + // The stop flag might be set by shutdown command + // after drop_index_thread releases signal_mutex + // (i.e. while executing expensive Seek()). To prevent drop_index_thread + // from entering long cond_timedwait, checking if stop flag + // is true or not is needed, with drop_index_interrupt_mutex held. + if (m_stop) { + break; + } + + timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + ts.tv_sec += dict_manager.is_drop_index_empty() + ? 
24*60*60 // no filtering + : 60; // filtering + + auto ret __attribute__((__unused__)) = mysql_cond_timedwait( + &m_signal_cond, &m_signal_mutex, &ts); + if (m_stop) { + break; + } + // make sure, no program error is returned + DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); + mysql_mutex_unlock(&m_signal_mutex); + + std::vector indices; + dict_manager.get_ongoing_drop_indexes(&indices); + if (!indices.empty()) { + std::unordered_set finished; + rocksdb::ReadOptions read_opts; + read_opts.total_order_seek = true; // disable bloom filter + + for (auto d : indices) { + uint32 cf_flags= 0; + if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) + { + sql_print_error("RocksDB: Failed to get column family flags " + "from cf id %u. MyRocks data dictionary may " + "get corrupted.", d.cf_id); + abort_with_stack_traces(); + } + rocksdb::ColumnFamilyHandle* cfh= cf_manager.get_cf(d.cf_id); + DBUG_ASSERT(cfh); + bool is_reverse_cf= cf_flags & Rdb_key_def::REVERSE_CF_FLAG; + + bool index_removed= false; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + rdb_netbuf_store_uint32(key_buf, d.index_id); + rocksdb::Slice key = rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; + rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf?1:0, + is_reverse_cf?0:1); + rocksdb::CompactRangeOptions compact_range_options; + compact_range_options.bottommost_level_compaction = + rocksdb::BottommostLevelCompaction::kForce; + compact_range_options.exclusive_manual_compaction = false; + rocksdb::Status status = DeleteFilesInRange(rdb->GetBaseDB(), cfh, + &range.start, &range.limit); + if (!status.ok()) + { + if (status.IsShutdownInProgress()) + { + break; + } + rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); + } + status = rdb->CompactRange( + compact_range_options, cfh, &range.start, &range.limit); + if (!status.ok()) + { + if (status.IsShutdownInProgress()) + { + break; + } + rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); + } + 
std::unique_ptr it( + rdb->NewIterator(read_opts, cfh)); + it->Seek(key); + if (is_reverse_cf) + { + if (!it->Valid()) + { + it->SeekToLast(); + } + else + { + it->Prev(); + } + } + if (!it->Valid()) + { + index_removed= true; + } + else + { + if (memcmp(it->key().data(), key_buf, Rdb_key_def::INDEX_NUMBER_SIZE)) + { + // Key does not have same prefix + index_removed= true; + } + } + if (index_removed) + { + finished.insert(d); + } + } + + if (!finished.empty()) { + dict_manager.finish_drop_indexes(finished); + } + } + mysql_mutex_lock(&m_signal_mutex); + } + + mysql_mutex_unlock(&m_signal_mutex); +} + + +Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* tablename) +{ + DBUG_ASSERT(tablename != nullptr); + + std::string str; + if (rdb_normalize_tablename(tablename, &str)) + { + // We were not passed table name? + DBUG_ASSERT(0); + return nullptr; + } + + return ddl_manager.find(str); +} + +/* + Note: the following function is called when the table is not open. That is, + this->table==nullptr, pk_key_descr==nullptr, etc. + + tablename points to line in form "./dbname/tablename". +*/ + +int ha_rocksdb::delete_table(const char *tablename) +{ + DBUG_ENTER("ha_rocksdb::delete_table"); + std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch *batch= wb.get(); + + /* Find the table in the hash */ + Rdb_tbl_def *tbl = get_table_if_exists(tablename); + if (!tbl) + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch); + + /* + Remove the table entry in data dictionary (this will also remove it from + the persistent data dictionary). 
+ */ + ddl_manager.remove(tbl, batch, true); + if (dict_manager.commit(batch)) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + rdb_drop_idx_thread.signal(); + + DBUG_RETURN(0); +} + + +void ha_rocksdb::remove_rows(Rdb_tbl_def *tbl) +{ + rocksdb::WriteOptions wo= rdb_get_rocksdb_write_options(handler::ha_thd()); + + rocksdb::ReadOptions opts; + opts.total_order_seek= true; + rocksdb::Iterator* it= rdb->NewIterator(opts); + + char key_buf[MAX_KEY_LENGTH]; + uint key_len; + /* + Remove all records in each index. + (This is is not crash-safe, but it doesn't matter, because bulk row + deletion will be handled on rocksdb side) + */ + for (uint i= 0; i < tbl->m_key_count ; i++) + { + const std::shared_ptr& kd= tbl->m_key_descr_arr[i]; + kd->get_infimum_key(reinterpret_cast(key_buf), &key_len); + + rocksdb::Slice table_key(key_buf, key_len); + it->Seek(table_key); + while (it->Valid()) + { + rocksdb::Slice key= it->key(); + if (!kd->covers_key(key)) + { + break; + } + + if (can_use_single_delete(i)) + rdb->SingleDelete(wo, key); + else + rdb->Delete(wo, key); + it->Next(); + } + } + delete it; +} + + +int ha_rocksdb::rename_table(const char *from, const char *to) +{ + DBUG_ASSERT(from != nullptr); + DBUG_ASSERT(to != nullptr); + + bool write_err= false; + std::string from_str; + std::string to_str; + std::string from_db; + std::string to_db; + + if (rdb_normalize_tablename(from, &from_str) || + rdb_split_normalized_tablename(from_str, &from_db)) + { + return HA_ERR_INTERNAL_ERROR; + } + + if (rdb_normalize_tablename(to, &to_str) || + rdb_split_normalized_tablename(to_str, &to_db)) + { + return HA_ERR_INTERNAL_ERROR; + } + + // If the user changed the database part of the name then validate that the + // 'to' database exists. + if (from_db != to_db && !rdb_database_exists(to_db)) + { + return HA_ERR_INTERNAL_ERROR; + } + + /* Check if any index has a per-index column family */ + { + /* + We can't check this directly, because + 1. 
the object that referred to the old table has been close()d. + 2. this ha_rocksdb object has no connection to any table at all, it has + been just created with (TABLE*)nullptr. + + So, we have to find the old table in the ddl_manager (it's there because + it has been opened by this statement), and check is_auto_cf there. + */ + Rdb_tbl_def *tdef; + if (!(tdef= ddl_manager.find(from_str))) + return HA_ERR_INTERNAL_ERROR; + + for (uint i= 0; i < tdef->m_key_count; i++) + { + DBUG_ASSERT(tdef->m_key_descr_arr != nullptr); + + if (tdef->m_key_descr_arr[i]->m_is_auto_cf) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "ALTER TABLE on table with per-index CF"); + return HA_ERR_INTERNAL_ERROR; + } + } + } + + std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch *batch= wb.get(); + dict_manager.lock(); + write_err= ddl_manager.rename(from_str, to_str, batch) + || dict_manager.commit(batch); + dict_manager.unlock(); + + return write_err ? HA_ERR_INTERNAL_ERROR : 0; +} + +/** + check_if_incompatible_data() called if ALTER TABLE can't detect otherwise + if new and old definition are compatible + + @details If there are no other explicit signs like changed number of + fields this function will be called by compare_tables() + (sql/sql_tables.cc) to decide should we rewrite whole table or only .frm + file. + +*/ + +bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *info, + uint table_changes) +{ + DBUG_ENTER("ha_rocksdb::check_if_incompatible_data"); + // this function is needed only for online alter-table + DBUG_RETURN(COMPATIBLE_DATA_NO); +} + + +int ha_rocksdb::extra(enum ha_extra_function operation) +{ + DBUG_ENTER("ha_rocksdb::extra"); + + switch (operation) { + case HA_EXTRA_KEYREAD: + m_keyread_only= true; + break; + case HA_EXTRA_NO_KEYREAD: + m_keyread_only= false; + break; + case HA_EXTRA_FLUSH: + /* + If the table has blobs, then they are part of m_retrieved_record. + This call invalidates them. 
+ */ + m_retrieved_record.clear(); + break; + default: + break; + } + + DBUG_RETURN(0); +} + +/* + Given a starting key and an ending key, estimate the number of rows that + will exist between the two keys. +*/ +ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, + key_range *max_key) +{ + DBUG_ENTER("ha_rocksdb::records_in_range"); + ha_rows ret = THDVAR(ha_thd(), records_in_range); + if (ret) { + DBUG_RETURN(ret); + } + if (table->force_index) { + ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range); + if (force_rows) { + DBUG_RETURN(force_rows); + } + } + + const std::shared_ptr& kd= m_key_descr_arr[inx]; + DBUG_ASSERT(kd); + + uint size1=0; + if (min_key) { + size1 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + min_key->key, min_key->keypart_map); + if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV || + min_key->flag == HA_READ_PREFIX_LAST || + min_key->flag == HA_READ_AFTER_KEY) + { + kd->successor(m_sk_packed_tuple, size1); + } + } else { + kd->get_infimum_key(m_sk_packed_tuple, &size1); + } + + uint size2=0; + if (max_key) { + size2 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old, + max_key->key, max_key->keypart_map); + if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV || + max_key->flag == HA_READ_PREFIX_LAST || + max_key->flag == HA_READ_AFTER_KEY) + { + kd->successor(m_sk_packed_tuple_old, size2); + } + // pad the upper key with FFFFs to make sure it is more than the lower + if (size1 > size2) { + memset(m_sk_packed_tuple_old+size2, 0xff, size1-size2); + size2 = size1; + } + } else { + kd->get_supremum_key(m_sk_packed_tuple_old, &size2); + } + + rocksdb::Slice slice1((const char*) m_sk_packed_tuple, size1); + rocksdb::Slice slice2((const char*) m_sk_packed_tuple_old, size2); + + // slice1 >= slice2 means no row will match + if (slice1.compare(slice2) >= 0) + { + DBUG_RETURN(0); + } + + rocksdb::Range r( + kd->m_is_reverse_cf ? slice2 : slice1, + kd->m_is_reverse_cf ? 
slice1 : slice2 + ); + + uint64_t sz=0; + auto disk_size = kd->m_stats.m_actual_disk_size; + if (disk_size == 0) + disk_size = kd->m_stats.m_data_size; + auto rows = kd->m_stats.m_rows; + if (rows == 0 || disk_size == 0) + { + rows= 1; + disk_size= ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + } + // Getting statistics, including from Memtables + rdb->GetApproximateSizes( + kd->get_cf(), + &r, 1, + &sz, true); + ret = rows*sz/disk_size; + + /* + GetApproximateSizes() gives estimates so ret might exceed stats.records. + MySQL then decides to use full index scan rather than range scan, which + is not efficient for most cases. + To prevent this, changing estimated records slightly smaller than + stats.records. + */ + if (ret >= stats.records) + { + ret = stats.records * 0.99; + } + + if (rocksdb_debug_optimizer_n_rows > 0) + { + ret= rocksdb_debug_optimizer_n_rows; + } + else if (ret == 0) + { + ret = 1; + } + + DBUG_RETURN(ret); +} + + +void ha_rocksdb::update_create_info(HA_CREATE_INFO *create_info) +{ + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) + { + create_info->auto_increment_value= m_tbl_def->m_auto_incr_val; + } +} + +/** + @brief + Doing manual compaction on OPTIMIZE TABLE in RocksDB. + Compaction itself is executed by background thread in RocksDB, but + CompactRange() waits until compaction completes so this function + may take a long time. + Since RocksDB dataset is allocated per index id, OPTIMIZE TABLE + triggers manual compaction for all indexes of the table. + @details + Compaction range is from the beginning of the index id to + the first row of the next index id. When using reverse order + column family, the first row of the next index id should be + the last row of the previous index id. 
+*/ +int ha_rocksdb::optimize(THD *thd, HA_CHECK_OPT* check_opt) +{ + int rc= 0; + for (uint i= 0; i < table->s->keys; i++) + { + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; + auto range = get_range(i, buf); + if (!rdb->CompactRange(rocksdb::CompactRangeOptions(), + m_key_descr_arr[i]->get_cf(), + &range.start, &range.limit).ok()) + { + rc= 1; + break; + } + } + return rc; +} + +int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) +{ + DBUG_ENTER("ha_rocksdb::analyze"); + + if (!table) + DBUG_RETURN(1); + + // find per column family key ranges which need to be queried + std::unordered_map> + ranges; + std::unordered_set ids_to_check; + std::vector buf(table->s->keys * 2 * Rdb_key_def::INDEX_NUMBER_SIZE); + for (uint i = 0; i < table->s->keys; i++) + { + auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; + const std::shared_ptr& kd= m_key_descr_arr[i]; + ranges[kd->get_cf()].push_back(get_range(i, bufp)); + ids_to_check.insert(kd->get_gl_index_id()); + } + + // for analyze statements, force flush on memtable to get accurate cardinality + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) && + !rocksdb_pause_background_work) + { + for (auto it : ids_to_check) + { + rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id)); + } + } + + // get RocksDB table properties for these ranges + rocksdb::TablePropertiesCollection props; + for (auto it : ranges) + { + auto old_size __attribute__((__unused__)) = props.size(); + auto status = rdb->GetPropertiesOfTablesInRange( + it.first, &it.second[0], it.second.size(), &props); + DBUG_ASSERT(props.size() >= old_size); + if (!status.ok()) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + int num_sst= 0; + // group stats per index id + std::unordered_map stats; + for (auto it : ids_to_check) + { + // Initialize the stats to 0. If there are no files that contain + // this gl_index_id, then 0 should be stored for the cached stats. 
+ stats[it] = Rdb_index_stats(it); + } + for (auto it : props) + { + std::vector sst_stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); + /* + sst_stats is a list of index statistics for indexes that have entries + in the current SST file. + */ + for (auto it1 : sst_stats) + { + /* + Only update statistics for indexes that belong to this SQL table. + + The reason is: We are walking through all SST files that have + entries from this table (and so can compute good statistics). For + other SQL tables, it can be that we're only seeing a small fraction + of table's entries (and so we can't update statistics based on that). + */ + if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end()) + continue; + + const std::shared_ptr& kd= + ddl_manager.find(it1.m_gl_index_id); + stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length()); + } + num_sst++; + } + + // set and persist new stats + ddl_manager.set_stats(stats); + ddl_manager.persist_stats(true); + + DBUG_RETURN(0); +} + +void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment, + ulonglong nb_desired_values, + ulonglong *first_value, + ulonglong *nb_reserved_values) +{ + /* + MySQL has a somewhat complicated way of handling the auto-increment value. + The first time get_auto_increment is called for a statement, + nb_desired_values is the estimate for how many values will be needed. The + engine can then reserve some values, and those will be automatically used + by MySQL, until a hard-coded value shows up in the insert statement, after + which MySQL again calls this function to reset its starting value. + * + For simplicity we will just ignore nb_desired_values - we aren't going to + reserve any extra values for a multi-insert statement. Each row will + simply acquire the next value as needed and we will always tell MySQL that + we only reserved 1 value. 
Since we are using an atomic value for + m_auto_incr_val this should be safe - if we had to grab a mutex, doing + an actual reserve of some values might be a better solution. + */ + *first_value= m_tbl_def->m_auto_incr_val++; + *nb_reserved_values= 1; +} + + +#ifndef DBUG_OFF + +/* Debugger help function */ +static char dbug_item_print_buf[512]; + +const char *dbug_print_item(Item *item) +{ + char *buf= dbug_item_print_buf; + String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin); + str.length(0); + if (!item) + return "(Item*)nullptr"; + item->print(&str ,QT_ORDINARY); + if (str.c_ptr() == buf) + return buf; + else + return "Couldn't fit into buffer"; +} + +#endif /*DBUG_OFF*/ + + +/** + SQL layer calls this function to push an index condition. + + @details + The condition is for index keyno (only one condition can be pushed at a + time). + The caller guarantees that condition refers only to index fields; besides + that, fields must have + + $field->part_of_key.set_bit(keyno) + + which means that + + (handler->index_flags(keyno, $keypart, 0) & HA_KEYREAD_ONLY) == 1 + + which means that field value can be restored from the index tuple. + + @return + Part of condition we couldn't check (always nullptr). +*/ + +class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* idx_cond) +{ + DBUG_ASSERT(keyno != MAX_KEY); + DBUG_ASSERT(idx_cond != nullptr); + + pushed_idx_cond = idx_cond; + pushed_idx_cond_keyno = keyno; + in_range_check_pushed_down = TRUE; + + /* We will check the whole condition */ + return nullptr; +} + + +/* + @brief + Check the index condition. + + @detail + Check the index condition. 
(The caller has unpacked all needed index + columns into table->record[0]) + + @return + ICP_NO_MATCH - Condition not satisfied (caller should continue + scanning) + OUT_OF_RANGE - We've left the range we're scanning (caller should + stop scanning and return HA_ERR_END_OF_FILE) + + ICP_MATCH - Condition is satisfied (caller should fetch the record + and return it) +*/ + +enum icp_result ha_rocksdb::check_index_cond() +{ + DBUG_ASSERT(pushed_idx_cond); + DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY); + + if (end_range && compare_key_icp(end_range) > 0) + { + /* caller should return HA_ERR_END_OF_FILE already */ + return ICP_OUT_OF_RANGE; + } + + return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH; +} + + +/** + Checking if an index is used for ascending scan or not + + @detail + Currently RocksDB does not support bloom filter for + prefix lookup + descending scan, but supports bloom filter for + prefix lookup + ascending scan. This function returns true if + the scan pattern is absolutely ascending. + @param kd + @param find_flag +*/ +bool ha_rocksdb::is_ascending(const std::shared_ptr& kd, + enum ha_rkey_function find_flag) const +{ + DBUG_ASSERT(kd != nullptr); + + bool is_ascending; + switch (find_flag) { + case HA_READ_KEY_EXACT: + { + is_ascending = !kd->m_is_reverse_cf; + break; + } + case HA_READ_PREFIX: + { + is_ascending= true; + break; + } + case HA_READ_KEY_OR_NEXT: + case HA_READ_AFTER_KEY: + { + is_ascending = !kd->m_is_reverse_cf; + break; + } + case HA_READ_KEY_OR_PREV: + case HA_READ_BEFORE_KEY: + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + { + is_ascending = kd->m_is_reverse_cf; + break; + } + default: + is_ascending= false; + } + return is_ascending; +} + +/* + Checks if inplace alter is supported for a given operation. 
+*/ + +my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( + TABLE* /* altered_table */, + my_core::Alter_inplace_info *ha_alter_info) +{ + DBUG_ENTER("check_if_supported_alter"); + + if (ha_alter_info->handler_flags & + ~(my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX)) + { + DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + DBUG_RETURN(my_core::HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE); +} + +/** + Allows the storage engine to update internal structures with concurrent + writes blocked. If check_if_supported_inplace_alter() returns + HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or + HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with + exclusive lock otherwise the same level of locking as for + inplace_alter_table() will be used. + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @note For partitioning, failing to prepare one partition, means that + commit_inplace_alter_table() will be called to roll back changes for + all partitions. This means that commit_inplace_alter_table() might be + called without prepare_inplace_alter_table() having been called first + for a given partition. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. 
+ + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::prepare_inplace_alter_table( + TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info) +{ + DBUG_ENTER("prepare_inplace_alter_table"); + uint n_dropped_keys= ha_alter_info->index_drop_count; + uint n_added_keys= ha_alter_info->index_add_count; + uint old_n_keys= m_tbl_def->m_key_count; + uint new_n_keys= altered_table->s->keys; + if (has_hidden_pk(altered_table)) + { + new_n_keys += 1; + } + + std::shared_ptr* old_key_descr= m_tbl_def->m_key_descr_arr; + std::shared_ptr* new_key_descr= + new std::shared_ptr[new_n_keys]; + + Rdb_tbl_def* new_tdef= new Rdb_tbl_def(m_tbl_def->full_tablename()); + new_tdef->m_key_descr_arr= new_key_descr; + new_tdef->m_key_count= new_n_keys; + new_tdef->m_auto_incr_val= + m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); + new_tdef->m_hidden_pk_val= + m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX) + && create_key_defs(altered_table, new_tdef, table, m_tbl_def)) + { + /* Delete the new key descriptors */ + delete[] new_key_descr; + + /* + Explicitly mark as nullptr so we don't accidentally remove entries + from data dictionary on cleanup (or cause double delete[]). 
+ */ + new_tdef->m_key_descr_arr= nullptr; + delete new_tdef; + DBUG_RETURN(1); + } + + std::unordered_set> added_indexes; + std::unordered_set dropped_index_ids; + + uint i; + uint j; + const KEY* key; + + /* Determine which(if any) key definition(s) need to be dropped */ + for (i = 0; i < ha_alter_info->index_drop_count; i++) + { + const KEY* key = ha_alter_info->index_drop_buffer[i]; + for (j = 0; j < old_n_keys; j++) + { + if (!old_key_descr[j]->m_name.compare(key->name)) + { + dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); + break; + } + } + } + + DBUG_ASSERT(dropped_index_ids.size() == ha_alter_info->index_drop_count); + + /* Determine which(if any) key definitions(s) need to be added */ + for (i = 0; i < ha_alter_info->index_add_count; i++) + { + key = &ha_alter_info->key_info_buffer[ + ha_alter_info->index_add_buffer[i]]; + for (j = 0; j < new_n_keys; j++) + { + if (!new_key_descr[j]->m_name.compare(key->name)) + { + added_indexes.insert(new_key_descr[j]); + break; + } + } + } + + DBUG_ASSERT(added_indexes.size() == ha_alter_info->index_add_count); + + ha_alter_info->handler_ctx= new Rdb_inplace_alter_ctx( + new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, + added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys); + + DBUG_RETURN(0); +} + +/** + Alter the table structure in-place with operations specified using + HA_ALTER_FLAGS and Alter_inplace_info. The level of concurrency allowed + during this operation depends on the return value from + check_if_supported_inplace_alter(). + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. 
+ + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::inplace_alter_table(TABLE* altered_table, + my_core::Alter_inplace_info *ha_alter_info) +{ + /* Do nothing for now */ + DBUG_ENTER("inplace_alter_table"); + DBUG_ASSERT(ha_alter_info->handler_ctx); + + Rdb_inplace_alter_ctx* ctx= + static_cast (ha_alter_info->handler_ctx); + + if (ha_alter_info->handler_flags & my_core::Alter_inplace_info::ADD_INDEX) + { + /* + Buffers need to be set up again to account for new, possibly longer + secondary keys. + */ + free_key_buffers(); + if (alloc_key_buffers(altered_table, ctx->m_new_tdef)) + { + DBUG_RETURN(1); + } + + /* Populate all new secondary keys by scanning primary key */ + if (inplace_populate_sk(altered_table, ctx->m_added_indexes)) + { + DBUG_RETURN(1); + } + } + DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", DBUG_RETURN(1);); + + DBUG_RETURN(0); +} + +/** + Scan the Primary Key index entries and populate the new secondary keys. +*/ +int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, + const std::unordered_set>& indexes) +{ + DBUG_ENTER("ha_rocksdb::inplace_populate_sk"); + std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch *batch= wb.get(); + + /* Update the data dictionary */ + std::unordered_set create_index_ids; + for (auto& index : indexes) + { + create_index_ids.insert(index->get_gl_index_id()); + } + dict_manager.add_create_index(create_index_ids, batch); + dict_manager.commit(batch); + + const bool hidden_pk_exists = has_hidden_pk(table); + + int res= 0; + Rdb_transaction *tx; + ulonglong rdb_merge_buf_size= THDVAR(ha_thd(), merge_buf_size); + ulonglong rdb_merge_combine_read_size= THDVAR(ha_thd(), + merge_combine_read_size); + + for (auto& index : indexes) + { + const rocksdb::Comparator* index_comp= index->get_cf()->GetComparator(); + Rdb_index_merge rdb_merge(rdb_merge_buf_size, rdb_merge_combine_read_size, + index_comp); + + if ((res= rdb_merge.init())) + { + DBUG_RETURN(res); + } + + /* + Note: We 
pass in the currently existing table + tbl_def object here, + as the pk index position may have changed in the case of hidden primary + keys. + */ + uint pk= pk_index(table, m_tbl_def); + ha_index_init(pk, true); + + /* Scan each record in the primary key in order */ + for (res = index_first(table->record[0]); res == 0; + res = index_next(table->record[0])) + { + longlong hidden_pk_id= 0; + if (hidden_pk_exists && + read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + { + // NO_LINT_DEBUG + sql_print_error("Error retrieving hidden pk id."); + ha_index_end(); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* Create new secondary index entry */ + int new_packed_size= index->pack_record(new_table_arg, m_pack_buffer, + table->record[0], + m_sk_packed_tuple, &m_sk_tails, + should_store_checksums(), + hidden_pk_id); + + rocksdb::Slice key= rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + rocksdb::Slice val= rocksdb::Slice( + reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); + + /* + Add record to offset tree in preparation for writing out to + disk in sorted chunks. + */ + if ((res= rdb_merge.add(key, val))) + { + ha_index_end(); + DBUG_RETURN(res); + } + } + + if (res != HA_ERR_END_OF_FILE) + { + // NO_LINT_DEBUG + sql_print_error("Error retrieving index entry from primary key."); + ha_index_end(); + DBUG_RETURN(res); + } + + tx= get_or_create_tx(table->in_use); + ha_index_end(); + + /* + Perform an n-way merge of n sorted buffers on disk, then writes all + results to RocksDB via SSTFileWriter API. + */ + rocksdb::Slice merge_key; + rocksdb::Slice merge_val; + while ((res= rdb_merge.next(&merge_key, &merge_val)) == 0) + { + /* + Insert key and slice to SST via SSTFileWriter API. + */ + if ((res= bulk_load_key(tx, index, merge_key, merge_val))) + { + break; + } + } + + /* + Here, res == -1 means that we are finished, while > 0 means an error + occurred. 
+ */ + if (res > 0) + { + // NO_LINT_DEBUG + sql_print_error("Error while bulk loading keys in external merge sort."); + DBUG_RETURN(res); + } + + if ((res= tx->finish_bulk_load())) + { + // NO_LINT_DEBUG + sql_print_error("Error finishing bulk load."); + DBUG_RETURN(res); + } + } + + /* + Explicitly tell jemalloc to clean up any unused dirty pages at this point. + See https://reviews.facebook.net/D63723 for more details. + */ + purge_all_jemalloc_arenas(); + + DBUG_EXECUTE_IF("crash_during_online_index_creation", DBUG_SUICIDE();); + DBUG_RETURN(res); +} + +/** + Commit or rollback the changes made during prepare_inplace_alter_table() + and inplace_alter_table() inside the storage engine. + Note that in case of rollback the allowed level of concurrency during + this operation will be the same as for inplace_alter_table() and thus + might be higher than during prepare_inplace_alter_table(). (For example, + concurrent writes were blocked during prepare, but might not be during + rollback). + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function with commit= true reports error, it will be called + again with commit= false. + + @note In case of partitioning, this function might be called for rollback + without prepare_inplace_alter_table() having been called first. + Also partitioned tables sets ha_alter_info->group_commit_ctx to a NULL + terminated array of the partitions handlers and if all of them are + committed as one, then group_commit_ctx should be set to NULL to indicate + to the partitioning handler that all partitions handlers are committed. + @see prepare_inplace_alter_table(). + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + @param commit True => Commit, False => Rollback. 
+ + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::commit_inplace_alter_table( + my_core::TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info, + bool commit) +{ + Rdb_inplace_alter_ctx* ctx0= + static_cast (ha_alter_info->handler_ctx); + + DBUG_ENTER("commit_inplace_alter_table"); + + /* + IMPORTANT: When rollback is requested, mysql will abort with + an assertion failure. That means every failed commit during inplace alter + table will result in a fatal error on the server. Indexes ongoing creation + will be detected when the server restarts, and dropped. + + For partitioned tables, a rollback call to this function (commit == false) + is done for each partition. A successful commit call only executes once + for all partitions. + */ + if (!commit) + { + /* If ctx has not been created yet, nothing to do here */ + if (!ctx0) + { + DBUG_RETURN(0); + } + + /* + Cannot call destructor for Rdb_tbl_def directly because we don't want to + erase the mappings inside the ddl_manager, as the old_key_descr is still + using them. + */ + if (ctx0->m_new_key_descr) + { + /* Delete the new key descriptors */ + for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++) + { + ctx0->m_new_key_descr[i]= nullptr; + } + + delete[] ctx0->m_new_key_descr; + ctx0->m_new_key_descr = nullptr; + ctx0->m_new_tdef->m_key_descr_arr = nullptr; + + delete ctx0->m_new_tdef; + } + + DBUG_RETURN(0); + } + + DBUG_ASSERT(ctx0); + + /* + For partitioned tables, we need to commit all changes to all tables at + once, unlike in the other inplace alter API methods. 
+ */ + inplace_alter_handler_ctx** ctx_array; + inplace_alter_handler_ctx* ctx_single[2]; + + if (ha_alter_info->group_commit_ctx) + { + DBUG_EXECUTE_IF("crash_during_index_creation_partition", + DBUG_SUICIDE();); + ctx_array = ha_alter_info->group_commit_ctx; + } + else + { + ctx_single[0] = ctx0; + ctx_single[1] = nullptr; + ctx_array = ctx_single; + } + + DBUG_ASSERT(ctx0 == ctx_array[0]); + ha_alter_info->group_commit_ctx = nullptr; + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX)) + { + std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch *batch= wb.get(); + std::unordered_set create_index_ids; + + m_tbl_def= ctx0->m_new_tdef; + m_key_descr_arr= m_tbl_def->m_key_descr_arr; + m_pk_descr= m_key_descr_arr[pk_index(altered_table, m_tbl_def)]; + + dict_manager.lock(); + for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) + { + Rdb_inplace_alter_ctx* ctx= static_cast (*pctx); + + /* Mark indexes to be dropped */ + dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch); + + for (auto& index : ctx->m_added_indexes) + { + create_index_ids.insert(index->get_gl_index_id()); + } + + if (ddl_manager.put_and_write(ctx->m_new_tdef, batch)) + { + /* + Failed to write new entry into data dictionary, this should never + happen. + */ + DBUG_ASSERT(0); + } + } + + if (dict_manager.commit(batch)) + { + /* + Should never reach here. We assume MyRocks will abort if commit fails. 
+ */ + DBUG_ASSERT(0); + } + + dict_manager.unlock(); + + /* Mark ongoing create indexes as finished/remove from data dictionary */ + dict_manager.finish_indexes_operation(create_index_ids, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + rdb_drop_idx_thread.signal(); + } + + DBUG_RETURN(0); +} + +#define SHOW_FNAME(name) rocksdb_show_##name + +#define DEF_SHOW_FUNC(name, key) \ + static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR *var, char *buff) \ + { \ + rocksdb_status_counters.name = \ + rocksdb_stats->getTickerCount(rocksdb::key); \ + var->type = SHOW_LONGLONG; \ + var->value = (char *)&rocksdb_status_counters.name; \ + return 0; \ + } + +#define DEF_STATUS_VAR(name) \ + {"rocksdb_" #name, (char*) &SHOW_FNAME(name), SHOW_FUNC} + +#define DEF_STATUS_VAR_PTR(name, ptr, option) \ + {"rocksdb_" name, (char*) ptr, option} + +#define DEF_STATUS_VAR_FUNC(name, ptr, option) \ + {name, reinterpret_cast(ptr), option} + +struct rocksdb_status_counters_t { + uint64_t block_cache_miss; + uint64_t block_cache_hit; + uint64_t block_cache_add; + uint64_t block_cache_index_miss; + uint64_t block_cache_index_hit; + uint64_t block_cache_filter_miss; + uint64_t block_cache_filter_hit; + uint64_t block_cache_data_miss; + uint64_t block_cache_data_hit; + uint64_t bloom_filter_useful; + uint64_t memtable_hit; + uint64_t memtable_miss; + uint64_t compaction_key_drop_new; + uint64_t compaction_key_drop_obsolete; + uint64_t compaction_key_drop_user; + uint64_t number_keys_written; + uint64_t number_keys_read; + uint64_t number_keys_updated; + uint64_t bytes_written; + uint64_t bytes_read; + uint64_t no_file_closes; + uint64_t no_file_opens; + uint64_t no_file_errors; + uint64_t l0_slowdown_micros; + uint64_t memtable_compaction_micros; + uint64_t l0_num_files_stall_micros; + uint64_t rate_limit_delay_millis; + uint64_t num_iterators; + uint64_t number_multiget_get; + uint64_t number_multiget_keys_read; + uint64_t number_multiget_bytes_read; + uint64_t number_deletes_filtered; + uint64_t 
number_merge_failures; + uint64_t sequence_number; + uint64_t bloom_filter_prefix_checked; + uint64_t bloom_filter_prefix_useful; + uint64_t number_reseeks_iteration; + uint64_t getupdatessince_calls; + uint64_t block_cachecompressed_miss; + uint64_t block_cachecompressed_hit; + uint64_t wal_synced; + uint64_t wal_bytes; + uint64_t write_self; + uint64_t write_other; + uint64_t write_timedout; + uint64_t write_wal; + uint64_t flush_write_bytes; + uint64_t compact_read_bytes; + uint64_t compact_write_bytes; + uint64_t number_superversion_acquires; + uint64_t number_superversion_releases; + uint64_t number_superversion_cleanups; + uint64_t number_block_not_compressed; +}; + +static rocksdb_status_counters_t rocksdb_status_counters; + +DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS) +DEF_SHOW_FUNC(block_cache_hit, BLOCK_CACHE_HIT) +DEF_SHOW_FUNC(block_cache_add, BLOCK_CACHE_ADD) +DEF_SHOW_FUNC(block_cache_index_miss, BLOCK_CACHE_INDEX_MISS) +DEF_SHOW_FUNC(block_cache_index_hit, BLOCK_CACHE_INDEX_HIT) +DEF_SHOW_FUNC(block_cache_filter_miss, BLOCK_CACHE_FILTER_MISS) +DEF_SHOW_FUNC(block_cache_filter_hit, BLOCK_CACHE_FILTER_HIT) +DEF_SHOW_FUNC(block_cache_data_miss, BLOCK_CACHE_DATA_MISS) +DEF_SHOW_FUNC(block_cache_data_hit, BLOCK_CACHE_DATA_HIT) +DEF_SHOW_FUNC(bloom_filter_useful, BLOOM_FILTER_USEFUL) +DEF_SHOW_FUNC(memtable_hit, MEMTABLE_HIT) +DEF_SHOW_FUNC(memtable_miss, MEMTABLE_MISS) +DEF_SHOW_FUNC(compaction_key_drop_new, COMPACTION_KEY_DROP_NEWER_ENTRY) +DEF_SHOW_FUNC(compaction_key_drop_obsolete, COMPACTION_KEY_DROP_OBSOLETE) +DEF_SHOW_FUNC(compaction_key_drop_user, COMPACTION_KEY_DROP_USER) +DEF_SHOW_FUNC(number_keys_written, NUMBER_KEYS_WRITTEN) +DEF_SHOW_FUNC(number_keys_read, NUMBER_KEYS_READ) +DEF_SHOW_FUNC(number_keys_updated, NUMBER_KEYS_UPDATED) +DEF_SHOW_FUNC(bytes_written, BYTES_WRITTEN) +DEF_SHOW_FUNC(bytes_read, BYTES_READ) +DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES) +DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS) +DEF_SHOW_FUNC(no_file_errors, 
NO_FILE_ERRORS) +DEF_SHOW_FUNC(l0_slowdown_micros, STALL_L0_SLOWDOWN_MICROS) +DEF_SHOW_FUNC(memtable_compaction_micros, STALL_MEMTABLE_COMPACTION_MICROS) +DEF_SHOW_FUNC(l0_num_files_stall_micros, STALL_L0_NUM_FILES_MICROS) +DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS) +DEF_SHOW_FUNC(num_iterators, NO_ITERATORS) +DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS) +DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ) +DEF_SHOW_FUNC(number_multiget_bytes_read, NUMBER_MULTIGET_BYTES_READ) +DEF_SHOW_FUNC(number_deletes_filtered, NUMBER_FILTERED_DELETES) +DEF_SHOW_FUNC(number_merge_failures, NUMBER_MERGE_FAILURES) +DEF_SHOW_FUNC(sequence_number, SEQUENCE_NUMBER) +DEF_SHOW_FUNC(bloom_filter_prefix_checked, BLOOM_FILTER_PREFIX_CHECKED) +DEF_SHOW_FUNC(bloom_filter_prefix_useful, BLOOM_FILTER_PREFIX_USEFUL) +DEF_SHOW_FUNC(number_reseeks_iteration, NUMBER_OF_RESEEKS_IN_ITERATION) +DEF_SHOW_FUNC(getupdatessince_calls, GET_UPDATES_SINCE_CALLS) +DEF_SHOW_FUNC(block_cachecompressed_miss, BLOCK_CACHE_COMPRESSED_MISS) +DEF_SHOW_FUNC(block_cachecompressed_hit, BLOCK_CACHE_COMPRESSED_HIT) +DEF_SHOW_FUNC(wal_synced, WAL_FILE_SYNCED) +DEF_SHOW_FUNC(wal_bytes, WAL_FILE_BYTES) +DEF_SHOW_FUNC(write_self, WRITE_DONE_BY_SELF) +DEF_SHOW_FUNC(write_other, WRITE_DONE_BY_OTHER) +DEF_SHOW_FUNC(write_timedout, WRITE_TIMEDOUT) +DEF_SHOW_FUNC(write_wal, WRITE_WITH_WAL) +DEF_SHOW_FUNC(flush_write_bytes, FLUSH_WRITE_BYTES) +DEF_SHOW_FUNC(compact_read_bytes, COMPACT_READ_BYTES) +DEF_SHOW_FUNC(compact_write_bytes, COMPACT_WRITE_BYTES) +DEF_SHOW_FUNC(number_superversion_acquires, NUMBER_SUPERVERSION_ACQUIRES) +DEF_SHOW_FUNC(number_superversion_releases, NUMBER_SUPERVERSION_RELEASES) +DEF_SHOW_FUNC(number_superversion_cleanups, NUMBER_SUPERVERSION_CLEANUPS) +DEF_SHOW_FUNC(number_block_not_compressed, NUMBER_BLOCK_NOT_COMPRESSED) + +static void myrocks_update_status() { + export_stats.rows_deleted = global_stats.rows[ROWS_DELETED]; + export_stats.rows_inserted = 
global_stats.rows[ROWS_INSERTED]; + export_stats.rows_read = global_stats.rows[ROWS_READ]; + export_stats.rows_updated = global_stats.rows[ROWS_UPDATED]; + + export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED]; + export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED]; + export_stats.system_rows_read = global_stats.system_rows[ROWS_READ]; + export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED]; +} + +static SHOW_VAR myrocks_status_variables[]= { + DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_inserted", + &export_stats.system_rows_inserted, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_updated", &export_stats.system_rows_updated, + SHOW_LONGLONG), + + {NullS, NullS, SHOW_LONG} +}; + +static void show_myrocks_vars(THD* thd, SHOW_VAR* var, char* buff) { + myrocks_update_status(); + var->type = SHOW_ARRAY; + var->value = reinterpret_cast(&myrocks_status_variables); +} + +static SHOW_VAR rocksdb_status_vars[]= { + DEF_STATUS_VAR(block_cache_miss), + DEF_STATUS_VAR(block_cache_hit), + DEF_STATUS_VAR(block_cache_add), + DEF_STATUS_VAR(block_cache_index_miss), + DEF_STATUS_VAR(block_cache_index_hit), + DEF_STATUS_VAR(block_cache_filter_miss), + DEF_STATUS_VAR(block_cache_filter_hit), + DEF_STATUS_VAR(block_cache_data_miss), + DEF_STATUS_VAR(block_cache_data_hit), + DEF_STATUS_VAR(bloom_filter_useful), + DEF_STATUS_VAR(memtable_hit), + DEF_STATUS_VAR(memtable_miss), + DEF_STATUS_VAR(compaction_key_drop_new), + 
DEF_STATUS_VAR(compaction_key_drop_obsolete), + DEF_STATUS_VAR(compaction_key_drop_user), + DEF_STATUS_VAR(number_keys_written), + DEF_STATUS_VAR(number_keys_read), + DEF_STATUS_VAR(number_keys_updated), + DEF_STATUS_VAR(bytes_written), + DEF_STATUS_VAR(bytes_read), + DEF_STATUS_VAR(no_file_closes), + DEF_STATUS_VAR(no_file_opens), + DEF_STATUS_VAR(no_file_errors), + DEF_STATUS_VAR(l0_slowdown_micros), + DEF_STATUS_VAR(memtable_compaction_micros), + DEF_STATUS_VAR(l0_num_files_stall_micros), + DEF_STATUS_VAR(rate_limit_delay_millis), + DEF_STATUS_VAR(num_iterators), + DEF_STATUS_VAR(number_multiget_get), + DEF_STATUS_VAR(number_multiget_keys_read), + DEF_STATUS_VAR(number_multiget_bytes_read), + DEF_STATUS_VAR(number_deletes_filtered), + DEF_STATUS_VAR(number_merge_failures), + DEF_STATUS_VAR(sequence_number), + DEF_STATUS_VAR(bloom_filter_prefix_checked), + DEF_STATUS_VAR(bloom_filter_prefix_useful), + DEF_STATUS_VAR(number_reseeks_iteration), + DEF_STATUS_VAR(getupdatessince_calls), + DEF_STATUS_VAR(block_cachecompressed_miss), + DEF_STATUS_VAR(block_cachecompressed_hit), + DEF_STATUS_VAR(wal_synced), + DEF_STATUS_VAR(wal_bytes), + DEF_STATUS_VAR(write_self), + DEF_STATUS_VAR(write_other), + DEF_STATUS_VAR(write_timedout), + DEF_STATUS_VAR(write_wal), + DEF_STATUS_VAR(flush_write_bytes), + DEF_STATUS_VAR(compact_read_bytes), + DEF_STATUS_VAR(compact_write_bytes), + DEF_STATUS_VAR(number_superversion_acquires), + DEF_STATUS_VAR(number_superversion_releases), + DEF_STATUS_VAR(number_superversion_cleanups), + DEF_STATUS_VAR(number_block_not_compressed), + DEF_STATUS_VAR_PTR("snapshot_conflict_errors", + &rocksdb_snapshot_conflict_errors, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete, + SHOW_LONGLONG), + 
DEF_STATUS_VAR_PTR("number_sst_entry_singledelete", + &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other, + SHOW_LONGLONG), + {"rocksdb", reinterpret_cast(&show_myrocks_vars), SHOW_FUNC}, + {NullS, NullS, SHOW_LONG} +}; + + +/* + Background thread's main logic +*/ + +void Rdb_background_thread::run() +{ + timespec ts_next_sync; + clock_gettime(CLOCK_REALTIME, &ts_next_sync); + ts_next_sync.tv_sec++; + + for (;;) + { + // wait for 1 second or until we received a condition to stop the thread + mysql_mutex_lock(&m_signal_mutex); + auto ret __attribute__((__unused__)) = mysql_cond_timedwait( + &m_signal_cond, &m_signal_mutex, &ts_next_sync); + // make sure that no program error is returned + DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); + bool local_stop= m_stop; + bool local_save_stats= m_save_stats; + reset(); + mysql_mutex_unlock(&m_signal_mutex); + + if (local_stop) + { + break; + } + + if (local_save_stats) + { + ddl_manager.persist_stats(); + } + + // Flush the WAL if need be but don't do it more frequent + // than once per second + timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + if (ts.tv_sec - ts_next_sync.tv_sec >= 1) + { + if (rdb && rocksdb_background_sync) + { + DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); + rocksdb::Status s= rdb->SyncWAL(); + if (!s.ok()) + rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); + } + ts_next_sync.tv_sec= ts.tv_sec + 1; + } + } + + // save remaining stats which might've left unsaved + ddl_manager.persist_stats(); +} + + +/** + Deciding if it is possible to use bloom filter or not. + + @detail + Even if bloom filter exists, it is not always possible + to use bloom filter. If using bloom filter when you shouldn't, + false negative may happen -- fewer rows than expected may be returned. + It is users' responsibility to use bloom filter correctly. 
+ + If bloom filter does not exist, return value does not matter because + RocksDB does not use bloom filter internally. + + @param kd + @param eq_cond Equal condition part of the key. This always includes + system index id (4 bytes). + @param use_all_keys True if all key parts are set with equal conditions. + This is aware of extended keys. +*/ +bool can_use_bloom_filter(THD *thd, + const std::shared_ptr& kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, + bool is_ascending) +{ + bool can_use= false; + + if (THDVAR(thd, skip_bloom_filter_on_read)) + { + return can_use; + } + + rocksdb::Options opt = rdb->GetOptions(kd->get_cf()); + if (opt.prefix_extractor) + { + /* + This is an optimized use case for CappedPrefixTransform. + If eq_cond length >= prefix extractor length and if + all keys are used for equal lookup, it is + always possible to use bloom filter. + + Prefix bloom filter can't be used on descending scan with + prefix lookup (i.e. WHERE id1=1 ORDER BY id2 DESC), because of + RocksDB's limitation. On ascending (or not sorting) scan, + keys longer than the capped prefix length will be truncated down + to the capped length and the resulting key is added to the bloom filter. + + Keys shorter than the capped prefix length will be added to + the bloom filter. When keys are looked up, key conditionals + longer than the capped length can be used; key conditionals + shorter require all parts of the key to be available + for the short key match. + */ + if (use_all_keys && opt.prefix_extractor->InRange(eq_cond)) + can_use= true; + else if (!is_ascending) + can_use= false; + else if (opt.prefix_extractor->SameResultWhenAppended(eq_cond)) + can_use= true; + else + can_use= false; + } else + { + /* + if prefix extractor is not defined, all key parts have to be + used by eq_cond. 
+ */ + if (use_all_keys) + can_use= true; + else + can_use= false; + } + + return can_use; +} + +/* For modules that need access to the global data structures */ +rocksdb::DB *rdb_get_rocksdb_db() +{ + return rdb; +} + +Rdb_cf_manager& rdb_get_cf_manager() +{ + return cf_manager; +} + +rocksdb::BlockBasedTableOptions& rdb_get_table_options() +{ + return rocksdb_tbl_options; +} + + +int rdb_get_table_perf_counters(const char *tablename, + Rdb_perf_counters *counters) +{ + DBUG_ASSERT(counters != nullptr); + DBUG_ASSERT(tablename != nullptr); + + Rdb_table_handler *table_handler; + table_handler= rdb_open_tables.get_table_handler(tablename); + if (table_handler == nullptr) + { + return HA_ERR_INTERNAL_ERROR; + } + + counters->load(table_handler->m_table_perf_context); + + rdb_open_tables.release_table_handler(table_handler); + return 0; +} + + +void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type) +{ + if (status.IsIOError()) + { + switch (err_type) { + case RDB_IO_ERROR_TX_COMMIT: + case RDB_IO_ERROR_DICT_COMMIT: + { + sql_print_error("RocksDB: Failed to write to WAL - status %d, %s", + status.code(), status.ToString().c_str()); + sql_print_error("RocksDB: Aborting on WAL write error."); + abort_with_stack_traces(); + break; + } + case RDB_IO_ERROR_BG_THREAD: + { + sql_print_warning("RocksDB: BG Thread failed to write to RocksDB " + "- status %d, %s", status.code(), + status.ToString().c_str()); + break; + } + default: + DBUG_ASSERT(0); + break; + } + } + else if (status.IsCorruption()) + { + /* NO_LINT_DEBUG */ + sql_print_error("RocksDB: Data Corruption detected! 
%d, %s", + status.code(), status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("RocksDB: Aborting because of data corruption."); + abort_with_stack_traces(); + } + else if (!status.ok()) + { + switch (err_type) { + case RDB_IO_ERROR_DICT_COMMIT: + { + sql_print_error("RocksDB: Failed to write to WAL (dictionary) - " + "status %d, %s", + status.code(), status.ToString().c_str()); + sql_print_error("RocksDB: Aborting on WAL write error."); + abort_with_stack_traces(); + break; + } + default: + sql_print_warning("RocksDB: Failed to write to RocksDB " + "- status %d, %s", status.code(), + status.ToString().c_str()); + break; + } + } +} + +Rdb_dict_manager *rdb_get_dict_manager(void) +{ + return &dict_manager; +} + +Rdb_ddl_manager *rdb_get_ddl_manager(void) +{ + return &ddl_manager; +} + +Rdb_binlog_manager *rdb_get_binlog_manager(void) +{ + return &binlog_manager; +} + + +void +rocksdb_set_compaction_options( + my_core::THD* thd __attribute__((__unused__)), + my_core::st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr, + const void* save) +{ + if (var_ptr && save) { + *(uint64_t*)var_ptr = *(const uint64_t*) save; + } + Rdb_compact_params params = { + (uint64_t)rocksdb_compaction_sequential_deletes, + (uint64_t)rocksdb_compaction_sequential_deletes_window, + (uint64_t)rocksdb_compaction_sequential_deletes_file_size + }; + if (properties_collector_factory) { + properties_collector_factory->SetCompactionParams(params); + } +} + +void rocksdb_set_table_stats_sampling_pct( + my_core::THD* thd __attribute__((__unused__)), + my_core::st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr __attribute__((__unused__)), + const void* save) +{ + mysql_mutex_lock(&rdb_sysvars_mutex); + + uint32_t new_val= *static_cast(save); + + if (new_val != rocksdb_table_stats_sampling_pct) { + rocksdb_table_stats_sampling_pct = new_val; + + if (properties_collector_factory) { + properties_collector_factory->SetTableStatsSamplingPct( + 
rocksdb_table_stats_sampling_pct); + } + } + + mysql_mutex_unlock(&rdb_sysvars_mutex); +} + +/* + This function allows setting the rate limiter's bytes per second value + but only if the rate limiter is turned on which has to be done at startup. + If the rate is already 0 (turned off) or we are changing it to 0 (trying + to turn it off) this function will push a warning to the client and do + nothing. + This is similar to the code in innodb_doublewrite_update (found in + storage/innobase/handler/ha_innodb.cc). +*/ +void +rocksdb_set_rate_limiter_bytes_per_sec( + my_core::THD* thd, + my_core::st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr __attribute__((__unused__)), + const void* save) +{ + uint64_t new_val= *static_cast(save); + if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) + { + /* + If a rate_limiter was not enabled at startup we can't change it nor + can we disable it if one was created at startup + */ + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot " + "be dynamically changed to or from 0. 
Do a clean " + "shutdown if you want to change it from or to 0."); + } + else if (new_val != rocksdb_rate_limiter_bytes_per_sec) + { + /* Apply the new value to the rate limiter and store it locally */ + DBUG_ASSERT(rocksdb_rate_limiter != nullptr); + rocksdb_rate_limiter_bytes_per_sec= new_val; + rocksdb_rate_limiter->SetBytesPerSecond(new_val); + } +} + +void rdb_set_collation_exception_list(const char *exception_list) +{ + DBUG_ASSERT(rdb_collation_exceptions != nullptr); + + if (!rdb_collation_exceptions->set_patterns(exception_list)) + { + my_core::warn_about_bad_patterns(rdb_collation_exceptions, + "strict_collation_exceptions"); + } +} + +void +rocksdb_set_collation_exception_list(THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ + const char* val = *static_cast(save); + + rdb_set_collation_exception_list(val); + + *static_cast(var_ptr) = val; +} + +void +rocksdb_set_bulk_load(THD* thd, + struct st_mysql_sys_var* var __attribute__((__unused__)), + void* var_ptr, + const void* save) +{ + Rdb_transaction*& tx= get_tx_from_thd(thd); + + if (tx != nullptr) + { + int rc= tx->finish_bulk_load(); + if (rc != 0) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error %d finalizing last SST file while " + "setting bulk loading variable", rc); + abort_with_stack_traces(); + } + } + + *static_cast(var_ptr) = *static_cast(save); +} + +void rdb_queue_save_stats_request() +{ + rdb_bg_thread.request_save_stats(); +} + +void ha_rocksdb::rpl_before_delete_rows() +{ + m_in_rpl_delete_rows= true; +} + +void ha_rocksdb::rpl_after_delete_rows() +{ + m_in_rpl_delete_rows= false; +} + +void ha_rocksdb::rpl_before_update_rows() +{ + m_in_rpl_update_rows= true; +} + +void ha_rocksdb::rpl_after_update_rows() +{ + m_in_rpl_update_rows= false; +} + +/** + @brief + Read Free Replication can be used or not. Returning False means + Read Free Replication can be used. 
Read Free Replication can be used + on UPDATE or DELETE row events, and table must have user defined + primary key. +*/ +bool ha_rocksdb::use_read_free_rpl() +{ + return ((m_in_rpl_delete_rows || m_in_rpl_update_rows) && + !has_hidden_pk(table) && m_use_read_free_rpl); +} + +double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) +{ + if (index != table->s->primary_key) { + /* Non covering index range scan */ + return(handler::read_time(index, ranges, rows)); + } + return (rows/20.0) + 1; +} + +} // namespace myrocks + +/* + Register the storage engine plugin outside of myrocks namespace + so that mysql_declare_plugin does not get confused when it does + its name generation. +*/ + +struct st_mysql_storage_engine rocksdb_storage_engine= +{ MYSQL_HANDLERTON_INTERFACE_VERSION }; + +mysql_declare_plugin(rocksdb_se) +{ + MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ + &rocksdb_storage_engine, /* Plugin Descriptor */ + "ROCKSDB", /* Plugin Name */ + "Monty Program Ab", /* Plugin Author */ + "RocksDB storage engine", /* Plugin Description */ + PLUGIN_LICENSE_GPL, /* Plugin Licence */ + myrocks::rocksdb_init_func, /* Plugin Entry Point */ + myrocks::rocksdb_done_func, /* Plugin Deinitializer */ + 0x0001, /* version number (0.1) */ + myrocks::rocksdb_status_vars, /* status variables */ + myrocks::rocksdb_system_variables, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}, +myrocks::rdb_i_s_cfstats, +myrocks::rdb_i_s_dbstats, +myrocks::rdb_i_s_perf_context, +myrocks::rdb_i_s_perf_context_global, +myrocks::rdb_i_s_cfoptions, +myrocks::rdb_i_s_global_info, +myrocks::rdb_i_s_ddl, +myrocks::rdb_i_s_index_file_map +mysql_declare_plugin_end; diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h new file mode 100644 index 00000000000..8f63050e74c --- /dev/null +++ b/storage/rocksdb/ha_rocksdb.h @@ -0,0 +1,1054 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or 
modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* C++ standard header files */ +#include +#include +#include +#include + +/* MySQL header files */ +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ +#include "./sql_string.h" +#include "./ut0counter.h" + +/* RocksDB header files */ +#include "rocksdb/cache.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/statistics.h" +#include "rocksdb/utilities/options_util.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksdb/utilities/write_batch_with_index.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" +#include "./rdb_index_merge.h" +#include "./rdb_perf_context.h" +#include "./rdb_sst_info.h" +#include "./rdb_utils.h" + +/** + @note MyRocks Coding Conventions: + MyRocks code follows the baseline MySQL coding conventions, available at + http://dev.mysql.com/doc/internals/en/coding-guidelines.html, with several + refinements (@see /storage/rocksdb/README file). +*/ + +/** + @note MyRocks Coding Conventions: + MyRocks code follows the baseline MySQL coding conventions, available at + http://dev.mysql.com/doc/internals/en/coding-guidelines.html, with several + refinements (@see /storage/rocksdb/README file). 
+*/ + +namespace myrocks { + +/* + This is + - the name of the default Column Family (the CF which stores indexes which + didn't explicitly specify which CF they are in) + - the name used to set the default column family parameter for per-cf + arguments. +*/ +const char * const DEFAULT_CF_NAME= "default"; + +/* + This is the name of the Column Family used for storing the data dictionary. +*/ +const char * const DEFAULT_SYSTEM_CF_NAME= "__system__"; + +/* + This is the name of the hidden primary key for tables with no pk. +*/ +const char * const HIDDEN_PK_NAME= "HIDDEN_PK_ID"; + +/* + Column family name which means "put this index into its own column family". + See Rdb_cf_manager::get_per_index_cf_name(). +*/ +const char * const PER_INDEX_CF_NAME = "$per_index_cf"; + +/* + Default, minimal valid, and maximum valid sampling rate values when collecting + statistics about table. +*/ +#define RDB_DEFAULT_TBL_STATS_SAMPLE_PCT 10 +#define RDB_TBL_STATS_SAMPLE_PCT_MIN 1 +#define RDB_TBL_STATS_SAMPLE_PCT_MAX 100 + +/* + Default and maximum values for rocksdb-compaction-sequential-deletes and + rocksdb-compaction-sequential-deletes-window to add basic boundary checking. +*/ +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES 2000000 + +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW 2000000 + +/* + Default and maximum values for various compaction and flushing related + options. Numbers are based on the hardware we currently use and our internal + benchmarks which indicate that parallelization helps with the speed of + compactions. + + Ideally of course we'll use heuristic technique to determine the number of + CPU-s and derive the values from there. This however has its own set of + problems and we'll choose simplicity for now. 
+*/ +#define MAX_BACKGROUND_COMPACTIONS 64 +#define MAX_BACKGROUND_FLUSHES 64 + +#define DEFAULT_SUBCOMPACTIONS 1 +#define MAX_SUBCOMPACTIONS 64 + +/* + Defines the field sizes for serializing XID object to a string representation. + string byte format: [field_size: field_value, ...] + [ + 8: XID.formatID, + 1: XID.gtrid_length, + 1: XID.bqual_length, + XID.gtrid_length + XID.bqual_length: XID.data + ] +*/ +#define RDB_FORMATID_SZ 8 +#define RDB_GTRID_SZ 1 +#define RDB_BQUAL_SZ 1 +#define RDB_XIDHDR_LEN (RDB_FORMATID_SZ + RDB_GTRID_SZ + RDB_BQUAL_SZ) + +/* + To fix an unhandled exception we specify the upper bound as LONGLONGMAX + instead of ULONGLONGMAX because the latter is -1 and causes an exception when + cast to jlong (signed) of JNI + + The reason behind the cast issue is the lack of unsigned int support in Java. +*/ +#define MAX_RATE_LIMITER_BYTES_PER_SEC static_cast(LONGLONG_MAX) + +/* + Hidden PK column (for tables with no primary key) is a longlong (aka 8 bytes). + static_assert() in code will validate this assumption. +*/ +#define ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN sizeof(longlong) + +/* + MyRocks specific error codes. NB! Please make sure that you will update + HA_ERR_ROCKSDB_LAST when adding new ones. +*/ +#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) +#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) +#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) +#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS + +inline bool looks_like_per_index_cf_typo(const char *name) +{ + return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME)); +} + +/** + @brief + Rdb_table_handler is a reference-counted structure storing information for + each open table. All the objects are stored in a global hash map. + + //TODO: join this with Rdb_tbl_def ? 
+*/ +struct Rdb_table_handler +{ + char *m_table_name; + uint m_table_name_length; + int m_ref_count; + + my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock + + /* Stores cumulative table statistics */ + my_io_perf_atomic_t m_io_perf_read; + Rdb_atomic_perf_counters m_table_perf_context; +}; + + + +class Rdb_key_def; +class Rdb_tbl_def; +class Rdb_transaction; +class Rdb_transaction_impl; +class Rdb_writebatch_impl; +class Rdb_field_encoder; + +const char *const rocksdb_hton_name= "ROCKSDB"; + +typedef struct _gl_index_id_s { + uint32_t cf_id; + uint32_t index_id; + bool operator==(const struct _gl_index_id_s& other) const + { + return cf_id == other.cf_id && index_id == other.index_id; + } + bool operator!=(const struct _gl_index_id_s& other) const + { + return cf_id != other.cf_id || index_id != other.index_id; + } + bool operator<(const struct _gl_index_id_s& other) const + { + return cf_id < other.cf_id || + (cf_id == other.cf_id && index_id < other.index_id); + } + bool operator<=(const struct _gl_index_id_s& other) const + { + return cf_id < other.cf_id || + (cf_id == other.cf_id && index_id <= other.index_id); + } + bool operator>(const struct _gl_index_id_s& other) const + { + return cf_id > other.cf_id || + (cf_id == other.cf_id && index_id > other.index_id); + } + bool operator>=(const struct _gl_index_id_s& other) const + { + return cf_id > other.cf_id || + (cf_id == other.cf_id && index_id >= other.index_id); + } +} GL_INDEX_ID; + +enum operation_type { + ROWS_DELETED = 0, + ROWS_INSERTED, + ROWS_READ, + ROWS_UPDATED, + ROWS_MAX +}; + +/* Global statistics struct used inside MyRocks */ +struct st_global_stats { + ib_counter_t rows[ROWS_MAX]; + + // system_rows_ stats are only for system + // tables. They are not counted in rows_* stats. 
+ ib_counter_t system_rows[ROWS_MAX]; +}; + +/* Struct used for exporting status to MySQL */ +struct st_export_stats { + ulonglong rows_deleted; + ulonglong rows_inserted; + ulonglong rows_read; + ulonglong rows_updated; + + ulonglong system_rows_deleted; + ulonglong system_rows_inserted; + ulonglong system_rows_read; + ulonglong system_rows_updated; +}; + +} // namespace myrocks + +#include "./rdb_buff.h" + +/* Provide hash function for GL_INDEX_ID so we can include it in sets */ +namespace std { + template <> + struct hash + { + std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const + { + uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 | + (uint64_t) gl_index_id.index_id); + return std::hash()(val); + } + }; +} // namespace std + +namespace myrocks { + +/** + @brief + Class definition for ROCKSDB storage engine plugin handler +*/ + +class ha_rocksdb: public my_core::handler +{ + my_core::THR_LOCK_DATA m_db_lock; ///< MySQL database lock + + Rdb_table_handler *m_table_handler; ///< Open table handler + + /* Iterator used for range scans and for full table/index scans */ + rocksdb::Iterator *m_scan_it; + + /* Whether m_scan_it was created with skip_bloom=true */ + bool m_scan_it_skips_bloom; + + Rdb_tbl_def *m_tbl_def; + + /* Primary Key encoder from KeyTupleFormat to StorageFormat */ + std::shared_ptr m_pk_descr; + + /* Array of index descriptors */ + std::shared_ptr *m_key_descr_arr; + + bool check_keyread_allowed(uint inx, uint part, bool all_parts) const; + + /* + Number of key parts in PK. This is the same as + table->key_info[table->s->primary_key].keyparts + */ + uint m_pk_key_parts; + + /* + TRUE <=> Primary Key columns can be decoded from the index + */ + mutable bool m_pk_can_be_decoded; + + /* + TRUE <=> Some fields in the PK may require unpack_info. 
+ */ + bool m_maybe_unpack_info; + + uchar *m_pk_tuple; /* Buffer for storing PK in KeyTupleFormat */ + uchar *m_pk_packed_tuple; /* Buffer for storing PK in StorageFormat */ + // ^^ todo: change it to 'char*'? TODO: ^ can we join this with last_rowkey? + + /* + Temporary buffers for storing the key part of the Key/Value pair + for secondary indexes. + */ + uchar *m_sk_packed_tuple; + + /* + Temporary buffers for storing end key part of the Key/Value pair. + This is used for range scan only. + */ + uchar *m_end_key_packed_tuple; + + Rdb_string_writer m_sk_tails; + Rdb_string_writer m_pk_unpack_info; + + /* + ha_rockdb->index_read_map(.. HA_READ_KEY_EXACT or similar) will save here + mem-comparable form of the index lookup tuple. + */ + uchar *m_sk_match_prefix; + uint m_sk_match_length; + + /* Buffer space for the above */ + uchar *m_sk_match_prefix_buf; + + /* Second buffers, used by UPDATE. */ + uchar *m_sk_packed_tuple_old; + Rdb_string_writer m_sk_tails_old; + + /* + Temporary space for packing VARCHARs (we provide it to + pack_record()/pack_index_tuple() calls). + */ + uchar *m_pack_buffer; + + /* rowkey of the last record we've read, in StorageFormat. */ + String m_last_rowkey; + + /* Buffer used by convert_record_to_storage_format() */ + String m_storage_record; + + /* + Last retrieved record, in table->record[0] data format. + + This is used only when we get the record with rocksdb's Get() call (The + other option is when we get a rocksdb::Slice from an iterator) + */ + std::string m_retrieved_record; + + /* Type of locking to apply to rows */ + enum { RDB_LOCK_NONE, RDB_LOCK_READ, RDB_LOCK_WRITE } m_lock_rows; + + /* TRUE means we're doing an index-only read. FALSE means otherwise. 
*/ + bool m_keyread_only; + + bool m_skip_scan_it_next_call; + + /* TRUE means we are accessing the first row after a snapshot was created */ + bool m_rnd_scan_is_new_snapshot; + + /* TRUE means the replication slave will use Read Free Replication */ + bool m_use_read_free_rpl; + + /* + TRUE means we should skip unique key checks for this table if the + replication lag gets too large + */ + bool m_skip_unique_check; + + /** + @brief + This is a bitmap of indexes (i.e. a set) whose keys (in future, values) may + be changed by this statement. Indexes that are not in the bitmap do not need + to be updated. + @note Valid inside UPDATE statements, IIF(m_update_scope_is_valid == true). + */ + my_core::key_map m_update_scope; + bool m_update_scope_is_valid; + + /* SST information used for bulk loading the primary key */ + std::shared_ptr m_sst_info; + + /* + MySQL index number for duplicate key error + */ + int m_dupp_errkey; + + int create_key_defs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg= nullptr, + const Rdb_tbl_def *old_tbl_def_arg= nullptr) + __attribute__((__nonnull__(2, 3), __warn_unused_result__)); + int secondary_index_read(const int keyno, uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + void setup_iterator_for_rnd_scan(); + void setup_scan_iterator(const std::shared_ptr& kd, + rocksdb::Slice *slice) + __attribute__((__nonnull__)) + { + setup_scan_iterator(kd, slice, false, false, 0); + } + bool is_ascending(const std::shared_ptr& keydef, + enum ha_rkey_function find_flag) const + __attribute__((__nonnull__, __warn_unused_result__)); + void setup_scan_iterator(const std::shared_ptr& kd, + rocksdb::Slice *slice, const bool use_all_keys, + const bool is_ascending, const uint eq_cond_len) + __attribute__((__nonnull__)); + void release_scan_iterator(void) + { + delete m_scan_it; + m_scan_it= nullptr; + } + + int get_row_by_rowid(uchar *buf, const char *rowid, + const uint rowid_size) + 
__attribute__((__nonnull__, __warn_unused_result__)); + int get_row_by_rowid(uchar *buf, const uchar *rowid, + const uint rowid_size) + __attribute__((__nonnull__, __warn_unused_result__)) + { + return get_row_by_rowid(buf, reinterpret_cast(rowid), + rowid_size); + } + + void update_auto_incr_val(); + void load_auto_incr_value(); + longlong update_hidden_pk_val(); + int load_hidden_pk_value() __attribute__((__warn_unused_result__)); + int read_hidden_pk_id_from_rowkey(longlong *hidden_pk_id) + __attribute__((__nonnull__, __warn_unused_result__)); + bool can_use_single_delete(uint index) + __attribute__((__warn_unused_result__)); + bool skip_unique_check() __attribute__((__warn_unused_result__)); + void set_force_skip_unique_check(bool skip) override; + bool commit_in_the_middle() __attribute__((__warn_unused_result__)); + bool do_bulk_commit(Rdb_transaction *tx) + __attribute__((__nonnull__, __warn_unused_result__)); + bool has_hidden_pk(const TABLE* table) + __attribute__((__nonnull__, __warn_unused_result__)); + + void update_row_stats(operation_type type); + + void set_last_rowkey(const uchar *old_data); + + /* + Array of table->s->fields elements telling how to store fields in the + record. + */ + Rdb_field_encoder *m_encoder_arr; + + /* Describes instructions on how to decode the field */ + class READ_FIELD + { + public: + /* Points to Rdb_field_encoder describing the field */ + Rdb_field_encoder* m_field_enc; + /* if true, decode the field, otherwise skip it */ + bool m_decode; + /* Skip this many bytes before reading (or skipping) this field */ + int m_skip; + }; + + /* + This tells which table fields should be decoded (or skipped) when + decoding table row from (pk, encoded_row) pair. 
(Secondary keys are + just always decoded in full currently) + */ + std::vector m_decoders_vect; + + /* Setup field_decoders based on type of scan and table->read_set */ + void setup_read_decoders(); + + /* + Number of bytes in on-disk (storage) record format that are used for + storing SQL NULL flags. + */ + uint m_null_bytes_in_rec; + + void get_storage_type(Rdb_field_encoder *encoder, uint kp); + void setup_field_converters(); + int alloc_key_buffers(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + void free_key_buffers(); + + // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE + rocksdb::Range get_range(int i, uchar buf[]) const; + + /* + Perf timers for data reads + */ + Rdb_io_perf m_io_perf; + + /* + A counter of how many row checksums were checked for this table. Note that + this does not include checksums for secondary index entries. + */ + my_core::ha_rows m_row_checksums_checked; + + /* + Update stats + */ + void update_stats(void); + +public: + /* + Controls whether writes include checksums. This is updated from the session variable + at the start of each query. + */ + bool m_store_checksums; + + /* Same as above but for verifying checksums when reading */ + bool m_verify_checksums; + int m_checksums_pct; + + ha_rocksdb(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg); + ~ha_rocksdb() + { + int err __attribute__((__unused__)); + err= finalize_bulk_load(); + DBUG_ASSERT(err == 0); + } + + /** @brief + The name that will be used for display purposes. + */ + const char *table_type() const { return rocksdb_hton_name; } + + /* The following is only used by SHOW KEYS: */ + const char *index_type(uint inx) { return "LSMTREE"; } + + /** @brief + The file extensions. + */ + const char **bas_ext() const; + + /* + See if this is the same base table - this should only be true for different + partitions of the same table. 
+ */ + bool same_table(const ha_rocksdb& other) const; + + /** @brief + This is a list of flags that indicate what functionality the storage engine + implements. The current table flags are documented in handler.h + */ + ulonglong table_flags() const override + { + /* + HA_BINLOG_STMT_CAPABLE + We are saying that this engine is just statement capable to have + an engine that can only handle statement-based logging. This is + used in testing. + HA_REC_NOT_IN_SEQ + If we don't set it, filesort crashes, because it assumes rowids are + 1..8 byte numbers + */ + return HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | + HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS | + (m_pk_can_be_decoded? HA_PRIMARY_KEY_IN_READ_INDEX : 0) | + HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | + HA_NULL_IN_KEY | + HA_PARTIAL_COLUMN_READ; + } + + bool init_with_fields() override; + + /** @brief + This is a bitmap of flags that indicates how the storage engine + implements indexes. The current index flags are documented in + handler.h. If you do not implement indexes, just return zero here. + + @details + part is the key part to check. First key part is 0. + If all_parts is set, MySQL wants to know the flags for the combined + index, up to and including 'part'. 
+ */ + ulong index_flags(uint inx, uint part, bool all_parts) const; + + const key_map * keys_to_use_for_scanning() + { + return &key_map_full; + } + + bool primary_key_is_clustered() + { + return true; + } + + bool should_store_checksums() const + { + return m_store_checksums && (rand() % 100 < m_checksums_pct); + } + + int rename_table(const char *from, const char *to) + __attribute__((__nonnull__, __warn_unused_result__)); + + int convert_record_from_storage_format(const rocksdb::Slice *key, + const rocksdb::Slice *value, + uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + + int convert_record_from_storage_format(const rocksdb::Slice *key, + uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + + void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice, + Rdb_string_writer *pk_unpack_info, + rocksdb::Slice *packed_rec) + __attribute__((__nonnull__)); + + static const char* get_key_name(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + + static const char* get_key_comment(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + + static bool is_hidden_pk(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + + static uint pk_index(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + + static bool is_pk(const uint index, const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg) + __attribute__((__nonnull__, __warn_unused_result__)); + + /** @brief + unireg.cc will call max_supported_record_length(), max_supported_keys(), + max_supported_key_parts(), uint max_supported_key_length() + to make sure that the storage engine can handle the data it is about to + send. 
Return *real* limits of your storage engine here; MySQL will do + min(your_limits, MySQL_limits) automatically. + */ + uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + + uint max_supported_keys() const { return MAX_INDEXES; } + uint max_supported_key_parts() const { return MAX_REF_PARTS; } + uint max_supported_key_part_length() const { return 2048; } + /** @brief + unireg.cc will call this to make sure that the storage engine can handle + the data it is about to send. Return *real* limits of your storage engine + here; MySQL will do min(your_limits, MySQL_limits) automatically. + + @details + There is no need to implement ..._key_... methods if your engine doesn't + support indexes. + */ + uint max_supported_key_length() const { return 16*1024; /* just to return something*/ } + + /** + TODO: return actual upper bound of number of records in the table. + (e.g. save number of records seen on full table scan and/or use file size + as upper bound) + */ + ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } + + /* At the moment, we're ok with default handler::index_init() implementation. 
*/ + int index_read_map(uchar * buf, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + __attribute__((__warn_unused_result__)); + + int index_read_map_impl(uchar * buf, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag, + const key_range *end_key) + __attribute__((__warn_unused_result__));; + + int index_read_last_map(uchar * buf, const uchar * key, + key_part_map keypart_map) + __attribute__((__warn_unused_result__));; + + int read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted) + __attribute__((__warn_unused_result__));; + + virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; } + virtual double read_time(uint, uint, ha_rows rows) override; + + int open(const char *name, int mode, uint test_if_locked) + __attribute__((__warn_unused_result__)); + int close(void) __attribute__((__warn_unused_result__)); + + int write_row(uchar *buf) __attribute__((__warn_unused_result__)); + int update_row(const uchar *old_data, uchar *new_data) + __attribute__((__warn_unused_result__)); + int delete_row(const uchar *buf) __attribute__((__warn_unused_result__)); + rocksdb::Status delete_or_singledelete(uint index, + Rdb_transaction *tx, + rocksdb::ColumnFamilyHandle* cf, + const rocksdb::Slice& key) + __attribute__((__warn_unused_result__)); + + int index_next(uchar *buf) __attribute__((__warn_unused_result__)); + int index_next_with_direction(uchar *buf, bool move_forward) + __attribute__((__warn_unused_result__)); + int index_prev(uchar *buf) __attribute__((__warn_unused_result__)); + + int index_first(uchar *buf) __attribute__((__warn_unused_result__)); + int index_last(uchar *buf) __attribute__((__warn_unused_result__)); + + class Item* idx_cond_push(uint keyno, class Item* idx_cond); + /* + Default implementation from cancel_pushed_idx_cond() suits us + */ +private: + struct key_def_cf_info + { + rocksdb::ColumnFamilyHandle* 
cf_handle; + bool is_reverse_cf; + bool is_auto_cf; + }; + + struct update_row_info + { + Rdb_transaction* tx; + const uchar* new_data; + const uchar* old_data; + rocksdb::Slice new_pk_slice; + rocksdb::Slice old_pk_slice; + + // "unpack_info" data for the new PK value + Rdb_string_writer *new_pk_unpack_info; + + longlong hidden_pk_id; + bool skip_unique_check; + }; + + int create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, + std::array* cfs); + __attribute__((__nonnull__, __warn_unused_result__)); + + int create_key_def(const TABLE *table_arg, uint i, + const Rdb_tbl_def* tbl_def_arg, + std::shared_ptr* new_key_def, + const struct key_def_cf_info& cf_info); + __attribute__((__nonnull__, __warn_unused_result__)); + + int create_inplace_key_defs(const TABLE *table_arg, + Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg, + const Rdb_tbl_def *old_tbl_def_arg, + const std::array& cfs); + __attribute__((__nonnull__, __warn_unused_result__)); + + std::unordered_map get_old_key_positions( + const TABLE* table_arg, + const Rdb_tbl_def* tbl_def_arg, + const TABLE* old_table_arg, + const Rdb_tbl_def* old_tbl_def_arg) + __attribute__((__nonnull__)); + + int compare_key_parts(const KEY* old_key, const KEY* new_key); + __attribute__((__nonnull__, __warn_unused_result__)); + + int index_first_intern(uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + int index_last_intern(uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + + enum icp_result check_index_cond(); + int find_icp_matching_index_rec(bool move_forward, uchar *buf) + __attribute__((__nonnull__, __warn_unused_result__)); + + void calc_updated_indexes(); + int update_write_row(const uchar *old_data, const uchar *new_data, + const bool skip_unique_check) + __attribute__((__warn_unused_result__)); + int get_pk_for_update(struct update_row_info* row_info); + int check_and_lock_unique_pk(uint key_id, + const struct update_row_info& row_info, + bool* found, bool* pk_changed) 
+ __attribute__((__warn_unused_result__)); + int check_and_lock_sk(uint key_id, const struct update_row_info& row_info, + bool* found) const + __attribute__((__warn_unused_result__)); + int check_uniqueness_and_lock(const struct update_row_info& row_info, + bool* pk_changed) + __attribute__((__warn_unused_result__)); + bool over_bulk_load_threshold(int* err) + __attribute__((__warn_unused_result__)); + int bulk_load_key(Rdb_transaction* tx, + const std::shared_ptr& kd, + const rocksdb::Slice& key, + const rocksdb::Slice& value) + __attribute__((__nonnull__, __warn_unused_result__)); + int update_pk(const std::shared_ptr& kd, + const struct update_row_info& row_info, + bool pk_changed) + __attribute__((__warn_unused_result__)); + int update_sk(const TABLE* table_arg, + const std::shared_ptr& kd, + const struct update_row_info& row_info) + __attribute__((__warn_unused_result__)); + int update_indexes(const struct update_row_info& row_info, bool pk_changed) + __attribute__((__warn_unused_result__)); + + int read_key_exact(const std::shared_ptr& kd, + rocksdb::Iterator* iter, bool using_full_key, + const rocksdb::Slice& key_slice) const + __attribute__((__nonnull__, __warn_unused_result__)); + int read_before_key(const std::shared_ptr& kd, + bool using_full_key, const rocksdb::Slice& key_slice) + __attribute__((__nonnull__, __warn_unused_result__)); + int read_after_key(const std::shared_ptr& kd, + bool using_full_key, const rocksdb::Slice& key_slice) + __attribute__((__nonnull__, __warn_unused_result__)); + + int position_to_correct_key(const std::shared_ptr& kd, + enum ha_rkey_function find_flag, + bool full_key_match, const uchar* key, + key_part_map keypart_map, + const rocksdb::Slice& key_slice, + bool* move_forward) + __attribute__((__warn_unused_result__)); + + int read_row_from_primary_key(uchar* buf) + __attribute__((__nonnull__, __warn_unused_result__)); + int read_row_from_secondary_key(uchar* buf, + const std::shared_ptr& kd, + bool move_forward) + 
__attribute__((__nonnull__, __warn_unused_result__)); + int try_keyonly_read_from_sk(uchar* buf, + const std::shared_ptr& kd, + const rocksdb::Slice& key, + const rocksdb::Slice& value, + uint rowid_size) + __attribute__((__nonnull__, __warn_unused_result__)); + + int calc_eq_cond_len(const std::shared_ptr& kd, + enum ha_rkey_function find_flag, + const rocksdb::Slice& slice, int bytes_changed_by_succ, + const key_range *end_key, uint* end_key_packed_size) + __attribute__((__warn_unused_result__)); + + Rdb_tbl_def* get_table_if_exists(const char* tablename) + __attribute__((__nonnull__, __warn_unused_result__)); + void read_thd_vars(THD *thd) + __attribute__((__nonnull__)); + bool contains_foreign_key(THD* thd) + __attribute__((__nonnull__, __warn_unused_result__)); + + int inplace_populate_sk(const TABLE* table_arg, + const std::unordered_set>& indexes); + +public: + int index_init(uint idx, bool sorted) __attribute__((__warn_unused_result__)); + int index_end() __attribute__((__warn_unused_result__)); + + void unlock_row(); + + /** @brief + Unlike index_init(), rnd_init() can be called two consecutive times + without rnd_end() in between (it only makes sense if scan=1). In this + case, the second call should prepare for the new table scan (e.g if + rnd_init() allocates the cursor, the second call should position the + cursor to the start of the table; no need to deallocate and allocate + it again. This is a required method. 
+ */ + int rnd_init(bool scan) __attribute__((__warn_unused_result__)); + int rnd_end() __attribute__((__warn_unused_result__)); + + int rnd_next(uchar *buf) __attribute__((__warn_unused_result__)); + int rnd_next_with_direction(uchar *buf, bool move_forward) + __attribute__((__warn_unused_result__)); + + int rnd_pos(uchar *buf, uchar *pos) __attribute__((__warn_unused_result__)); + void position(const uchar *record); + int info(uint) override; + + /* This function will always return success, therefore no annotation related + * to checking the return value. Can't change the signature because it's + * required by the interface. */ + int extra(enum ha_extra_function operation); + + int start_stmt(THD *thd, thr_lock_type lock_type) + __attribute__((__warn_unused_result__)); + int external_lock(THD *thd, int lock_type) + __attribute__((__warn_unused_result__)); + int delete_all_rows(ha_rows* nrows) __attribute__((__warn_unused_result__)); + int truncate() __attribute__((__warn_unused_result__)); + + int reset() override + { + /* Free blob data */ + m_retrieved_record.clear(); + return 0; + } + + int check(THD* thd, HA_CHECK_OPT* check_opt) + __attribute__((__warn_unused_result__)); + void remove_rows(Rdb_tbl_def *tbl); + ha_rows records_in_range(uint inx, key_range *min_key, + key_range *max_key) + __attribute__((__warn_unused_result__)); + int delete_table(const char *from) __attribute__((__warn_unused_result__)); + int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) + __attribute__((__warn_unused_result__)); + bool check_if_incompatible_data(HA_CREATE_INFO *info, + uint table_changes) + __attribute__((__warn_unused_result__)); + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) + __attribute__((__warn_unused_result__)); + + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) + { + /* Currently, we don't support 
query cache */ + return FALSE; + } + + bool get_error_message(const int error, String *buf) + __attribute__((__nonnull__)); + + void get_auto_increment(ulonglong offset, ulonglong increment, + ulonglong nb_desired_values, + ulonglong *first_value, + ulonglong *nb_reserved_values); + void update_create_info(HA_CREATE_INFO *create_info); + int optimize(THD *thd, HA_CHECK_OPT *check_opt) + __attribute__((__warn_unused_result__)); + int analyze(THD* thd, HA_CHECK_OPT* check_opt) + __attribute__((__warn_unused_result__)); + + enum_alter_inplace_result check_if_supported_inplace_alter( + TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info) override; + + bool prepare_inplace_alter_table(TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info); + + bool inplace_alter_table(TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info); + + bool commit_inplace_alter_table(TABLE *altered_table, + my_core::Alter_inplace_info *ha_alter_info, + bool commit); + + int finalize_bulk_load() __attribute__((__warn_unused_result__)); + + void set_use_read_free_rpl(const char* whitelist); + void set_skip_unique_check_tables(const char* whitelist); + + public: + virtual void rpl_before_delete_rows() override; + virtual void rpl_after_delete_rows() override; + virtual void rpl_before_update_rows() override; + virtual void rpl_after_update_rows() override; + virtual bool use_read_free_rpl(); + + private: + /* Flags tracking if we are inside different replication operation */ + bool m_in_rpl_delete_rows; + bool m_in_rpl_update_rows; + + bool m_force_skip_unique_check; +}; + +/* + Helper class for in-place alter, for storing handler context between inplace + alter calls +*/ +struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx +{ + /* The new table definition */ + Rdb_tbl_def* m_new_tdef; + + /* Stores the original key definitions */ + std::shared_ptr* m_old_key_descr; + + /* Stores the new key definitions */ + std::shared_ptr* 
m_new_key_descr; + + /* Stores the old number of key definitions */ + const uint m_old_n_keys; + + /* Stores the new number of key definitions */ + const uint m_new_n_keys; + + /* Stores the added key glids */ + std::unordered_set> m_added_indexes; + + /* Stores the dropped key glids */ + std::unordered_set m_dropped_index_ids; + + /* Stores number of keys to add */ + const uint m_n_added_keys; + + /* Stores number of keys to drop */ + const uint m_n_dropped_keys; + + Rdb_inplace_alter_ctx( + Rdb_tbl_def* new_tdef, std::shared_ptr* old_key_descr, + std::shared_ptr* new_key_descr, uint old_n_keys, + uint new_n_keys, + std::unordered_set> added_indexes, + std::unordered_set dropped_index_ids, + uint n_added_keys, uint n_dropped_keys) : + my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef), + m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr), + m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys), + m_added_indexes(added_indexes), + m_dropped_index_ids(dropped_index_ids), + m_n_added_keys(n_added_keys), + m_n_dropped_keys(n_dropped_keys) + { + } + + ~Rdb_inplace_alter_ctx(){} + + private: + /* Disable Copying */ + Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx&); + Rdb_inplace_alter_ctx& operator=(const Rdb_inplace_alter_ctx&); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h new file mode 100644 index 00000000000..df41d2de619 --- /dev/null +++ b/storage/rocksdb/ha_rocksdb_proto.h @@ -0,0 +1,80 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include + +/* MySQL header files */ +#include "./sql_string.h" + +/* RocksDB includes */ +#include "rocksdb/table.h" + +namespace myrocks { + +enum RDB_IO_ERROR_TYPE { + RDB_IO_ERROR_TX_COMMIT, + RDB_IO_ERROR_DICT_COMMIT, + RDB_IO_ERROR_BG_THREAD +}; + +void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type); + +int rdb_normalize_tablename(const std::string& tablename, std::string* str) + __attribute__((__nonnull__, __warn_unused_result__)); + +int rdb_split_normalized_tablename(const std::string& fullname, std::string *db, + std::string *table = nullptr, + std::string *partition = nullptr) + __attribute__((__warn_unused_result__)); + +std::vector rdb_get_open_table_names(void); + +int rdb_get_table_perf_counters(const char *tablename, + Rdb_perf_counters *counters) + __attribute__((__nonnull__(2))); + +void rdb_get_global_perf_counters(Rdb_perf_counters *counters) + __attribute__((__nonnull__(1))); + +void rdb_queue_save_stats_request(); + +/* + Access to singleton objects. 
+*/ + +rocksdb::DB *rdb_get_rocksdb_db(); + +class Rdb_cf_manager; +Rdb_cf_manager& rdb_get_cf_manager(); + +rocksdb::BlockBasedTableOptions& rdb_get_table_options(); + +class Rdb_dict_manager; +Rdb_dict_manager *rdb_get_dict_manager(void) + __attribute__((__warn_unused_result__)); + +class Rdb_ddl_manager; +Rdb_ddl_manager *rdb_get_ddl_manager(void) + __attribute__((__warn_unused_result__)); + +class Rdb_binlog_manager; +Rdb_binlog_manager *rdb_get_binlog_manager(void) + __attribute__((__warn_unused_result__)); + +} // namespace myrocks diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h new file mode 100644 index 00000000000..a1333e8dfd4 --- /dev/null +++ b/storage/rocksdb/logger.h @@ -0,0 +1,73 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include +#include +#include + +namespace myrocks { + +class Rdb_logger : public rocksdb::Logger +{ + public: + void Logv(const rocksdb::InfoLogLevel log_level, + const char* format, + va_list ap) override + { + DBUG_ASSERT(format != nullptr); + + enum loglevel mysql_log_level; + + if (m_logger) { + m_logger->Logv(log_level, format, ap); + } + + if (log_level < GetInfoLogLevel()) { + return; + } + + if (log_level >= rocksdb::InfoLogLevel::ERROR_LEVEL) { + mysql_log_level= ERROR_LEVEL; + } else if (log_level >= rocksdb::InfoLogLevel::WARN_LEVEL) { + mysql_log_level= WARNING_LEVEL; + } else { + mysql_log_level= INFORMATION_LEVEL; + } + + // log to MySQL + std::string f("LibRocksDB:"); + f.append(format); + error_log_print(mysql_log_level, f.c_str(), ap); + } + + void Logv(const char* format, va_list ap) override + { + DBUG_ASSERT(format != nullptr); + // If no level is specified, it is by default at information level + Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap); + } + + void SetRocksDBLogger(std::shared_ptr logger) + { + m_logger = logger; + } + + private: + std::shared_ptr m_logger; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc new file mode 100644 index 00000000000..82ceda1914d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc @@ -0,0 +1,69 @@ +let $max_table = 2; + +--disable_query_log +let $table = 1; +while ($table <= $max_table) { + let $max = 9; + let $i = 2; + while ($i <= $max) { + let $insert = INSERT INTO t$table VALUES ($i, $i, $i); + eval $insert; + inc $i; + } + inc $table; +} +--enable_query_log + +let $table = 1; +while ($table <= $max_table) { + let $i = 1; + let $j = 
9; + while ($i <= $max) { + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + inc $i; + inc $i; + inc $i; + inc $i; + } + + let $select = SELECT * FROM t$table; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3); + eval $select; + + inc $table; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc new file mode 100644 index 00000000000..6dc5a78e3a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc @@ -0,0 +1,51 @@ +# +# Check concurrent locking issues: +# Locking rows that do not exist when using all primary key columns in a +# WHERE clause +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case1_1.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 1.1: +--echo - Locking rows that do not exist when using all primary key columns in +--echo - a WHERE clause +--echo - using $isolation_level transaction isolation level +--echo 
----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t0 VALUES (1,5,0); + +--error ER_LOCK_WAIT_TIMEOUT +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc new file mode 100644 index 00000000000..13083bf82d9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc @@ -0,0 +1,48 @@ +# +# Check concurrent locking issues: +# Locking rows that do not exist without using all primary key columns in a +# WHERE clause +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case1_2.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 1.2: +--echo - Locking rows that do not exist without using all primary key +--echo - columns in a WHERE clause +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), 
(4,4,0), (6,6,0); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 WHERE id1=1 FOR UPDATE; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +INSERT INTO t0 VALUES (1,5,0); + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc new file mode 100644 index 00000000000..61c604dd6d3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc @@ -0,0 +1,97 @@ +# +# Check concurrent locking issues: +# Rows that are scanned but do not match the WHERE clause are not locked. +# +# To call this, set $isolation_level and call this file +# If you want to enable rocksdb_lock_scanned_rows set $lock_scanned_rows=1 +# +# let $isolation_level = REPEATABLE READ; +# let $lock_scanned_rows = 1 (optional) +# --source suite/rocksdb/include/locking_issues_case2.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 2: +--echo - Rows that are scanned but do not match the WHERE are not locked +--echo - using $isolation_level transaction isolation level unless +--echo - rocksdb_lock_scanned_rows is on +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +SELECT @@global.rocksdb_lock_scanned_rows; + +if ($lock_scanned_rows) +{ + let $original_val=query_get_value( + select @@global.rocksdb_lock_scanned_rows as val, val, 1); + SET GLOBAL rocksdb_lock_scanned_rows=ON; +} + +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); + 
+connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +if ($lock_scanned_rows == 1) +{ + connection con1; + # This is expected to leave locks on all the rows in t0 + SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + + connection con2; + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t0 SET VALUE=10 WHERE id=1; +} + +if ($lock_scanned_rows == 0) +{ + connection con1; + # This is expected to release locks on rows with value=0 + SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + + connection con2; + # This should succeed as con1 should have released the lock on row (1,0) + UPDATE t0 SET VALUE=10 WHERE id=1; + + # This should fail because lock on row (5,1) is still held. + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t0 SET VALUE=10 WHERE id=5; + + connection con1; + # Do another operation + UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; + + connection con2; + # Check that row (4,0) is still not locked + SELECT * FROM t0 WHERE id=4 FOR UPDATE; + + COMMIT; + SELECT * FROM t0; +} + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; + +if ($lock_scanned_rows == 1) +{ + eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc new file mode 100644 index 00000000000..bd9af241e5c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc @@ -0,0 +1,69 @@ +# +# Check concurrent locking issues: +# After creating a snapshot, other clients updating rows +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case3.inc +# + +--echo +--echo 
----------------------------------------------------------------------- +--echo - Locking issues case 3: +--echo - After creating a snapshot, other clients updating rows +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; + +connection con1; +--error ER_LOCK_DEADLOCK +reap; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc new file mode 100644 index 00000000000..da80f796750 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc @@ -0,0 +1,68 @@ +# +# Check concurrent locking issues: +# Phantom rows +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case4.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 4: +--echo - Phantom rows +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +INSERT INTO t0 VALUES(200001,1), (-1,1); + +connection con1; +reap; + +connection default; +disconnect con1; +disconnect con2; + +DROP 
TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc new file mode 100644 index 00000000000..3e4f6350b79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc @@ -0,0 +1,75 @@ +# +# Check concurrent locking issues: +# Deleting primary key +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case5.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 5: +--echo - Deleting primary key +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +UPDATE t0 SET value=100 WHERE id=190000; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM 
information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +DELETE FROM t0 WHERE id=190000; +COMMIT; + +connection con1; +--error ER_LOCK_DEADLOCK +reap; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc new file mode 100644 index 00000000000..4cb5cae15aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc @@ -0,0 +1,75 @@ +# +# Check concurrent locking issues: +# Changing primary key +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case6.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 6: +--echo - Changing primary key +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +UPDATE t0 
SET value=100 WHERE id=190000; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; + +connection con1; +--error ER_LOCK_DEADLOCK +reap; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc new file mode 100644 index 00000000000..d71d398982e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc @@ -0,0 +1,89 @@ +# +# Check concurrent locking issues: +# Rows scanned but are not in the updated table should be locked when +# rocksdb_lock_scanned_rows is on but not locked otherwise. 
+# +# To call this, set $isolation_level and $lock_scanned_rows and call this file +# +# let $isolation_level = REPEATABLE READ; +# let $lock_scanned_rows = 0 (or 1) +# --source suite/rocksdb/include/locking_issues_case7.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 7: +--echo - Rows that are scanned as part of a query but not in the table being +--echo - updated should not be locked unless rocksdb_lock_scanned_rows is on +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +SELECT @@global.rocksdb_lock_scanned_rows; + +if ($lock_scanned_rows) +{ + let $original_val=query_get_value( + select @@global.rocksdb_lock_scanned_rows as val, val, 1); + SET GLOBAL rocksdb_lock_scanned_rows=ON; +} + +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +--echo lock_scanned_rows is $lock_scanned_rows +if ($lock_scanned_rows == 1) +{ + connection con1; + # This is expected to leave a lock id=3 in t2; + UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; + + connection con2; + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t2 SET value=value+100 WHERE id=3; + + # No other row in t2 should be locked; + UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); + SELECT * FROM t2; +} + +if ($lock_scanned_rows == 0) +{ + connection con1; + # This should leave no locks on any row in t2; + UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; + + connection con2; + UPDATE t2 SET 
value=value+100; + SELECT * FROM t2; +} + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t1; +DROP TABLE t2; + +if ($lock_scanned_rows == 1) +{ + eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc new file mode 100644 index 00000000000..71e713226d7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc @@ -0,0 +1,53 @@ +# Usage: +# +# let $order = ASC; # or DESC +# let $comment = "rev:cf2"; # or "" +# --source suite/rocksdb/include/rocksdb_concurrent_delete.inc + +let $first_row = -1; # Error this should never happen +if ($order == 'ASC') +{ + let $first_row = 1; +} +if ($order == 'DESC') +{ + let $first_row = 3; +} + +connect (con, localhost, root,,); +connection default; + +--disable_warnings +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT $comment, a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); + +# This will cause the SELECT to block after finding the first row, but +# before locking and reading it. +connection con; +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +send_eval SELECT * FROM t1 order by t1.pk $order FOR UPDATE; + +# While that connection is waiting, delete the first row (the one con +# is about to lock and read +connection default; +SET debug_sync='now WAIT_FOR parked'; +eval DELETE FROM t1 WHERE pk = $first_row; + +# Signal the waiting select to continue +SET debug_sync='now SIGNAL go'; + +# Now get the results from the select. The first entry (1,1) (or (3,3) when +# using reverse ordering) should be missing. 
Prior to the fix the SELECT +# would have returned: "1815: Internal error: NotFound:" +connection con; +reap; + +# Cleanup +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf new file mode 100644 index 00000000000..2ed68088259 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -0,0 +1,7 @@ +!include include/default_my.cnf + +[server] +sql-mode=NO_ENGINE_SUBSTITUTION +explicit-defaults-for-timestamp=1 +rocksdb_lock_wait_timeout=1 +rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl b/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl new file mode 100644 index 00000000000..8199d5051df --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl @@ -0,0 +1,22 @@ +#!/usr/bin/perl + +die unless($ARGV[0]); +open(my $f, "<", $ARGV[0]) or die $!; +my @sst; +while(my $l = readline($f)) { + chomp($l); + push @sst, int($l); +} + +for(my $i= 0; $i < $#sst; $i++) { + printf("checking sst file reduction on optimize table from %d to %d..\n", $i, $i+1); + + if($sst[$i] - 1000 < $sst[$i+1]) { + printf("sst file reduction was not enough. 
%d->%d (minimum 1000kb)\n", $sst[$i], $sst[$i+1]); + die; + }else { + print "ok.\n"; + } +} +exit(0); + diff --git a/storage/rocksdb/mysql-test/rocksdb/r/1st.result b/storage/rocksdb/mysql-test/rocksdb/r/1st.result new file mode 100644 index 00000000000..7d1e8607645 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/1st.result @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL DEFAULT '0', + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI 0 +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 VALUES (1, 1,'a'); +INSERT INTO t1 (a,b) VALUES (2,'b'); +SELECT a,b FROM t1; +a b +1 a +2 b +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result new file mode 100644 index 00000000000..4a707d3a6f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -0,0 +1,378 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `ka` (`a`), + KEY `kab` (`a`,`b`), + KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO 
t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kba` (`b`,`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +a b +3 7 
+DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `ka` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5; +a b +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + 
`b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `ka` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kab` (`a`,`b`) 
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +CREATE INDEX kb on t1 (b); +CREATE INDEX kba on t1 (b,a); +DROP INDEX ka on t1; +DROP INDEX kab on t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kba` (`b`,`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +DROP INDEX kij ON t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +# crash_during_online_index_creation +flush logs; +SET SESSION debug="+d,crash_during_online_index_creation"; +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION 
debug="-d,crash_during_online_index_creation"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `ka` (`a`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION debug="+d,crash_during_index_creation_partition"; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug="-d,crash_during_index_creation_partition"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; +# expected assertion failure from sql layer here for alter rollback +call mtr.add_suppression("Assertion `0' failed."); +call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b TEXT); +ALTER TABLE t1 ADD KEY kb(b(10)); +ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (binary, latin1_bin, utf8_bin). 
+ALTER TABLE t1 ADD PRIMARY KEY(a); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result new file mode 100644 index 00000000000..2d1ba7ca1d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result @@ -0,0 +1,72 @@ +drop table if exists t1; +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin'; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +set rocksdb_bulk_load=0; +select count(pk) from t1; +count(pk) +3000000 +select count(a) from t1; +count(a) +3000000 +select count(b) from t1; +count(b) +3000000 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +SELECT COUNT(*) as c FROM +(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`) +UNION DISTINCT +SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', +`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE +INDEX(`kb_copy`)) as temp; +c +1 +select count(*) from t1 FORCE INDEX(kb); +count(*) +3000000 +select count(*) from t1 FORCE INDEX(kb_copy); +count(*) +3000000 +select count(*) from t1 FORCE INDEX(PRIMARY); +count(*) +3000000 +ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(kab); +COUNT(*) +3000000 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +3000000 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(5) COLLATE latin1_bin NOT NULL, + `a` char(30) COLLATE latin1_bin DEFAULT NULL, + `b` char(30) COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kb` (`b`), + KEY `kab` (`a`,`b`) +) 
ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`) COMMENT 'rev:cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +3 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result new file mode 100644 index 00000000000..4fef9bce405 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result @@ -0,0 +1,7 @@ +drop table if exists t1; +# Binary must be compiled with debug for this test +CREATE TABLE t1 (a INT) ENGINE=rocksdb; +SELECT COUNT(*) from t1; +COUNT(*) +400 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result new file mode 100644 index 00000000000..34a14ff39d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result @@ -0,0 +1,251 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE 
a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,'a'),(5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a c b +1 a NULL +5 z NULL +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a c +1 a +5 z +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT 
INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +Table Op Msg_type Msg_text +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +Table Op Msg_type Msg_text +test.t2 check status OK +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +Table Op Msg_type 
Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; +ERROR HY000: Unique index support is disabled when the table has no primary key. +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (36,'foo'); +DELETE FROM t1 WHERE a = 35 AND b = 'foo'; +SELECT * FROM t1; +a b +36 foo +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result new file mode 100644 index 00000000000..f8508febb01 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result @@ -0,0 +1,780 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; 
+a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +ALTER TABLE t1 ADD INDEX (b); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; 
+SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +ALTER TABLE t1 DROP INDEX b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) 
VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +# +# MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column +# +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB; +ALTER TABLE t1 DROP COLUMN `pk`; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`), + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz 
+SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM 
t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`), + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 
check status OK +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + `d` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `kbc` (`b`,`c`), + KEY `kabc` (`a`,`b`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b int(11) YES MUL NULL +c int(11) YES NULL +d int(11) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4); +INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8); +INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13); +INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17); +SELECT * FROM t1; +a b c d +1 2 3 4 +10 11 12 13 +14 15 16 17 +5 6 7 8 +SELECT * FROM t1 WHERE a = 1 OR a = 10; +a b c d +1 2 3 4 +10 11 12 13 +SELECT * FROM t1 WHERE c = 3 OR d = 17; +a b c d +1 2 3 4 +14 15 16 17 +SELECT * FROM t1 WHERE a > 5 OR d > 5; +a b c d +10 11 12 13 +14 15 16 17 +5 6 7 8 +SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11; +a b c +1 2 3 +10 11 12 +SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12; +d +17 +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-100, b=99 WHERE a>100; +SELECT * FROM t1; +a b c d +1 99 3 4 +10 99 12 13 +14 99 16 17 +5 99 7 8 +DELETE FROM 
t1 WHERE a>5; +DELETE FROM t1 WHERE b=99 AND d>4; +SELECT * FROM t1; +a b c d +1 99 3 4 +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b) +comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `ka` (`a`) COMMENT 'rev:cf1', + KEY `kb` (`b`) COMMENT 'rev:cf1', + KEY `kab` (`a`,`b`) COMMENT 'rev:cf2' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB; +INSERT INTO t1 (col1, col2) values (2,2); +ALTER TABLE t1 ADD COLUMN extra INT; +UPDATE t1 SET col2 = 1; +select * from t1; +col1 col2 extra +2 1 NULL +DELETE FROM 
t1 WHERE col1 = 2; +set global rocksdb_force_flush_memtable_now = true; +select * from t1; +col1 col2 extra +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result new file mode 100644 index 00000000000..d15566f5a2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result @@ -0,0 +1 @@ + RocksDB: Can't disable allow_os_buffer if allow_mmap_reads is enabled diff --git a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result new file mode 100644 index 00000000000..b37bf17e1ac --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result @@ -0,0 +1,183 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT '0', + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ALTER a DROP DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b1` char(8) DEFAULT NULL, + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER 
TABLE t1 CHANGE b1 b INT AFTER c; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE b b CHAR(8); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b` char(8) DEFAULT NULL, + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b INT AFTER a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `b` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 RENAME TO t2; +SHOW CREATE TABLE t1; +ERROR 42S02: Table 'test.t1' doesn't exist +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3); +SHOW CREATE TABLE 
t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC; +Warnings: +Warning 1105 ORDER BY ignored as there is a user-defined clustered index in the table 't1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +pk a b +1 1 5 +2 2 2 +3 4 3 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs; +INSERT INTO t1 VALUES (1,5,'z','t'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) COLLATE latin1_general_cs DEFAULT NULL, + `c` char(8) COLLATE latin1_general_cs DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 +ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) CHARACTER SET utf8 DEFAULT NULL, + `c` char(8) CHARACTER SET utf8 DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +ALTER TABLE t1 FORCE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) CHARACTER SET utf8 DEFAULT NULL, + `c` char(8) CHARACTER SET utf8 DEFAULT NULL, + 
PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result new file mode 100644 index 00000000000..ff2973230db --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b'); +CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (3,3,'c'); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +INSERT INTO t2 VALUES (1,4,'d'); +ANALYZE NO_WRITE_TO_BINLOG TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status OK +INSERT INTO t1 VALUES (4,5,'e'); +INSERT INTO t2 VALUES (2,6,'f'); +ANALYZE LOCAL TABLE t1, t2; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result new file mode 100644 index 00000000000..a5d81031cd2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY, +key1 INT NOT NULL, +KEY (key1) +) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (12,12); +INSERT INTO t1 VALUES (6,6); +BEGIN; +INSERT INTO t1 VALUES (8,8), (10,10); +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +pk 
key1 +10 10 +8 8 +6 6 +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +pk key1 +6 6 +8 8 +10 10 +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +pk key1 +6 6 +8 8 +10 10 +ROLLBACK; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY, +key1 INT NOT NULL, +KEY (key1) COMMENT 'rev:cf' +) ENGINE=ROCKSDB; +INSERT INTO t2 VALUES (12,12); +INSERT INTO t2 VALUES (6,6); +BEGIN; +INSERT INTO t2 VALUES (8,8), (10,10); +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +pk key1 +6 6 +8 8 +10 10 +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +pk key1 +6 6 +8 8 +10 10 +ROLLBACK; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result new file mode 100644 index 00000000000..100bc5fd638 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (pk) VALUES (3), (2), (1); +SELECT * FROM t1; +pk a +3 1 +2 2 +1 3 +INSERT INTO t1 (pk) VALUES (4); +SELECT * FROM t1; +pk a +3 1 +2 2 +1 3 +4 4 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result new file mode 100644 index 00000000000..b14a7a4c0a9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS t1; +#--------------------------- +# auto_increment_offset +#--------------------------- +SET 
auto_increment_offset = 200; +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c'); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +#--------------------------- +# auto_increment_increment +#--------------------------- +SET auto_increment_increment = 300; +INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f'); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +200 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +200 d +500 e +800 f +SET auto_increment_increment = 50; +INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i'); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +850 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +200 d +500 e +800 f +850 g +900 h +950 i +DROP TABLE t1; +#--------------------------- +# offset is greater than the max value +#--------------------------- +SET auto_increment_increment = 500; +SET auto_increment_offset = 300; +CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (NULL); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +127 +SELECT a FROM t1 ORDER BY a; +a +127 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result new file mode 100644 index 00000000000..28b5b6cd070 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result @@ -0,0 +1 @@ +# The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. 
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result new file mode 100644 index 00000000000..d65a4efea30 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -0,0 +1,1235 @@ +CREATE PROCEDURE bloom_start() +BEGIN +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +drop table if exists t1; +Warnings: +Note 1051 Unknown table 'test.t1' +drop table if exists t2; +Warnings: +Note 1051 Unknown table 'test.t2' +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4), +index id2 (id2), +index id2_id1 (id2, id1), +index id2_id3 (id2, id3), +index id2_id4 (id2, id4), +index id2_id3_id1_id4 (id2, id3, id1, id4), +index id3_id2 (id3, id2) +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4), +index id2 (id2), +index id2_id3 (id2, id3), +index id2_id4 (id2, id4), +index id2_id4_id5 (id2, id4, id5), +index id3_id4 (id3, id4), +index id3_id5 (id3, id5) +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call 
bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); 
+checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) 
from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); 
+checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 
'cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and 
id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 
force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call 
bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +true +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; 
+create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true 
+call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; 
+count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; 
+count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +call bloom_start(); 
+select * from r1 where id1=1 and id2 in (1) order by id3 asc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +true +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +truncate table t2; +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result new file mode 100644 index 00000000000..d5369e2dbed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result @@ -0,0 +1,71 @@ +CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1; +COUNT(*) +10000 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t0; +CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2; +COUNT(*) +9999 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +select 
variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2; +COUNT(*) +9999 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t1; +CREATE TABLE t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); +count(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200'); +count(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200'); +count(*) +1 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t2; +CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 
BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1; +COUNT(*) +0 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500; +COUNT(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result new file mode 100644 index 00000000000..c7b5c42f2b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result @@ -0,0 +1,122 @@ +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL 
DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type', +KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2', +KEY `id1_type3` (`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; 
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +## HA_READ_PREFIX_LAST +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) 
where id1 = 100 and link_type = 1 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +# BF len 19 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +# BF len 12 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +DROP TABLE linktable; +# +# bloom filter prefix is 20 byte +# Create a key which is longer than that, so that we see that +# eq_cond_len= slice.size() - 1; +# doesnt work. 
+# +# indexnr 4 +# kp0 + 4 = 8 +# kp1 + 8 = 16 +# kp2 + 8 = 24 24>20 byte length prefix +# kp3 + 8 = 28 +create table t1 ( +pk int primary key, +kp0 int not null, +kp1 bigint not null, +kp2 bigint not null, +kp3 bigint not null, +key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1' +) engine=rocksdb; +insert into t1 values (1, 1,1, 1,1); +insert into t1 values (10,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (11,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (20,2,2,0x12FFFFFFFFFF,1); +insert into t1 values (21,2,2,0x12FFFFFFFFFF,1); +explain +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index kp12 kp12 28 NULL # Using where; Using index +show status like '%rocksdb_bloom_filter_prefix%'; +Variable_name Value +rocksdb_bloom_filter_prefix_checked 0 +rocksdb_bloom_filter_prefix_useful 0 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +pk kp0 kp1 kp2 kp3 +11 1 1 20890720927743 1 +10 1 1 20890720927743 1 +show status like '%rocksdb_bloom_filter_prefix%'; +Variable_name Value +rocksdb_bloom_filter_prefix_checked 0 +rocksdb_bloom_filter_prefix_useful 0 +# The following MUST show TRUE: +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result new file mode 100644 index 00000000000..1f4d1a641a2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result @@ -0,0 +1,30 @@ +CREATE TABLE t1 ( +`id1` int unsigned NOT NULL DEFAULT '0', +`id2` int unsigned NOT NULL DEFAULT '0', 
+`link_type` int unsigned NOT NULL DEFAULT '0', +`visibility` tinyint NOT NULL DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` int unsigned NOT NULL DEFAULT '0', +`version` int unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +CREATE PROCEDURE select_test() +BEGIN +DECLARE id1_cond INT; +SET id1_cond = 1; +WHILE id1_cond <= 20000 DO +SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt; +IF @cnt < 1 THEN +SELECT id1_cond, @cnt; +END IF; +SET id1_cond = id1_cond + 1; +END WHILE; +END// +"Skipping bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=1; +CALL select_test(); +"Using bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=0; +CALL select_test(); +DROP PROCEDURE select_test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result new file mode 100644 index 00000000000..af7feaf8682 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -0,0 +1,1235 @@ +CREATE PROCEDURE bloom_start() +BEGIN +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +drop table if exists t1; +Warnings: +Note 1051 Unknown table 'test.t1' +drop table if exists t2; +Warnings: +Note 1051 Unknown table 'test.t2' +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 
int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4), +index id2 (id2), +index id2_id1 (id2, id1), +index id2_id3 (id2, id3), +index id2_id4 (id2, id4), +index id2_id3_id1_id4 (id2, id3, id1, id4), +index id3_id2 (id3, id2) +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4), +index id2 (id2), +index id2_id3 (id2, id3), +index id2_id4 (id2, id4), +index id2_id4_id5 (id2, id4, id5), +index id3_id4 (id3, id4), +index id3_id5 (id3, id5) +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select 
count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call 
bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 
+call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; 
+create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked 
+false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); 
+select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select 
count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call 
bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) 
+10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 
+call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false 
+call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and 
id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 asc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +truncate table t2; +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result new file mode 100644 index 00000000000..4440cb3ea8d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), 
key(a)) COLLATE 'latin1_bin'; +CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; +CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' + PARTITION BY KEY() PARTITIONS 4; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +select count(pk) from t1; +count(pk) +10000000 +select count(a) from t1; +count(a) +10000000 +select count(b) from t1; +count(b) +10000000 +select count(pk) from t2; +count(pk) +10000000 +select count(a) from t2; +count(a) +10000000 +select count(b) from t2; +count(b) +10000000 +select count(pk) from t3; +count(pk) +10000000 +select count(a) from t3; +count(a) +10000000 +select count(b) from t3; +count(b) +10000000 +longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp +test.bulk_load.tmp +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result new file mode 100644 index 00000000000..840ad9a723c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS t1; +create table t1( +id bigint not null primary key, +i1 bigint, #unique +i2 bigint, #repeating +c1 varchar(20), #unique +c2 varchar(20), #repeating +index t1_1(id, i1), +index t1_2(i1, i2), +index t1_3(i2, i1), +index t1_4(c1, c2), +index t1_5(c2, c1) +) engine=rocksdb; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +show index in t1; +Table Non_unique 
Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_5 1 c2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +restarting... +show index in t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_5 1 c2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/check_table.result b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result new file mode 100644 index 00000000000..116c168c4da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); 
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +Table Op Msg_type Msg_text +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +Table Op Msg_type Msg_text +test.t2 check status OK +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +Table Op 
Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result new file mode 100644 index 00000000000..fd1ac63629f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +CREATE TABLE t5 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]'; +CURRENT +SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]'; +CURRENT +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +truncate table t3; +optimize table t3; +truncate table t4; +optimize table t4; +truncate table t5; +optimize table t5; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result new file mode 100644 index 00000000000..06a4c3f6f1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result @@ -0,0 +1,87 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a 
INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +CHECKSUM TABLE t1; +Table Checksum +test.t1 4259194219 +CHECKSUM TABLE t2, t1; +Table Checksum +test.t2 0 +test.t1 4259194219 +CHECKSUM TABLE t1, t2 QUICK; +Table Checksum +test.t1 NULL +test.t2 NULL +CHECKSUM TABLE t1, t2 EXTENDED; +Table Checksum +test.t1 4259194219 +test.t2 0 +DROP TABLE t1, t2; +# +# Issue #110: SQL command checksum returns inconsistent result +# +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +select * from t1 where pk=2; +pk col1 +2 fooo +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +flush tables; +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +drop table t1; +# +# The following test is about making sure MyRocks CHECKSUM TABLE +# values are the same as with InnoDB. +# If you see checksum values changed, make sure their counterparts +# in suite/innodb/r/checksum-matches-myrocks.result match. 
+# +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +Table Checksum +test.t1 1303411884 +drop table t1; +create table t1 ( +pk bigint unsigned primary key, +col1 varchar(10), +col2 tinyint, +col3 double +) engine=rocksdb; +checksum table t1; +Table Checksum +test.t1 0 +insert into t1 values (1, NULL, NULL, NULL); +insert into t1 values (2, 'foo', NULL, NULL); +checksum table t1; +Table Checksum +test.t1 3633741545 +insert into t1 values (3, NULL, 123, NULL); +insert into t1 values (4, NULL, NULL, 2.78); +checksum table t1; +Table Checksum +test.t1 390004011 +insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78); +insert into t1 values (6, '', NULL, 2.78); +checksum table t1; +Table Checksum +test.t1 3183101003 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result new file mode 100644 index 00000000000..fb86c0af260 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +CHECKSUM TABLE t1; +Table Checksum +test.t1 4259194219 +CHECKSUM TABLE t2, t1; +Table Checksum +test.t2 0 +test.t1 4259194219 +CHECKSUM TABLE t1, t2 QUICK; +Table Checksum +test.t1 NULL +test.t2 NULL +CHECKSUM TABLE t1, t2 EXTENDED; +Table Checksum +test.t1 4259194219 +test.t2 0 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result new file mode 100644 index 00000000000..af53f061753 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a 
INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) NO PRI 0 +INSERT INTO t1 (a) VALUES (1); +SELECT a FROM t1; +a +1 +ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT ''; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) NO PRI 0 +b char(8) YES +INSERT INTO t1 (b) VALUES ('a'); +SELECT a,b FROM t1 ORDER BY a,b; +a b +0 a +1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result new file mode 100644 index 00000000000..005aa1e2989 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result @@ -0,0 +1,2612 @@ +######################## +# BINARY columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY NOT NULL, +b0 BINARY(0) NOT NULL, +b1 BINARY(1) NOT NULL, +b20 BINARY(20) NOT NULL, +b255 BINARY(255) NOT NULL, +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) NO NULL +b0 binary(0) NO NULL +b1 binary(1) NO NULL +b20 binary(20) NO NULL +b255 binary(255) NO NULL +pk binary(1) NO PRI NULL +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 
0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NOT NULL; +ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) NO NULL +b0 binary(0) NO NULL +b1 binary(1) NO NULL +b20 binary(20) NO NULL +b255 binary(255) NO NULL +pk 
binary(1) NO PRI NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BINARY NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BINARY NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c binary(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +30 +DROP TABLE t1; +#---------------------------------- +# BINARY NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BINARY NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c binary(1) NO 0 +ALTER TABLE t1 ADD COLUMN err BINARY NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 30 +2 30 +DROP TABLE t1; +######################## +# VARBINARY columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) NOT NULL, +v1 VARBINARY(1) NOT NULL, +v64 VARBINARY(64) NOT NULL, +v65000 VARBINARY(65000) NOT NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) NO NULL +v1 varbinary(1) NO NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) NO NULL +CREATE TABLE t2 (v VARBINARY(65532) NOT NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here 
is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. 
Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 
486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D617269614442204372
61736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C20
72656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', 
REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NOT NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) NO NULL +v1 varbinary(1) NO NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) NO NULL +v65536 mediumblob NO NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# VARBINARY(64) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT 
PRIMARY KEY, c VARBINARY(64) NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varbinary(64) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test'); +SELECT HEX(c) FROM t1; +HEX(c) +74657374 +DROP TABLE t1; +#---------------------------------- +# VARBINARY(64) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c VARBINARY(64) NOT NULL DEFAULT 'test' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varbinary(64) NO test +ALTER TABLE t1 ADD COLUMN err VARBINARY(64) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 74657374 +2 74657374 +DROP TABLE t1; +######################## +# BIT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT NOT NULL, +b BIT(20) NOT NULL, +c BIT(64) NOT NULL, +d BIT(1) NOT NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) NO NULL +b bit(20) NO NULL +c bit(64) NO PRI NULL +d bit(1) NO NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) NOT NULL; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) NO NULL +b bit(20) NO NULL +c bit(64) NO PRI NULL +d bit(1) NO NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 
+INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NOT NULL) ENGINE=rocksdb; +ERROR 42000: Display width out of range for column 'a' (max = 64) +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BIT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bit(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# BIT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BIT NOT NULL DEFAULT 1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bit(1) NO b'1' +ALTER TABLE t1 ADD COLUMN err BIT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +######################## +# BLOB columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT 
AUTO_INCREMENT PRIMARY KEY, +b BLOB NOT NULL, +b0 BLOB(0) NOT NULL, +b1 BLOB(1) NOT NULL, +b300 BLOB(300) NOT NULL, +bm BLOB(65535) NOT NULL, +b70k BLOB(70000) NOT NULL, +b17m BLOB(17000000) NOT NULL, +t TINYBLOB NOT NULL, +m MEDIUMBLOB NOT NULL, +l LONGBLOB NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob NO NULL +b0 blob NO NULL +b1 tinyblob NO NULL +b300 blob NO NULL +bm blob NO NULL +b70k mediumblob NO NULL +b17m longblob NO NULL +t tinyblob NO NULL +m mediumblob NO NULL +l longblob NO NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), 
LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c blob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# BLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c blob NO NULL +ALTER TABLE t1 ADD COLUMN err BLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYBLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYBLOB NOT 
NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TINYBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyblob NO NULL +ALTER TABLE t1 ADD COLUMN err TINYBLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMBLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMBLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# MEDIUMBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumblob NO NULL +ALTER TABLE t1 ADD COLUMN err 
MEDIUMBLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# LONGBLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGBLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# LONGBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c LONGBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longblob NO NULL +ALTER TABLE t1 ADD COLUMN err LONGBLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +######################## +# BOOL columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL NOT NULL, +b2 BOOLEAN NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) NO NULL +b2 tinyint(1) NO NULL +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); 
+SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +INSERT INTO t1 (b1,b2) VALUES (2,3); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NOT NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NOT NULL' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NOT NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NOT NULL' at line 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BOOL NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BOOL NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('0'); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# BOOL NOT NULL columns with a 
default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BOOL NOT NULL DEFAULT '0' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(1) NO 0 +ALTER TABLE t1 ADD COLUMN err BOOL NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('0'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# CHAR columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NOT NULL, +c0 CHAR(0) NOT NULL, +c1 CHAR(1) NOT NULL, +c20 CHAR(20) NOT NULL, +c255 CHAR(255) NOT NULL, +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) NO NULL +c0 char(0) NO NULL +c1 char(1) NO NULL +c20 char(20) NO NULL +c255 char(255) NO PRI NULL +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NOT NULL; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# CHAR NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c char(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('_'); +SELECT HEX(c) FROM t1; +HEX(c) +5F +DROP TABLE t1; +#---------------------------------- +# CHAR NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c CHAR NOT NULL DEFAULT '_' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c char(1) NO _ +ALTER TABLE t1 ADD COLUMN err CHAR NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('_'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 5F +2 5F +DROP TABLE t1; +######################## +# VARCHAR columns +######################## +DROP TABLE IF EXISTS t1, t2; 
+CREATE TABLE t1 ( +v0 VARCHAR(0) NOT NULL, +v1 VARCHAR(1) NOT NULL, +v64 VARCHAR(64) NOT NULL, +v65000 VARCHAR(65000) NOT NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) NO NULL +v1 varchar(1) NO NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) NO NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! 
+ + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. 
Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. 
+ * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. 
We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NOT NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) NO NULL +v1 varchar(1) NO NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) NO NULL +v65536 mediumtext NO NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# VARCHAR(64) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c VARCHAR(64) NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varchar(64) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test default'); +SELECT HEX(c) FROM t1; +HEX(c) +746573742064656661756C74 +DROP TABLE t1; +#---------------------------------- +# VARCHAR(64) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c VARCHAR(64) NOT NULL DEFAULT 'test default' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varchar(64) NO test default +ALTER TABLE t1 ADD COLUMN err VARCHAR(64) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test default'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 746573742064656661756C74 +2 746573742064656661756C74 +DROP TABLE t1; +######################## +# date and time columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 
( +d DATE NOT NULL, +dt DATETIME NOT NULL, +ts TIMESTAMP NOT NULL, +t TIME NOT NULL, +y YEAR NOT NULL, +y4 YEAR(4) NOT NULL, +y2 YEAR(2) NOT NULL, +pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date NO NULL +dt datetime NO NULL +ts timestamp NO NULL +t time NO NULL +y year(4) NO NULL +y4 year(4) NO NULL +y2 year(4) NO NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1264 Out of range value for column 'dt' at row 1 +Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 +Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +DROP TABLE t1; +SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DATE NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATE NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c date NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21'); +SELECT HEX(c) FROM t1; +HEX(c) +323031322D31322D3231 +DROP TABLE t1; +#---------------------------------- +# DATE NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DATE NOT NULL DEFAULT '2012-12-21' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c date NO 2012-12-21 +ALTER TABLE t1 ADD COLUMN err DATE NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031322D31322D3231 +2 323031322D31322D3231 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DATETIME NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATETIME NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) 
NO PRI NULL auto_increment +c datetime NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# DATETIME NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DATETIME NOT NULL DEFAULT '2012-12-21 12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c datetime NO 2012-12-21 12:21:12 +ALTER TABLE t1 ADD COLUMN err DATETIME NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031322D31322D32312031323A32313A3132 +2 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TIMESTAMP NOT NULL column without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIMESTAMP NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c timestamp NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# TIMESTAMP NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TIMESTAMP NOT NULL DEFAULT '2012-12-21 12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c timestamp NO 2012-12-21 12:21:12 
+ALTER TABLE t1 ADD COLUMN err TIMESTAMP NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031322D31322D32312031323A32313A3132 +2 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TIME NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIME NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c time NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +31323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# TIME NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TIME NOT NULL DEFAULT '12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c time NO 12:21:12 +ALTER TABLE t1 ADD COLUMN err TIME NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 31323A32313A3132 +2 31323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# YEAR NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO NULL +INSERT INTO t1 (c) 
VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012'); +SELECT HEX(c) FROM t1; +HEX(c) +7DC +DROP TABLE t1; +#---------------------------------- +# YEAR NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c YEAR NOT NULL DEFAULT '2012' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO 2012 +ALTER TABLE t1 ADD COLUMN err YEAR NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7DC +2 7DC +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# YEAR(2) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR(2) NOT NULL) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12'); +SELECT HEX(c) FROM t1; +HEX(c) +7DC +DROP TABLE t1; +#---------------------------------- +# YEAR(2) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c YEAR(2) NOT NULL DEFAULT '12' +) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. 
+SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO 2012 +ALTER TABLE t1 ADD COLUMN err YEAR(2) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7DC +2 7DC +DROP TABLE t1; +######################## +# ENUM columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') NOT NULL, +b ENUM('test1','test2','test3','test4','test5') NOT NULL, +c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NOT NULL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a 
enum('') NO NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NOT NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') NO NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL +e enum('a','A') NO NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + a + test2 4 a + test3 75 a + test5 2 a +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 a +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# ENUM('test1','test2','test3') NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c ENUM('test1','test2','test3') NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c enum('test1','test2','test3') NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null 
+INSERT INTO t1 (c) VALUES ('test2'); +SELECT HEX(c) FROM t1; +HEX(c) +7465737432 +DROP TABLE t1; +#---------------------------------- +# ENUM('test1','test2','test3') NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c ENUM('test1','test2','test3') NOT NULL DEFAULT 'test2' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c enum('test1','test2','test3') NO test2 +ALTER TABLE t1 ADD COLUMN err ENUM('test1','test2','test3') NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7465737432 +2 7465737432 +DROP TABLE t1; +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL NOT NULL, +d0 DECIMAL(0) NOT NULL, +d1_1 DECIMAL(1,1) NOT NULL, +d10_2 DECIMAL(10,2) NOT NULL, +d60_10 DECIMAL(60,10) NOT NULL, +n NUMERIC NOT NULL, +n0_0 NUMERIC(0,0) NOT NULL, +n1 NUMERIC(1) NOT NULL, +n20_4 NUMERIC(20,4) NOT NULL, +n65_4 NUMERIC(65,4) NOT NULL, +pk NUMERIC NOT NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) NO NULL +d0 decimal(10,0) NO NULL +d1_1 decimal(1,1) NO NULL +d10_2 decimal(10,2) NO NULL +d60_10 decimal(60,10) NO NULL +n decimal(10,0) NO NULL +n0_0 decimal(10,0) NO NULL +n1 decimal(1,0) NO NULL +n20_4 decimal(20,4) NO NULL +n65_4 decimal(65,4) NO NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 
(d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 
9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for 
column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 
'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at 
row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NOT NULL; +ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NOT NULL; +ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. 
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NOT NULL; +ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DECIMAL NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DECIMAL NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# DECIMAL NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DECIMAL NOT NULL DEFAULT 1.1 +) ENGINE=rocksdb; +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO 1 +ALTER TABLE t1 ADD COLUMN err DECIMAL NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# NUMERIC NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c NUMERIC NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; 
+HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# NUMERIC NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c NUMERIC NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO 0 +ALTER TABLE t1 ADD COLUMN err NUMERIC NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT NOT NULL, +f0 FLOAT(0) NOT NULL, +r1_1 REAL(1,1) NOT NULL, +f23_0 FLOAT(23) NOT NULL, +f20_3 FLOAT(20,3) NOT NULL, +d DOUBLE NOT NULL, +d1_0 DOUBLE(1,0) NOT NULL, +d10_10 DOUBLE PRECISION (10,10) NOT NULL, +d53 DOUBLE(53,0) NOT NULL, +d53_10 DOUBLE(53,10) NOT NULL, +pk DOUBLE NOT NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float NO NULL +f0 float NO NULL +r1_1 double(1,1) NO NULL +f23_0 float NO NULL +f20_3 float(20,3) NO NULL +d double NO NULL +d1_0 double(1,0) NO NULL +d10_10 double(10,10) NO NULL +d53 double(53,0) NO NULL +d53_10 double(53,10) NO NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES 
(0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 
-10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for 
column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range 
value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NOT NULL; +ERROR 42000: Display width out of range for column 'd0_0' (max = 255) +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NOT NULL; +ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. 
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NOT NULL; +ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# FLOAT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c FLOAT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c float NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# FLOAT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c FLOAT NOT NULL DEFAULT 1.1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c float NO 1.1 +ALTER TABLE t1 ADD COLUMN err FLOAT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DOUBLE NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DOUBLE NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c double NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# DOUBLE NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DOUBLE NOT 
NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c double NO 0 +ALTER TABLE t1 ADD COLUMN err DOUBLE NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT NOT NULL, +i0 INT(0) NOT NULL, +i1 INT(1) NOT NULL, +i20 INT(20) NOT NULL, +t TINYINT NOT NULL, +t0 TINYINT(0) NOT NULL, +t1 TINYINT(1) NOT NULL, +t20 TINYINT(20) NOT NULL, +s SMALLINT NOT NULL, +s0 SMALLINT(0) NOT NULL, +s1 SMALLINT(1) NOT NULL, +s20 SMALLINT(20) NOT NULL, +m MEDIUMINT NOT NULL, +m0 MEDIUMINT(0) NOT NULL, +m1 MEDIUMINT(1) NOT NULL, +m20 MEDIUMINT(20) NOT NULL, +b BIGINT NOT NULL, +b0 BIGINT(0) NOT NULL, +b1 BIGINT(1) NOT NULL, +b20 BIGINT(20) NOT NULL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) NO NULL +i0 int(11) NO NULL +i1 int(1) NO NULL +i20 int(20) NO NULL +t tinyint(4) NO NULL +t0 tinyint(4) NO NULL +t1 tinyint(1) NO NULL +t20 tinyint(20) NO NULL +s smallint(6) NO NULL +s0 smallint(6) NO NULL +s1 smallint(1) NO NULL +s20 smallint(20) NO NULL +m mediumint(9) NO NULL +m0 mediumint(9) NO NULL +m1 mediumint(1) NO NULL +m20 mediumint(20) NO NULL +b bigint(20) NO NULL +b0 bigint(20) NO NULL +b1 bigint(1) NO NULL +b20 bigint(20) NO NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 
(i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 
+Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range 
value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for 
column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out 
of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 
Out of range value for column 'm' at row 11 +Warning 1264 Out of range value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 
8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) NOT NULL; +ERROR 42000: Display width out of range for column 'i257' (max = 255) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# INT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c INT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c int(11) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (2147483647); +SELECT HEX(c) FROM t1; +HEX(c) +7FFFFFFF +DROP TABLE t1; +#---------------------------------- +# INT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c INT NOT NULL DEFAULT 2147483647 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c int(11) NO 2147483647 +ALTER TABLE t1 ADD COLUMN err INT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (2147483647); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7FFFFFFF +2 7FFFFFFF +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYINT NOT NULL columns without a default +#---------------------------------- +CREATE 
TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(4) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (127); +SELECT HEX(c) FROM t1; +HEX(c) +7F +DROP TABLE t1; +#---------------------------------- +# TINYINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYINT NOT NULL DEFAULT 127 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(4) NO 127 +ALTER TABLE t1 ADD COLUMN err TINYINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (127); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7F +2 7F +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# SMALLINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SMALLINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c smallint(6) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# SMALLINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c SMALLINT NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c smallint(6) NO 0 +ALTER TABLE t1 ADD COLUMN err SMALLINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 
(c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumint(9) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# MEDIUMINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMINT NOT NULL DEFAULT 1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumint(9) NO 1 +ALTER TABLE t1 ADD COLUMN err MEDIUMINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BIGINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIGINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bigint(20) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (9223372036854775807); +SELECT HEX(c) FROM t1; +HEX(c) +7FFFFFFFFFFFFFFF +DROP TABLE t1; +#---------------------------------- +# BIGINT NOT NULL columns with a default 
+#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BIGINT NOT NULL DEFAULT 9223372036854775807 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bigint(20) NO 9223372036854775807 +ALTER TABLE t1 ADD COLUMN err BIGINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (9223372036854775807); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7FFFFFFFFFFFFFFF +2 7FFFFFFFFFFFFFFF +DROP TABLE t1; +######################## +# SET columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('') NOT NULL, +b SET('test1','test2','test3','test4','test5') NOT NULL, +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NOT NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') NO NULL +b set('test1','test2','test3','test4','test5') NO NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); 
+Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') NOT NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') NO NULL +b set('test1','test2','test3','test4','test5') NO NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +e set('a','A') NO NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NOT NULL; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 + test2,test4 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# SET('test1','test2','test3') NOT NULL columns without a default +#---------------------------------- 
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SET('test1','test2','test3') NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c set('test1','test2','test3') NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2,test3'); +SELECT HEX(c) FROM t1; +HEX(c) +74657374322C7465737433 +DROP TABLE t1; +#---------------------------------- +# SET('test1','test2','test3') NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c SET('test1','test2','test3') NOT NULL DEFAULT 'test2,test3' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c set('test1','test2','test3') NO test2,test3 +ALTER TABLE t1 ADD COLUMN err SET('test1','test2','test3') NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2,test3'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 74657374322C7465737433 +2 74657374322C7465737433 +DROP TABLE t1; +######################## +# TEXT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT NOT NULL, +t0 TEXT(0) NOT NULL, +t1 TEXT(1) NOT NULL, +t300 TEXT(300) NOT NULL, +tm TEXT(65535) NOT NULL, +t70k TEXT(70000) NOT NULL, +t17m TEXT(17000000) NOT NULL, +tt TINYTEXT NOT NULL, +m MEDIUMTEXT NOT NULL, +l LONGTEXT NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text NO NULL +t0 text NO NULL +t1 tinytext NO NULL +t300 text NO NULL +tm text NO NULL +t70k mediumtext NO NULL +t17m longtext NO NULL +tt tinytext NO NULL +m mediumtext NO NULL +l longtext NO NULL +INSERT INTO t1 
(t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 't' at row 1 +Warning 1265 Data truncated for column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data truncated for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NOT NULL; +ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS 
t1; +#---------------------------------- +# TEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c text NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c text NO NULL +ALTER TABLE t1 ADD COLUMN err TEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinytext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TINYTEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value 
+SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinytext NO NULL +ALTER TABLE t1 ADD COLUMN err TINYTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumtext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# MEDIUMTEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumtext NO NULL +ALTER TABLE t1 ADD COLUMN err MEDIUMTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# LONGTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO 
PRI NULL auto_increment +c longtext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# LONGTEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c LONGTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longtext NO NULL +ALTER TABLE t1 ADD COLUMN err LONGTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result new file mode 100644 index 00000000000..051784528b1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result @@ -0,0 +1,2270 @@ +######################## +# BINARY columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY NULL, +b0 BINARY(0) NULL, +b1 BINARY(1) NULL, +b20 BINARY(20) NULL, +b255 BINARY(255) NULL, +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 
0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NULL; +ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL 
+pk binary(1) NO PRI NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BINARY NULL, +c1 BINARY NULL DEFAULT NULL, +c2 BINARY NULL DEFAULT 0, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c binary(1) YES NULL +c1 binary(1) YES NULL +c2 binary(1) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0,0,0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 30 30 30 +3 NULL NULL 30 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 30 +3 30 +DROP TABLE t1; +######################## +# VARBINARY columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) NULL, +v1 VARBINARY(1) NULL, +v64 VARBINARY(64) NULL, +v65000 VARBINARY(65000) NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI +v65000 varbinary(65000) YES NULL +CREATE TABLE t2 (v VARBINARY(65532) NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. 
A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D7573742068617
66520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E207
46F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E6420616
46D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 
61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI +v65000 varbinary(65000) YES NULL +v65536 mediumblob YES NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c VARBINARY(64) NULL, +c1 VARBINARY(64) NULL DEFAULT NULL, +c2 VARBINARY(64) NULL DEFAULT 'test', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c varbinary(64) YES NULL +c1 varbinary(64) YES NULL +c2 varbinary(64) YES test +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test','test','test'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 74657374 74657374 74657374 +3 NULL NULL 74657374 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 74657374 +3 74657374 +DROP TABLE t1; +######################## +# BIT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT NULL, +b BIT(20) NULL, +c BIT(64) NULL, +d BIT(1) NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI b'0' +d bit(1) YES NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) NULL; +SHOW COLUMNS IN t1; +Field Type Null Key Default 
Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI b'0' +d bit(1) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NULL) ENGINE=rocksdb; +ERROR 42000: Display width out of range for column 'a' (max = 64) +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BIT NULL, +c1 BIT NULL DEFAULT NULL, +c2 BIT NULL DEFAULT 1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c bit(1) YES NULL +c1 bit(1) YES NULL +c2 bit(1) YES b'1' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1,1,1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +######################## +# BLOB columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b BLOB NULL, 
+b0 BLOB(0) NULL, +b1 BLOB(1) NULL, +b300 BLOB(300) NULL, +bm BLOB(65535) NULL, +b70k BLOB(70000) NULL, +b17m BLOB(17000000) NULL, +t TINYBLOB NULL, +m MEDIUMBLOB NULL, +l LONGBLOB NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob YES NULL +b0 blob YES NULL +b1 tinyblob YES NULL +b300 blob YES NULL +bm blob YES NULL +b70k mediumblob YES NULL +b17m longblob YES NULL +t tinyblob YES NULL +m mediumblob YES NULL +l longblob YES NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), 
LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BLOB NULL, +c1 BLOB NULL DEFAULT NULL, +c2 BLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c blob YES NULL +c1 blob YES NULL +c2 blob YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYBLOB NULL, +c1 TINYBLOB NULL DEFAULT NULL, +c2 TINYBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyblob YES NULL +c1 tinyblob YES NULL +c2 tinyblob YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF 
EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMBLOB NULL, +c1 MEDIUMBLOB NULL DEFAULT NULL, +c2 MEDIUMBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumblob YES NULL +c1 mediumblob YES NULL +c2 mediumblob YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c LONGBLOB NULL, +c1 LONGBLOB NULL DEFAULT NULL, +c2 LONGBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c longblob YES NULL +c1 longblob YES NULL +c2 longblob YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +######################## +# BOOL columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL NULL, +b2 BOOLEAN NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) YES NULL +b2 tinyint(1) YES NULL +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 
+INSERT INTO t1 (b1,b2) VALUES (2,3); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NULL' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NULL' at line 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BOOL NULL, +c1 BOOL NULL DEFAULT NULL, +c2 BOOL NULL DEFAULT '0', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyint(1) YES NULL +c1 tinyint(1) YES NULL +c2 tinyint(1) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('0','0','0'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk 
HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# CHAR columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NULL, +c0 CHAR(0) NULL, +c1 CHAR(1) NULL, +c20 CHAR(20) NULL, +c255 CHAR(255) NULL, +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) YES NULL +c0 char(0) YES NULL +c1 char(1) YES NULL +c20 char(20) YES NULL +c255 char(255) NO PRI +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NULL; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NULL, +c1 CHAR NULL DEFAULT NULL, +c2 CHAR NULL DEFAULT '_', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) YES NULL +c1 char(1) YES NULL +c2 char(1) YES _ +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('_','_','_'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 5F 5F 5F +3 NULL NULL 5F +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 5F +3 5F +DROP TABLE t1; +######################## +# VARCHAR columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARCHAR(0) NULL, +v1 VARCHAR(1) NULL, +v64 VARCHAR(64) NULL, +v65000 VARCHAR(65000) NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI +v65000 varchar(65000) YES NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI +INSERT INTO t1 
(v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. 
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. 
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. 
+ For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. 
We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI +v65000 varchar(65000) YES NULL +v65536 mediumtext YES NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c VARCHAR(64) NULL, +c1 VARCHAR(64) NULL DEFAULT NULL, +c2 VARCHAR(64) NULL DEFAULT 'test default', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c varchar(64) YES NULL +c1 varchar(64) YES NULL +c2 varchar(64) YES test default +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test default','test default','test default'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 746573742064656661756C74 746573742064656661756C74 746573742064656661756C74 +3 NULL NULL 746573742064656661756C74 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 746573742064656661756C74 +3 746573742064656661756C74 +DROP TABLE t1; +######################## +# date and time columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DATE NULL, +dt DATETIME NULL, +ts TIMESTAMP NULL, +t TIME NULL, +y YEAR NULL, +y4 YEAR(4) NULL, +y2 YEAR(2) NULL, +pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. 
+SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date YES NULL +dt datetime YES NULL +ts timestamp YES NULL +t time YES NULL +y year(4) YES NULL +y4 year(4) YES NULL +y2 year(4) YES NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1264 Out of range value for column 'dt' at row 1 +Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 +Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 
05:27:00 05:27:00 2012 2012 2012 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DATE NULL, +c1 DATE NULL DEFAULT NULL, +c2 DATE NULL DEFAULT '2012-12-21', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c date YES NULL +c1 date YES NULL +c2 date YES 2012-12-21 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21','2012-12-21','2012-12-21'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D3231 323031322D31322D3231 323031322D31322D3231 +3 NULL NULL 323031322D31322D3231 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D3231 +3 323031322D31322D3231 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DATETIME NULL, +c1 DATETIME NULL DEFAULT NULL, +c2 DATETIME NULL DEFAULT '2012-12-21 12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c datetime YES NULL +c1 datetime YES NULL +c2 datetime YES 2012-12-21 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 +3 NULL NULL 323031322D31322D32312031323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TIMESTAMP NULL, 
+c1 TIMESTAMP NULL DEFAULT NULL, +c2 TIMESTAMP NULL DEFAULT '2012-12-21 12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c timestamp YES NULL +c1 timestamp YES NULL +c2 timestamp YES 2012-12-21 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 +3 NULL NULL 323031322D31322D32312031323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TIME NULL, +c1 TIME NULL DEFAULT NULL, +c2 TIME NULL DEFAULT '12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c time YES NULL +c1 time YES NULL +c2 time YES 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('12:21:12','12:21:12','12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 31323A32313A3132 31323A32313A3132 31323A32313A3132 +3 NULL NULL 31323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 31323A32313A3132 +3 31323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c YEAR NULL, +c1 YEAR NULL DEFAULT NULL, +c2 YEAR NULL DEFAULT '2012', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c year(4) YES NULL +c1 year(4) YES NULL +c2 year(4) YES 
2012 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012','2012','2012'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7DC 7DC 7DC +3 NULL NULL 7DC +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7DC +3 7DC +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c YEAR(2) NULL, +c1 YEAR(2) NULL DEFAULT NULL, +c2 YEAR(2) NULL DEFAULT '12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c year(4) YES NULL +c1 year(4) YES NULL +c2 year(4) YES 2012 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('12','12','12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7DC 7DC 7DC +3 NULL NULL 7DC +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7DC +3 7DC +DROP TABLE t1; +######################## +# ENUM columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') NULL, +b ENUM('test1','test2','test3','test4','test5') NULL, +c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NULL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI test1 +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI test1 +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +e enum('a','A') YES NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + NULL + test2 4 NULL + test3 75 a + test5 2 NULL +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c ENUM('test1','test2','test3') NULL, +c1 ENUM('test1','test2','test3') NULL DEFAULT NULL, +c2 ENUM('test1','test2','test3') NULL DEFAULT 'test2', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c enum('test1','test2','test3') YES NULL +c1 enum('test1','test2','test3') YES NULL +c2 enum('test1','test2','test3') YES test2 +pk int(11) NO PRI NULL auto_increment +INSERT 
INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test2','test2','test2'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7465737432 7465737432 7465737432 +3 NULL NULL 7465737432 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7465737432 +3 7465737432 +DROP TABLE t1; +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL NULL, +d0 DECIMAL(0) NULL, +d1_1 DECIMAL(1,1) NULL, +d10_2 DECIMAL(10,2) NULL, +d60_10 DECIMAL(60,10) NULL, +n NUMERIC NULL, +n0_0 NUMERIC(0,0) NULL, +n1 NUMERIC(1) NULL, +n20_4 NUMERIC(20,4) NULL, +n65_4 NUMERIC(65,4) NULL, +pk NUMERIC NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) YES NULL +d0 decimal(10,0) YES NULL +d1_1 decimal(1,1) YES NULL +d10_2 decimal(10,2) YES NULL +d60_10 decimal(60,10) YES NULL +n decimal(10,0) YES NULL +n0_0 decimal(10,0) YES NULL +n1 decimal(1,0) YES NULL +n20_4 decimal(20,4) YES NULL +n65_4 decimal(65,4) YES NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 
9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 
-9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 
-9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 
-9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 
-99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NULL; +ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NULL; +ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NULL; +ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. 
+DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DECIMAL NULL, +c1 DECIMAL NULL DEFAULT NULL, +c2 DECIMAL NULL DEFAULT 1.1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1265 Data truncated for column 'c2' at row 1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c decimal(10,0) YES NULL +c1 decimal(10,0) YES NULL +c2 decimal(10,0) YES 1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1.1,1.1,1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +Note 1265 Data truncated for column 'c1' at row 1 +Note 1265 Data truncated for column 'c2' at row 1 +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c NUMERIC NULL, +c1 NUMERIC NULL DEFAULT NULL, +c2 NUMERIC NULL DEFAULT 0 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c decimal(10,0) YES NULL +c1 decimal(10,0) YES NULL +c2 decimal(10,0) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT NULL, +f0 FLOAT(0) NULL, +r1_1 REAL(1,1) NULL, +f23_0 FLOAT(23) NULL, +f20_3 FLOAT(20,3) NULL, +d DOUBLE NULL, +d1_0 DOUBLE(1,0) NULL, +d10_10 DOUBLE PRECISION (10,10) NULL, +d53 DOUBLE(53,0) NULL, +d53_10 DOUBLE(53,10) NULL, +pk 
DOUBLE NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float YES NULL +f0 float YES NULL +r1_1 double(1,1) YES NULL +f23_0 float YES NULL +f20_3 float(20,3) YES NULL +d double YES NULL +d1_0 double(1,0) YES NULL +d10_10 double(10,10) YES NULL +d53 double(53,0) YES NULL +d53_10 double(53,10) YES NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 
56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, 
+9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 
99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 
+d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NULL; +ERROR 42000: Display width out of range for column 'd0_0' (max = 255) +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NULL; +ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NULL; +ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c FLOAT NULL, +c1 FLOAT NULL DEFAULT NULL, +c2 FLOAT NULL DEFAULT 1.1 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c float YES NULL +c1 float YES NULL +c2 float YES 1.1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1.1 ,1.1 ,1.1 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DOUBLE NULL, +c1 DOUBLE NULL DEFAULT NULL, +c2 DOUBLE NULL DEFAULT 0 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c double YES NULL +c1 double YES NULL +c2 double YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); 
+INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT NULL, +i0 INT(0) NULL, +i1 INT(1) NULL, +i20 INT(20) NULL, +t TINYINT NULL, +t0 TINYINT(0) NULL, +t1 TINYINT(1) NULL, +t20 TINYINT(20) NULL, +s SMALLINT NULL, +s0 SMALLINT(0) NULL, +s1 SMALLINT(1) NULL, +s20 SMALLINT(20) NULL, +m MEDIUMINT NULL, +m0 MEDIUMINT(0) NULL, +m1 MEDIUMINT(1) NULL, +m20 MEDIUMINT(20) NULL, +b BIGINT NULL, +b0 BIGINT(0) NULL, +b1 BIGINT(1) NULL, +b20 BIGINT(20) NULL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) YES NULL +i0 int(11) YES NULL +i1 int(1) YES NULL +i20 int(20) YES NULL +t tinyint(4) YES NULL +t0 tinyint(4) YES NULL +t1 tinyint(1) YES NULL +t20 tinyint(20) YES NULL +s smallint(6) YES NULL +s0 smallint(6) YES NULL +s1 smallint(1) YES NULL +s20 smallint(20) YES NULL +m mediumint(9) YES NULL +m0 mediumint(9) YES NULL +m1 mediumint(1) YES NULL +m20 mediumint(20) YES NULL +b bigint(20) YES NULL +b0 bigint(20) YES NULL +b1 bigint(1) YES NULL +b20 bigint(20) YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT 
i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 
'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out 
of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 
(i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 
9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 Out of range value for column 'm' at row 11 +Warning 1264 Out of range value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT 
i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 
9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) NULL; +ERROR 42000: Display width out of range for column 'i257' (max = 255) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c INT NULL, +c1 INT NULL DEFAULT NULL, +c2 INT NULL DEFAULT 2147483647, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c int(11) YES NULL +c1 int(11) YES NULL +c2 int(11) YES 2147483647 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (2147483647,2147483647,2147483647); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7FFFFFFF 7FFFFFFF 7FFFFFFF +3 NULL NULL 7FFFFFFF +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7FFFFFFF +3 7FFFFFFF +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYINT NULL, +c1 TINYINT NULL DEFAULT NULL, +c2 TINYINT NULL DEFAULT 127 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyint(4) YES NULL +c1 tinyint(4) YES NULL +c2 tinyint(4) YES 127 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (127 ,127 ,127 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7F 7F 7F +3 NULL NULL 7F +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7F +3 7F +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c SMALLINT NULL, +c1 SMALLINT NULL DEFAULT NULL, +c2 SMALLINT NULL DEFAULT 0, +pk INT AUTO_INCREMENT 
PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c smallint(6) YES NULL +c1 smallint(6) YES NULL +c2 smallint(6) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0,0,0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMINT NULL, +c1 MEDIUMINT NULL DEFAULT NULL, +c2 MEDIUMINT NULL DEFAULT 1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumint(9) YES NULL +c1 mediumint(9) YES NULL +c2 mediumint(9) YES 1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1,1,1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BIGINT NULL, +c1 BIGINT NULL DEFAULT NULL, +c2 BIGINT NULL DEFAULT 9223372036854775807, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c bigint(20) YES NULL +c1 bigint(20) YES NULL +c2 bigint(20) YES 9223372036854775807 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (9223372036854775807,9223372036854775807,9223372036854775807); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF +3 NULL NULL 7FFFFFFFFFFFFFFF +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; 
+pk HEX(c2) +1 NULL +2 7FFFFFFFFFFFFFFF +3 7FFFFFFFFFFFFFFF +DROP TABLE t1; +######################## +# SET columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('') NULL, +b SET('test1','test2','test3','test4','test5') NULL, +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b 
set('test1','test2','test3','test4','test5') YES NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +e set('a','A') YES NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NULL; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 NULL + test2,test4 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c SET('test1','test2','test3') NULL, +c1 SET('test1','test2','test3') NULL DEFAULT NULL, +c2 SET('test1','test2','test3') NULL DEFAULT 'test2,test3', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c set('test1','test2','test3') YES NULL +c1 set('test1','test2','test3') YES NULL +c2 set('test1','test2','test3') YES test2,test3 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test2,test3','test2,test3','test2,test3'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 
74657374322C7465737433 74657374322C7465737433 74657374322C7465737433 +3 NULL NULL 74657374322C7465737433 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 74657374322C7465737433 +3 74657374322C7465737433 +DROP TABLE t1; +######################## +# TEXT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT NULL, +t0 TEXT(0) NULL, +t1 TEXT(1) NULL, +t300 TEXT(300) NULL, +tm TEXT(65535) NULL, +t70k TEXT(70000) NULL, +t17m TEXT(17000000) NULL, +tt TINYTEXT NULL, +m MEDIUMTEXT NULL, +l LONGTEXT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text YES NULL +t0 text YES NULL +t1 tinytext YES NULL +t300 text YES NULL +tm text YES NULL +t70k mediumtext YES NULL +t17m longtext YES NULL +tt tinytext YES NULL +m mediumtext YES NULL +l longtext YES NULL +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data 
truncated for column 't' at row 1 +Warning 1265 Data truncated for column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data truncated for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NULL; +ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TEXT NULL, +c1 TEXT NULL DEFAULT NULL, +c2 TEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c text YES NULL +c1 text YES NULL +c2 text YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYTEXT NULL, +c1 TINYTEXT NULL DEFAULT NULL, +c2 TINYTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinytext YES NULL +c1 tinytext YES NULL +c2 tinytext YES NULL 
+pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMTEXT NULL, +c1 MEDIUMTEXT NULL DEFAULT NULL, +c2 MEDIUMTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumtext YES NULL +c1 mediumtext YES NULL +c2 mediumtext YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c LONGTEXT NULL, +c1 LONGTEXT NULL DEFAULT NULL, +c2 LONGTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1101 BLOB/TEXT column 'c2' can't have a default value +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c longtext YES NULL +c1 longtext YES NULL +c2 longtext YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result 
b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result new file mode 100644 index 00000000000..66b6a5fe799 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result @@ -0,0 +1,741 @@ +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL UNSIGNED, +d0 DECIMAL(0) UNSIGNED, +d1_1 DECIMAL(1,1) UNSIGNED, +d10_2 DECIMAL(10,2) UNSIGNED, +d60_10 DECIMAL(60,10) UNSIGNED, +n NUMERIC UNSIGNED, +n0_0 NUMERIC(0,0) UNSIGNED, +n1 NUMERIC(1) UNSIGNED, +n20_4 NUMERIC(20,4) UNSIGNED, +n65_4 NUMERIC(65,4) UNSIGNED, +pk NUMERIC UNSIGNED PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) unsigned YES NULL +d0 decimal(10,0) unsigned YES NULL +d1_1 decimal(1,1) unsigned YES NULL +d10_2 decimal(10,2) unsigned YES NULL +d60_10 decimal(60,10) unsigned YES NULL +n decimal(10,0) unsigned YES NULL +n0_0 decimal(10,0) unsigned YES NULL +n1 decimal(1,0) unsigned YES NULL +n20_4 decimal(20,4) unsigned YES NULL +n65_4 decimal(65,4) unsigned YES NULL +pk decimal(10,0) unsigned NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 
9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 
n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at 
row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT 
d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 
0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) UNSIGNED; +ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) UNSIGNED; +ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) UNSIGNED; +ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. 
+DROP TABLE t1; +CREATE TABLE t1 ( +a DECIMAL UNSIGNED, +b NUMERIC UNSIGNED, +PRIMARY KEY (a) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a decimal(10,0) unsigned NO PRI 0 +b decimal(10,0) unsigned YES NULL +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +Warnings: +Warning 1264 Out of range value for column 'b' at row 1 +INSERT INTO t1 (a,b) VALUES (-100,100); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0 100 +1 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT UNSIGNED, +f0 FLOAT(0) UNSIGNED, +r1_1 REAL(1,1) UNSIGNED, +f23_0 FLOAT(23) UNSIGNED, +f20_3 FLOAT(20,3) UNSIGNED, +d DOUBLE UNSIGNED, +d1_0 DOUBLE(1,0) UNSIGNED, +d10_10 DOUBLE PRECISION (10,10) UNSIGNED, +d53 DOUBLE(53,0) UNSIGNED, +d53_10 DOUBLE(53,10) UNSIGNED, +pk DOUBLE UNSIGNED PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float unsigned YES NULL +f0 float unsigned YES NULL +r1_1 double(1,1) unsigned YES NULL +f23_0 float unsigned YES NULL +f20_3 float(20,3) unsigned YES NULL +d double unsigned YES NULL +d1_0 double(1,0) unsigned YES NULL +d10_10 double(10,10) unsigned YES NULL +d53 double(53,0) unsigned YES NULL +d53_10 double(53,10) unsigned YES NULL +pk double unsigned NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 
(f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range 
value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, 
+9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f 3.40282e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, 
+19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) UNSIGNED; +ERROR 42000: Display width out of range for column 'd0_0' (max = 255) +ALTER TABLE t1 ADD COLUMN n66_6 
DECIMAL(256,1) UNSIGNED; +ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) UNSIGNED; +ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. +DROP TABLE t1; +CREATE TABLE t1 ( +a DOUBLE UNSIGNED, +b FLOAT UNSIGNED, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a double unsigned YES NULL +b float unsigned NO PRI 0 +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +Warnings: +Warning 1264 Out of range value for column 'b' at row 1 +INSERT INTO t1 (a,b) VALUES (-100,100); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0 100 +1 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT UNSIGNED, +i0 INT(0) UNSIGNED, +i1 INT(1) UNSIGNED, +i20 INT(20) UNSIGNED, +t TINYINT UNSIGNED, +t0 TINYINT(0) UNSIGNED, +t1 TINYINT(1) UNSIGNED, +t20 TINYINT(20) UNSIGNED, +s SMALLINT UNSIGNED, +s0 SMALLINT(0) UNSIGNED, +s1 SMALLINT(1) UNSIGNED, +s20 SMALLINT(20) UNSIGNED, +m MEDIUMINT UNSIGNED, +m0 MEDIUMINT(0) UNSIGNED, +m1 MEDIUMINT(1) UNSIGNED, +m20 MEDIUMINT(20) UNSIGNED, +b BIGINT UNSIGNED, +b0 BIGINT(0) UNSIGNED, +b1 BIGINT(1) UNSIGNED, +b20 BIGINT(20) UNSIGNED, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(10) unsigned YES NULL +i0 int(10) unsigned YES NULL +i1 int(1) unsigned YES NULL +i20 int(20) unsigned YES NULL +t tinyint(3) unsigned YES NULL +t0 tinyint(3) unsigned YES NULL +t1 tinyint(1) unsigned YES NULL +t20 tinyint(20) unsigned YES NULL +s smallint(5) unsigned YES NULL +s0 smallint(5) unsigned YES NULL +s1 smallint(1) unsigned YES NULL +s20 smallint(20) unsigned YES NULL +m mediumint(8) unsigned YES NULL +m0 mediumint(8) unsigned YES NULL +m1 mediumint(1) unsigned YES NULL +m20 mediumint(20) unsigned YES NULL +b bigint(20) unsigned YES 
NULL +b0 bigint(20) unsigned YES NULL +b1 bigint(1) unsigned YES NULL +b20 bigint(20) unsigned YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for 
column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 
+Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 
Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at 
row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 
9223372036854775807 9223372036854775807 9223372036854775807 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) UNSIGNED; +ERROR 42000: Display width out of range for column 'i257' (max = 255) +DROP TABLE t1; +CREATE TABLE t1 ( +t TINYINT UNSIGNED, +s SMALLINT UNSIGNED, +m MEDIUMINT UNSIGNED, +i INT UNSIGNED, +b BIGINT UNSIGNED, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +t tinyint(3) unsigned YES NULL +s smallint(5) unsigned YES NULL +m mediumint(8) unsigned YES NULL +i int(10) unsigned YES NULL +b bigint(20) unsigned NO PRI 0 +INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615); +INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1); +Warnings: +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of 
range value for column 'b' at row 1 +SELECT t,s,m,i,b FROM t1; +t s m i b +0 0 0 0 0 +255 65535 16777215 4294967295 18446744073709551615 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result new file mode 100644 index 00000000000..823ad2f2fc4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result @@ -0,0 +1,723 @@ +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL ZEROFILL, +d0 DECIMAL(0) ZEROFILL, +d1_1 DECIMAL(1,1) ZEROFILL, +d10_2 DECIMAL(10,2) ZEROFILL, +d60_10 DECIMAL(60,10) ZEROFILL, +n NUMERIC ZEROFILL, +n0_0 NUMERIC(0,0) ZEROFILL, +n1 NUMERIC(1) ZEROFILL, +n20_4 NUMERIC(20,4) ZEROFILL, +n65_4 NUMERIC(65,4) ZEROFILL, +pk NUMERIC ZEROFILL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) unsigned zerofill YES NULL +d0 decimal(10,0) unsigned zerofill YES NULL +d1_1 decimal(1,1) unsigned zerofill YES NULL +d10_2 decimal(10,2) unsigned zerofill YES NULL +d60_10 decimal(60,10) unsigned zerofill YES NULL +n decimal(10,0) unsigned zerofill YES NULL +n0_0 decimal(10,0) unsigned zerofill YES NULL +n1 decimal(1,0) unsigned zerofill YES NULL +n20_4 decimal(20,4) unsigned zerofill YES NULL +n65_4 decimal(65,4) unsigned zerofill YES NULL +pk decimal(10,0) unsigned zerofill NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 
9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of 
range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 
9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 
0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ZEROFILL; +ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ZEROFILL; +ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ZEROFILL; +ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. 
+DROP TABLE t1; +CREATE TABLE t1 ( +a DECIMAL ZEROFILL, +b NUMERIC ZEROFILL, +PRIMARY KEY (a) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a decimal(10,0) unsigned zerofill NO PRI 0000000000 +b decimal(10,0) unsigned zerofill YES NULL +INSERT INTO t1 (a,b) VALUES (1.1,1234); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0000000001 0000001234 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT ZEROFILL, +f0 FLOAT(0) ZEROFILL, +r1_1 REAL(1,1) ZEROFILL, +f23_0 FLOAT(23) ZEROFILL, +f20_3 FLOAT(20,3) ZEROFILL, +d DOUBLE ZEROFILL, +d1_0 DOUBLE(1,0) ZEROFILL, +d10_10 DOUBLE PRECISION (10,10) ZEROFILL, +d53 DOUBLE(53,0) ZEROFILL, +d53_10 DOUBLE(53,10) ZEROFILL, +pk DOUBLE ZEROFILL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float unsigned zerofill YES NULL +f0 float unsigned zerofill YES NULL +r1_1 double(1,1) unsigned zerofill YES NULL +f23_0 float unsigned zerofill YES NULL +f20_3 float(20,3) unsigned zerofill YES NULL +d double unsigned zerofill YES NULL +d1_0 double(1,0) unsigned zerofill YES NULL +d10_10 double(10,10) unsigned zerofill YES NULL +d53 double(53,0) unsigned zerofill YES NULL +d53_10 double(53,10) unsigned zerofill YES NULL +pk double unsigned zerofill NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 000000000011111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 00000000000000000000000000000000001234566789123456800 +d53_10 000000000000000000000000100000000000000000.0000000000 +f0 0000012345.1 +f20_3 0000000000056789.988 +f23_0 000123457000 +r1_1 0.9 +INSERT INTO t1 
(f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000001e38 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +Warnings: +Warning 1264 Out of range value for column 
'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 
10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e61 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 
00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f 003.40282e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f0 003.40282e38 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +f23_0 003.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 
'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e61 +d 0000000000000000001e65 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f 003.40282e38 +f 003.40282e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f0 003.40282e38 +f0 003.40282e38 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +f23_0 003.40282e38 +f23_0 003.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ZEROFILL; +ERROR 42000: Display width out of range for column 'd0_0' (max = 255) +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ZEROFILL; +ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. 
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ZEROFILL; +ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. +DROP TABLE t1; +CREATE TABLE t1 ( +a DOUBLE ZEROFILL, +b FLOAT ZEROFILL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a double unsigned zerofill YES NULL +b float unsigned zerofill NO PRI 000000000000 +INSERT INTO t1 (a,b) VALUES (1,1234.5); +SELECT a,b FROM t1; +a b +0000000000000000000001 0000001234.5 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT ZEROFILL, +i0 INT(0) ZEROFILL, +i1 INT(1) ZEROFILL, +i20 INT(20) ZEROFILL, +t TINYINT ZEROFILL, +t0 TINYINT(0) ZEROFILL, +t1 TINYINT(1) ZEROFILL, +t20 TINYINT(20) ZEROFILL, +s SMALLINT ZEROFILL, +s0 SMALLINT(0) ZEROFILL, +s1 SMALLINT(1) ZEROFILL, +s20 SMALLINT(20) ZEROFILL, +m MEDIUMINT ZEROFILL, +m0 MEDIUMINT(0) ZEROFILL, +m1 MEDIUMINT(1) ZEROFILL, +m20 MEDIUMINT(20) ZEROFILL, +b BIGINT ZEROFILL, +b0 BIGINT(0) ZEROFILL, +b1 BIGINT(1) ZEROFILL, +b20 BIGINT(20) ZEROFILL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(10) unsigned zerofill YES NULL +i0 int(10) unsigned zerofill YES NULL +i1 int(1) unsigned zerofill YES NULL +i20 int(20) unsigned zerofill YES NULL +t tinyint(3) unsigned zerofill YES NULL +t0 tinyint(3) unsigned zerofill YES NULL +t1 tinyint(1) unsigned zerofill YES NULL +t20 tinyint(20) unsigned zerofill YES NULL +s smallint(5) unsigned zerofill YES NULL +s0 smallint(5) unsigned zerofill YES NULL +s1 smallint(1) unsigned zerofill YES NULL +s20 smallint(20) unsigned zerofill YES NULL +m mediumint(8) unsigned zerofill YES NULL +m0 mediumint(8) unsigned zerofill YES NULL +m1 mediumint(1) unsigned zerofill YES NULL +m20 mediumint(20) unsigned zerofill YES NULL +b bigint(20) unsigned zerofill YES NULL +b0 bigint(20) unsigned zerofill YES NULL +b1 bigint(1) unsigned zerofill 
YES NULL +b20 bigint(20) unsigned zerofill YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value 
for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 
00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of 
range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 
't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 
+Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 09223372036854775807 
09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +ALTER TABLE t1 ADD COLUMN i257 INT(257) ZEROFILL; +ERROR 42000: Display width out of range for column 'i257' (max = 255) +DROP TABLE t1; +CREATE TABLE t1 ( +t TINYINT ZEROFILL, +s SMALLINT ZEROFILL, +m MEDIUMINT ZEROFILL, +i INT ZEROFILL, +b BIGINT ZEROFILL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +t tinyint(3) unsigned zerofill YES NULL +s smallint(5) unsigned zerofill YES NULL +m mediumint(8) unsigned zerofill YES NULL +i int(10) unsigned zerofill YES NULL +b bigint(20) unsigned zerofill NO PRI 00000000000000000000 +INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0); +SELECT t,s,m,i,b FROM t1; +t s m i b +001 00010 00000100 0000001000 00000000000000000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result new file mode 100644 index 
00000000000..b6bde05cc70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -0,0 +1,128 @@ +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin). +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t1.value3 Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_check=0; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_check=1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_exceptions=t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), 
index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t123; +CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.s123.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; +CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE xt123; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t123.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions='t1'; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ALTER TABLE t1 AUTO_INCREMENT=1; +DROP TABLE t1; +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). +CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; +ALTER TABLE t2 ADD INDEX(value); +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). 
+DROP TABLE t2; +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; + Invalid pattern in strict_collation_exceptions: [a-b +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (binary, latin1_bin, utf8_bin). +DROP TABLE a, b; +SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; + Invalid pattern in strict_collation_exceptions: abc\ +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (binary, latin1_bin, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="abc"; +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin). 
+DROP TABLE abc; +SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result new file mode 100644 index 00000000000..83d72d6c449 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result @@ -0,0 +1,25 @@ +CREATE TABLE `r1.lol` ( +`c1` int(10) NOT NULL DEFAULT '0', +`c2` int(11) NOT NULL DEFAULT '0', +`c3` int(1) NOT NULL DEFAULT '0', +`c4` int(11) NOT NULL DEFAULT '0', +`c5` int(11) NOT NULL DEFAULT '0', +`c6` varchar(100) NOT NULL DEFAULT '', +`c7` varchar(100) NOT NULL DEFAULT '', +`c8` varchar(255) NOT NULL DEFAULT '', +`c9` int(10) NOT NULL DEFAULT '125', +`c10` int(10) NOT NULL DEFAULT '125', +`c11` text NOT NULL, +`c12` int(11) NOT NULL DEFAULT '0', +`c13` int(10) NOT NULL DEFAULT '0', +`c14` text NOT NULL, +`c15` blob NOT NULL, +`c16` int(11) NOT NULL DEFAULT '0', +`c17` int(11) NOT NULL DEFAULT '0', +`c18` int(11) NOT NULL DEFAULT '0', +PRIMARY KEY (`c1`), +KEY i1 (`c4`), +KEY i2 (`c7`), +KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1; +DROP INDEX i1 ON `r1.lol`; +DROP TABLE `r1.lol`; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result new file mode 100644 index 00000000000..408a93441b9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result @@ -0,0 +1,93 @@ +DROP TABLE IF EXISTS r1; +create table r1 ( +id1 int, +id2 int, +type int, +value varchar(100), +value2 int, +value3 int, +primary key (type, id1, id2), +index id1_type (id1, type, value2, value, id2) +) engine=rocksdb collate latin1_bin; +select 'loading data'; +loading data +loading data +set global rocksdb_force_flush_memtable_now=1; +optimize table r1; +Table Op Msg_type Msg_text +test.r1 optimize status OK +Test 1: Do a bunch of updates without setting the compaction sysvar +Expect: no compaction +set global 
rocksdb_compaction_sequential_deletes_window=0; +set global rocksdb_compaction_sequential_deletes= 0; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); +sleep(1) +0 +wait_for_delete: 0 +There are deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 2: Do a bunch of updates and set the compaction sysvar +Expect: compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 990; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); +sleep(1) +0 +wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large +Expect: no compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 1000; +set global rocksdb_compaction_sequential_deletes_file_size=1000000; +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); +sleep(1) +0 +wait_for_delete: 0 +There are deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 4: Do a bunch of secondary key updates and set the compaction sysvar +Expect: compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 50; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); +sleep(1) +0 
+wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 5: Do a bunch of secondary key updates and set the compaction sysvar, +and rocksdb_compaction_sequential_deletes_count_sd turned on +Expect: compaction +SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON; +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 50; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); +sleep(1) +0 +wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd; +drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result new file mode 100644 index 00000000000..62a6dbbdaca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result @@ -0,0 +1,2 @@ +create table t (id int primary key) engine=rocksdb; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result new file mode 100644 index 00000000000..396f80a2ecb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result @@ -0,0 +1,12 @@ +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +use mysqlslap; +CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB; +INSERT INTO a1 VALUES (1, 1); +SHOW CREATE TABLE a1; 
+Table Create Table +a1 CREATE TABLE `a1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result new file mode 100644 index 00000000000..d75a548e6ff --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result @@ -0,0 +1,151 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +COMMIT; +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (READ COMMITTED), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); +BEGIN; +connection con2; +INSERT INTO r1 values (5,5,5); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +connection con2; +INSERT INTO r1 values (6,6,6); +connection con1; +SELECT * FROM r1; +id value 
value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (7,7,7); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +connection con2; +INSERT INTO r1 values (8,8,8); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (9,9,9); +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (10,10,10); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +INSERT INTO r1 values (11,11,11); +ERROR: 0 +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +drop table r1; +connection default; +disconnect con1; +disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result new file mode 100644 index 00000000000..7458e6b72c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result @@ -0,0 +1,144 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; 
+STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +connection con1; +COMMIT; +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (REPEATABLE READ), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); +BEGIN; +connection con2; +INSERT INTO r1 values (5,5,5); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +connection con2; +INSERT INTO r1 values (6,6,6); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO r1 values (7,7,7); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +connection con2; +INSERT INTO r1 values (8,8,8); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO r1 values (9,9,9); +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO r1 values (10,10,10); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 
+6 6 6 +7 7 7 +8 8 8 +9 9 9 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +INSERT INTO r1 values (11,11,11); +ERROR: 1105 +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +drop table r1; +connection default; +disconnect con1; +disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result new file mode 100644 index 00000000000..9c55b0dd689 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result @@ -0,0 +1,24 @@ +# -- WARNING ---------------------------------------------------------------- +# According to I_S.ENGINES, does not support transactions. +# If it is true, the test will most likely fail; you can +# either create an rdiff file, or add the test to disabled.def. +# If transactions should be supported, check the data in Information Schema. +# --------------------------------------------------------------------------- +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result new file mode 100644 index 00000000000..28c200ebf30 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -0,0 +1,74 @@ +# +# Test how MyRocks handles reading 
corrupted data from disk. +# Data corruption is simulated at source-code level. +# +# +# A test for case when data in the table *record* is longer +# than table DDL expects it to be +# +create table t1 ( +pk int not null primary key, +col1 varchar(10) +) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +select * from t1; +pk col1 +1 1 +2 2 +3 3 +set @tmp1=@@rocksdb_verify_checksums; +set rocksdb_verify_checksums=1; +set session debug= "+d,myrocks_simulate_bad_row_read1"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 from storage engine +set session debug= "-d,myrocks_simulate_bad_row_read1"; +set rocksdb_verify_checksums=@tmp1; +select * from t1 where pk=1; +pk col1 +1 1 +set session debug= "+d,myrocks_simulate_bad_row_read2"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 from storage engine +set session debug= "-d,myrocks_simulate_bad_row_read2"; +set session debug= "+d,myrocks_simulate_bad_row_read3"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 from storage engine +set session debug= "-d,myrocks_simulate_bad_row_read3"; +insert into t1 values(4,'0123456789'); +select * from t1; +pk col1 +1 1 +2 2 +3 3 +4 0123456789 +drop table t1; +# +# A test for case when index data is longer than table DDL +# expects it to be +# +create table t2 ( +pk varchar(4) not null primary key, +col1 int not null +) engine=rocksdb collate latin1_bin; +insert into t2 values ('ABCD',1); +select * from t2; +pk col1 +ABCD 1 +set session debug= "+d,myrocks_simulate_bad_pk_read1"; +select * from t2; +ERROR HY000: Got error 122 from storage engine +set session debug= "-d,myrocks_simulate_bad_pk_read1"; +drop table t2; +create table t2 ( +pk varchar(4) not null primary key, +col1 int not null +) engine=rocksdb; +insert into t2 values ('ABCD',1); +select * from t2; +pk col1 +ABCD 1 +set session debug= "+d,myrocks_simulate_bad_pk_read1"; +select * from t2; +ERROR HY000: Got error 122 from storage engine +set session debug= 
"-d,myrocks_simulate_bad_pk_read1"; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/create_table.result b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result new file mode 100644 index 00000000000..8c879d82611 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result @@ -0,0 +1,165 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +Warnings: +Note 1050 Table 't1' already exists +CREATE TABLE t2 LIKE t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY' +DROP TABLE t2; +DROP TABLE IF EXISTS t1; +SET default_storage_engine = rocksdb; +CREATE TABLE t1 (a INT PRIMARY KEY); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a +1 +2 +FLUSH LOGS; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT, +c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT, +c18 INT,c19 INT,c20 INT,c21 INT,c22 INT,c23 INT,c24 INT,c25 INT, +c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT, +c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT, +c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 
INT, +c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT, +c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT, +c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT, +c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT, +c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT, +c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT, +c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT, +c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT, +c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT, +c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT, +c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT, +c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT, +c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT, +c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT, +c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT, +c161 INT,c162 INT,c163 INT,c164 INT,c165 INT,c166 INT,c167 INT, +c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT, +c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT, +c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT, +c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT, +c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT, +c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT, +c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT, +c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT, +c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT, +c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT, +c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT, +c245 INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT, +c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT, +c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT, +c266 INT,c267 INT,c268 INT,c269 
INT,c270 INT,c271 INT,c272 INT, +c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT, +c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT, +c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT, +c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT, +c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT, +c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT, +c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT, +c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT, +c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT, +c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT, +c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT, +c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT, +c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT, +c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT, +c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 INT,c377 INT, +c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT, +c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT, +c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT, +c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT, +c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT, +c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT, +c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT, +c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT, +c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT, +c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT, +c448 INT, +KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14), +KEY (c15,c16,c17,c18,c19,c20,c21),KEY (c22,c23,c24,c25,c26,c27,c28), +KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42), +KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56), +KEY (c57,c58,c59,c60,c61,c62,c63),KEY 
(c64,c65,c66,c67,c68,c69,c70), +KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84), +KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98), +KEY (c99,c100,c101,c102,c103,c104,c105), +KEY (c106,c107,c108,c109,c110,c111,c112), +KEY (c113,c114,c115,c116,c117,c118,c119), +KEY (c120,c121,c122,c123,c124,c125,c126), +KEY (c127,c128,c129,c130,c131,c132,c133), +KEY (c134,c135,c136,c137,c138,c139,c140), +KEY (c141,c142,c143,c144,c145,c146,c147), +KEY (c148,c149,c150,c151,c152,c153,c154), +KEY (c155,c156,c157,c158,c159,c160,c161), +KEY (c162,c163,c164,c165,c166,c167,c168), +KEY (c169,c170,c171,c172,c173,c174,c175), +KEY (c176,c177,c178,c179,c180,c181,c182), +KEY (c183,c184,c185,c186,c187,c188,c189), +KEY (c190,c191,c192,c193,c194,c195,c196), +KEY (c197,c198,c199,c200,c201,c202,c203), +KEY (c204,c205,c206,c207,c208,c209,c210), +KEY (c211,c212,c213,c214,c215,c216,c217), +KEY (c218,c219,c220,c221,c222,c223,c224), +KEY (c225,c226,c227,c228,c229,c230,c231), +KEY (c232,c233,c234,c235,c236,c237,c238), +KEY (c239,c240,c241,c242,c243,c244,c245), +KEY (c246,c247,c248,c249,c250,c251,c252), +KEY (c253,c254,c255,c256,c257,c258,c259), +KEY (c260,c261,c262,c263,c264,c265,c266), +KEY (c267,c268,c269,c270,c271,c272,c273), +KEY (c274,c275,c276,c277,c278,c279,c280), +KEY (c281,c282,c283,c284,c285,c286,c287), +KEY (c288,c289,c290,c291,c292,c293,c294), +KEY (c295,c296,c297,c298,c299,c300,c301), +KEY (c302,c303,c304,c305,c306,c307,c308), +KEY (c309,c310,c311,c312,c313,c314,c315), +KEY (c316,c317,c318,c319,c320,c321,c322), +KEY (c323,c324,c325,c326,c327,c328,c329), +KEY (c330,c331,c332,c333,c334,c335,c336), +KEY (c337,c338,c339,c340,c341,c342,c343), +KEY (c344,c345,c346,c347,c348,c349,c350), +KEY (c351,c352,c353,c354,c355,c356,c357), +KEY (c358,c359,c360,c361,c362,c363,c364), +KEY (c365,c366,c367,c368,c369,c370,c371), +KEY (c372,c373,c374,c375,c376,c377,c378), +KEY (c379,c380,c381,c382,c383,c384,c385), +KEY (c386,c387,c388,c389,c390,c391,c392), +KEY 
(c393,c394,c395,c396,c397,c398,c399), +KEY (c400,c401,c402,c403,c404,c405,c406), +KEY (c407,c408,c409,c410,c411,c412,c413), +KEY (c414,c415,c416,c417,c418,c419,c420), +KEY (c421,c422,c423,c424,c425,c426,c427), +KEY (c428,c429,c430,c431,c432,c433,c434), +KEY (c435,c436,c437,c438,c439,c440,c441), +KEY (c442,c443,c444,c445,c446,c447,c448)); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result new file mode 100644 index 00000000000..3e2f5709ca0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result @@ -0,0 +1,37 @@ +# +# Validate that deadlock errors don't occur with a high level of concurrency +# +# Disable for valgrind because this takes too long +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb; +CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb; +SET @save = @@global.rocksdb_lock_wait_timeout; +SET GLOBAL rocksdb_lock_wait_timeout = 60; +SELECT count from t1; +count +50000 +SELECT count from t1; +count +100000 +SELECT count from t1; +count +150000 +SELECT count from t1; +count +200000 +SELECT count from t1rev; +count +50000 +SELECT count from t1rev; +count +100000 +SELECT count from t1rev; +count +150000 +SELECT count from t1rev; +count +200000 +SET GLOBAL rocksdb_lock_wait_timeout = @save; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete.result b/storage/rocksdb/mysql-test/rocksdb/r/delete.result new file mode 100644 index 00000000000..8ec3c50f466 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete.result @@ -0,0 +1,166 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES 
(10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +DELETE FROM t1 WHERE b IN ('c'); +SELECT a,b FROM t1; +a b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +5 e +5 e +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT c,d FROM t2; +c d +b 2 +b 2 +d 4 +e 5 +e 5 +foobar 10000 +foobar 10000 +DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +2 b +2 b +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +DELETE FROM t1; +SELECT a,b FROM t1; +a b +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +BEGIN; +DELETE FROM t1 WHERE b IN ('c'); +SELECT a,b FROM t1; +a b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +COMMIT; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; +RELEASE SAVEPOINT spt1; +ROLLBACK; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; 
+INSERT INTO t1 (a,b) VALUES (1,'a'); +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result new file mode 100644 index 00000000000..402ef539ffd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result @@ -0,0 +1,15 @@ +set debug_sync='RESET'; +drop table if exists t1; +create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb; +insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2); +set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go'; +update t1 set value=100 where id1=1; +set debug_sync='now WAIT_FOR parked'; +delete from t1 where id1=1 and id2=1; +set debug_sync='now SIGNAL go'; +select * from t1 where id1=1 for update; +id1 id2 value +1 2 100 +1 3 100 +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result new file mode 100644 index 00000000000..1f017dfb990 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT a,b FROM t1; +a 
b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +3 c +3 c +4 d +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +a 1 +a 1 +b 2 +b 2 +c 3 +c 3 +d 4 +d 4 +e 5 +e 5 +foobar 10000 +foobar 10000 +DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1; +a b +1 a +10000 foobar +10000 foobar +2 b +2 b +3 c +3 c +4 d +4 d +5 e +5 e +DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +Warnings: +Error 1242 Subquery returns more than 1 row +SELECT a,b FROM t1; +a b +1 a +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result new file mode 100644 index 00000000000..4173d875a82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo'; +SELECT a,b FROM t1; +a b +2 b +3 c +4 d +5 e +CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT c,d FROM t2; +c d +b 2 +c 3 +d 4 +e 5 +DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d; +SELECT a,b FROM t1; +a b +SELECT c,d FROM t2; +c d +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result new file mode 100644 index 00000000000..c94708b872f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; +SELECT a,b 
FROM t1; +a b +1 a +3 c +4 d +5 e +6 x +7 y +DELETE FROM t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; +SELECT a,b FROM t1; +a b +1 a +3 c +4 d +5 e +6 x +7 y +DELETE FROM t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9); +DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1; +SELECT a,b,c FROM t1; +a b c +1 2 3 +4 5 6 +7 8 9 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/describe.result b/storage/rocksdb/mysql-test/rocksdb/r/describe.result new file mode 100644 index 00000000000..ec828577ae5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/describe.result @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b'); +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8; +INSERT INTO t2 (a,b) VALUES (1, 'bar'); +CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; +DESCRIBE t1; +Field Type Null Key Default Extra +a int(11) NO PRI 0 +b char(8) YES NULL +DESC t2 a; +Field Type Null Key Default Extra +a int(11) YES NULL +DESCRIBE t3 '%'; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +pk int(11) NO PRI NULL auto_increment +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result new file mode 100644 index 00000000000..f220efe9f38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result @@ -0,0 +1,6 @@ +DROP DATABASE IF EXISTS test_drop_database; +CREATE DATABASE test_drop_database; 
+CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a); +DROP TABLE t1; +DROP DATABASE test_drop_database; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result new file mode 100644 index 00000000000..dfa5c5b2590 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result @@ -0,0 +1,154 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`b`), + KEY `ka` (`a`), + KEY `kb` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (a) VALUES (1); +INSERT INTO t1 (a) VALUES (3); +INSERT INTO t1 (a) VALUES (5); +ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`b`), + KEY `kb` (`a`,`b`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +SELECT * FROM t1 FORCE INDEX(ka) where a > 1; +ERROR 42000: Key 'ka' doesn't exist in table 't1' +SELECT * FROM t1 FORCE INDEX(kb) where a > 1; +a b +3 2 +5 3 +SELECT * FROM t1 where b > 1; +a b +3 2 +5 3 +DROP TABLE t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`), + KEY `kbc` (`b`,`c`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +ALTER TABLE t1 DROP INDEX kb, 
DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +a b c +2 3 4 +3 5 6 +5 3 4 +6 5 6 +SELECT * FROM t1 where b > 3; +a b c +3 5 6 +6 5 6 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 1 kb 1 b A 0 NULL NULL YES LSMTREE +t1 1 kbc 1 b A 0 NULL NULL YES LSMTREE +t1 1 kbc 2 c A 0 NULL NULL YES LSMTREE +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP PRIMARY KEY; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +DROP TABLE t1; +CREATE TABLE 
t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb; +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c); +ALTER TABLE t1 ADD UNIQUE INDEX kc(c); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 0 kb 1 b A 0 NULL NULL YES LSMTREE +t1 0 kbc 1 b A 0 NULL NULL YES LSMTREE +t1 0 kbc 2 c A 0 NULL NULL YES LSMTREE +t1 0 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 0 kc 1 c A 0 NULL NULL YES LSMTREE +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +a b c +2 3 4 +3 5 6 +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result 
b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result new file mode 100644 index 00000000000..7d0fae229da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result @@ -0,0 +1,71 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +drop table t2; +DELETE FROM t1; +DELETE FROM t4; +drop table t3; +DELETE FROM t1; +DELETE FROM t4; +drop table t4; +CREATE TABLE t5 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +drop table t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +Begin filtering dropped index+ 0 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Finished filtering dropped index+ 0 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering 
dropped index+ 1 +Finished filtering dropped index+ 1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result new file mode 100644 index 00000000000..c46d3522dd7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +DELETE FROM t1; +DELETE FROM t4; +DELETE FROM t1; +DELETE FROM t4; +CREATE TABLE t5 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +drop table t1; +drop table t2; +drop table t3; +drop table t4; +drop table t5; +Compacted diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result new file mode 100644 index 00000000000..c69d789c12a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, 
+c varchar(500) not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +drop table t1; +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +case when variable_value-@a < 500000 then 'true' else 'false' end +true +DROP TABLE IF EXISTS t1; +Warnings: +Note 1051 Unknown table 'test.t1' diff --git a/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result new file mode 100644 index 00000000000..954335debf2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result @@ -0,0 +1,362 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (id1 INT, id2 INT, id3 INT, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; +CREATE TABLE t2 (id1 INT, id2 INT, id3 INT, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t1 
WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM 
t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, +id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, +id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; +CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, +id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, +id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 
id3 +1 10 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t2 VALUES (1, 1, 1) ON 
DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result new file mode 100644 index 00000000000..ba16aaa6d35 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t(id int primary key) engine=rocksdb; +INSERT INTO t values (1), (2), (3); +CREATE TABLE t(id int primary key) 
engine=rocksdb; +ERROR 42S01: Table 't' already exists +FLUSH TABLES; +CREATE TABLE t(id int primary key) engine=rocksdb; +ERROR HY000: Table 'test.t' does not exist, but metadata information exists inside MyRocks. This is a sign of data inconsistency. Please check if './test/t.frm' exists, and try to restore it if it does not exist. +FLUSH TABLES; +SELECT * FROM t; +id +1 +2 +3 +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result new file mode 100644 index 00000000000..1ae56ae5f05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB; +ERROR HY000: Incorrect arguments to column family not valid for storing index data +DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result new file mode 100644 index 00000000000..483be726bb3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (b INT PRIMARY KEY); +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); +ERROR 42000: MyRocks does not currently support foreign key constraints +CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); +ERROR 42000: MyRocks does not currently support foreign key constraints +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); +ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); +ERROR 42000: MyRocks does not currently support foreign key constraints +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL; +DROP TABLE 
t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD foreignkey INT NOT NULL; +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); +ERROR 42000: MyRocks does not currently support foreign key constraints +DROP TABLE t2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result new file mode 100644 index 00000000000..d42041183c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result @@ -0,0 +1,9 @@ +create table t (id int primary key, value int); +begin; +update t set value=100 where id in (1, 2); +commit; +begin; +select * from t for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from t for update +commit; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result new file mode 100644 index 00000000000..c1cf1e77ecf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result @@ -0,0 +1,504 @@ +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set @save_gap_lock_write_log = @@gap_lock_write_log; +set @save_gap_lock_raise_error = @@gap_lock_raise_error; +set gap_lock_write_log = 1; +set gap_lock_raise_error = 0; +begin; +update gap4 set a= (select 1+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +1 +update gap4 set a= (select 2+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +update gap4 set a= (select 3+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +1 +1 +0 +flush logs; +0 +rollback; +set gap_lock_write_log = @save_gap_lock_write_log; +set gap_lock_raise_error = @save_gap_lock_raise_error; +set global gap_lock_write_log = 1; +set global gap_lock_write_log = 0; +1000 +set session autocommit=0; +select * from gap1 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update +select * from gap1 where value != 100 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update +select * from gap1 where id1=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 for update +select * from gap1 where id1=1 and id2= 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 for update +select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 for update +select * from gap1 order by id1 asc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc limit 1 for update +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update +select * from gap1 order by id1 desc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 for update +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 
2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update +select * from gap1 force index(i) where c1=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 for update +select * from gap3 force index(ui) where value=1 for update; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 for update; +id1 id2 id3 c1 value +select * from gap3 where id=1 for update; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 for update; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 for update; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 for update; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 for update; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 for update; +id1 id2 id3 c1 value +select * from gap3 where id=1 for update; +id value +1 1 +set session autocommit=0; +select * from gap1 limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 lock in share mode +select * from gap1 where value != 100 limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 where value != 100 limit 1 lock in share mode +select * from gap1 where id1=1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 lock in share mode +select * from gap1 where id1=1 and id2= 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 
2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode +select * from gap1 order by id1 asc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 order by id1 asc limit 1 lock in share mode +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode +select * from gap1 order by id1 desc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 lock in share mode +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 lock in share mode +select * from gap1 force index(i) where c1=1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 lock in share mode +select * from gap3 force index(ui) where value=1 lock in share mode; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 lock in share mode; +id1 id2 id3 c1 value +select * from gap3 where id=1 lock in share mode; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 lock in share mode; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 lock in share mode; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, 
id3 desc +limit 1 lock in share mode; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 lock in share mode; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 lock in share mode; +id1 id2 id3 c1 value +select * from gap3 where id=1 lock in share mode; +id value +1 1 +set session autocommit=0; +select * from gap1 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 ; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 ; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 
3) ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 ; +id1 id2 id3 c1 value +select * from gap3 where id=1 ; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 ; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 ; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 ; +id1 id2 id3 c1 value +select * from gap3 where id=1 ; +id value +1 1 +set session autocommit=0; +insert into gap1 (id1, id2, id3) values (-1,-1,-1); +insert into gap1 (id1, id2, id3) values (-1,-1,-1) +on duplicate key update value=100; +update gap1 set value=100 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement 
transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set value=100 where id1=1 +update gap1 set value=100 where id1=1 and id2=1 and id3=1; +delete from gap1 where id1=2; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: delete from gap1 where id1=2 +delete from gap1 where id1=-1 and id2=-1 and id3=-1; +commit; +set session autocommit=1; +insert into gap1 (id1, id2, id3) values (-1,-1,-1); +insert into gap1 (id1, id2, id3) values (-1,-1,-1) +on duplicate key update value=100; +update gap1 set value=100 where id1=1; +update gap1 set value=100 where id1=1 and id2=1 and id3=1; +delete from gap1 where id1=2; +delete from gap1 where id1=-1 and id2=-1 and id3=-1; +commit; +set session autocommit=1; +insert into gap2 select * from gap1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1 +insert into gap2 select * from gap1 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1 where id1=1 +insert into gap2 select * from gap1 where id1=1 and id2=1 and id3=1; +create table t4 select * from gap1 where id1=1 and id2=1 and id3=1; +drop table t4; +create table t4 select * from gap1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1 +create table t4 select * from gap1 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1 where id1=1 +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3 +and gap2.id2=3 and gap2.id3=3; +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 
3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3 +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id +set gap1.value=100 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id +set gap1.value=100 where gap2.id1=3 +update gap1 set gap1.value= (select count(*) from gap2); +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set gap1.value= (select count(*) from gap2) +delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3 +and gap2.id2=3 and gap2.id3=3; +delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3 +select * from gap1, gap2 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1, gap2 limit 1 for update +select * from gap1 a, gap1 b limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 a, gap1 b limit 1 for update +create table u1( +c1 int, +c2 int, +c3 int, +c4 int, +primary key (c1, c2, c3), +unique key (c3, c1) +); +set session gap_lock_raise_error=1; +begin; +insert into u1 values (1,1,1,1); +commit; +begin; +insert into u1 values (1,2,1,1) on duplicate key update c4=10; +commit; +begin; +select * from u1 where c3=1 and c1 = 1 for update; +c1 c2 c3 c4 +1 1 1 10 +select * from u1 where c3=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from u1 where c3=1 for update +commit; +drop table u1; +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions; +SET GLOBAL gap_lock_exceptions="t.*"; +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set session autocommit=0; +select * from gap1 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update +select * from gap1 where value != 100 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 
3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET GLOBAL gap_lock_exceptions="gap.*"; +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set session autocommit=0; +select * from gap1 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result new file mode 100644 index 00000000000..04dcac1fcb4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3; +SHOW TABLES; 
+Tables_in_test +t1 +ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index'); +ERROR 42000: Incorrect table name 'H:/mysqltest/p3Index' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result new file mode 100644 index 00000000000..1ab8bd7678c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result @@ -0,0 +1,115 @@ +DROP TABLE IF EXISTS t1; +FLUSH STATUS; +CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT, +INDEX b(b)) ENGINE=rocksdb; +INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50); +SHOW SESSION STATUS LIKE 'Handler_write%'; +Variable_name Value +Handler_write 3 +UPDATE t1 SET b=1000 WHERE id=1; +SHOW SESSION STATUS LIKE 'Handler_update%'; +Variable_name Value +Handler_update 1 +DELETE FROM t1 WHERE id=2; +SHOW SESSION STATUS LIKE 'Handler_delete%'; +Variable_name Value +Handler_delete 1 +INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +SHOW SESSION STATUS LIKE 'Handler_write%'; +Variable_name Value +Handler_write 10 +FLUSH STATUS; +SELECT * FROM t1 WHERE id=8; +id a b +8 NULL 8 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 0 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE b=6; +id a b +6 NULL 6 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 1 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1; +id a b +1 foobar 1000 +10 NULL 10 +3 bar 50 +4 NULL 4 +5 NULL 5 +6 NULL 6 +7 NULL 7 +8 NULL 8 +9 NULL 9 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 0 +Handler_read_last 0 
+Handler_read_next 0 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 10 +FLUSH STATUS; +SELECT * FROM t1 WHERE b <=5 ORDER BY b; +id a b +4 NULL 4 +5 NULL 5 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 2 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE id >=8 ORDER BY id; +id a b +8 NULL 8 +9 NULL 9 +10 NULL 10 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 3 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE id < 8 ORDER BY id; +id a b +1 foobar 1000 +3 bar 50 +4 NULL 4 +5 NULL 5 +6 NULL 6 +7 NULL 7 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 1 +Handler_read_key 0 +Handler_read_last 0 +Handler_read_next 6 +Handler_read_prev 0 +Handler_read_rnd 0 +Handler_read_rnd_next 0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result new file mode 100644 index 00000000000..e4d080289dc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result @@ -0,0 +1,648 @@ +DROP TABLE IF EXISTS test; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con3,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +create table test (id int primary key, value int) engine=rocksdb; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test; +id value +1 10 +2 20 +update test set value = 101 where id = 1; +connection con2; +select * 
from test; +id value +1 10 +2 20 +connection con1; +rollback; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; +id value +1 11 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; +id value +2 20 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +commit; +connection con3; +select * from test; +id value +1 12 +2 18 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value = 30; +id value +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +select * from test where value % 
3 = 0; +id value +3 30 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +id value +1 10 +2 20 +delete from test where value = 20; +connection con1; +commit; +connection con2; +select * from test; +id value +2 30 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +select * from test; +id value +1 12 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +select * from test where id = 2; +id value +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; +id value +2 18 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 5 = 0; +id value +1 10 +2 20 +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +select * from test where value % 3 = 0; +id value +1 12 +commit; +connection con1; +truncate 
table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test; +id value +1 10 +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +delete from test where value = 20; +select * from test where id = 2; +id value +2 18 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con2; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 3 = 0; +id value +connection con2; +select * from test where value % 3 = 0; +id value +connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection con1; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection default; +drop table test; +disconnect con1; +disconnect con2; +disconnect con3; +DROP TABLE IF EXISTS test; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con3,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +create table test (id 
int primary key, value int) engine=rocksdb; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test; +id value +1 10 +2 20 +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +rollback; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; +id value +2 20 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +commit; +connection con3; +select * from test; +id value +1 11 +2 19 +commit; +connection 
con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value = 30; +id value +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +select * from test where value % 3 = 0; +id value +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +id value +1 10 +2 20 +delete from test where value = 20; +connection con1; +commit; +connection con2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +variable_value-@a +1 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +select * from test where id = 2; +id value +2 20 +update test set value = 12 where id = 1; 
+update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; +id value +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 5 = 0; +id value +1 10 +2 20 +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +select * from test where value % 3 = 0; +id value +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test; +id value +1 10 +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +delete from test where value = 20; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con2; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 3 = 0; +id value +connection con2; +select * from test where value % 3 = 0; +id value +connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; 
+select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection con1; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection default; +drop table test; +disconnect con1; +disconnect con2; +disconnect con3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result new file mode 100644 index 00000000000..a0fd7a13780 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS is_ddl_t1; +DROP TABLE IF EXISTS is_ddl_t2; +CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, +PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf') +ENGINE = ROCKSDB; +CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, +PRIMARY KEY (z, y) COMMENT 'zy_cf', +KEY (x)) ENGINE = ROCKSDB; +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF +test is_ddl_t2 NULL PRIMARY 1 11 zy_cf +test is_ddl_t2 NULL x 2 11 default +test is_ddl_t1 NULL PRIMARY 1 11 default +test is_ddl_t1 NULL j 2 11 default +test is_ddl_t1 NULL k 2 11 kl_cf +DROP TABLE is_ddl_t1; +DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index.result b/storage/rocksdb/mysql-test/rocksdb/r/index.result new file mode 100644 index 00000000000..f61bad7c4a9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table 
Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a), +KEY (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result new file mode 100644 index 00000000000..c3e54a25864 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4); +COMMIT; +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY 
INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS +# # SSTNAME 5 # # # # # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS +# # SSTNAME 5 # # # # # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS +# # SSTNAME 4 # # # # # +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result new file mode 100644 index 00000000000..b0113d79bb2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a) KEY_BLOCK_SIZE=8 +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY ind1(b ASC) KEY_BLOCK_SIZE=0 +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 ind1 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value' +) ENGINE=rocksdb; +SHOW INDEX IN 
t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # 1 NULL LSMTREE big key_block_size value +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT AUTO_INCREMENT PRIMARY KEY, +KEY a_b(a,b) KEY_BLOCK_SIZE=8192 +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE +t1 1 a_b 2 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY (b) +) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result new file mode 100644 index 00000000000..66481f81c67 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, +b CHAR(8) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +INSERT INTO t1 (a,b) VALUES (1,'c'); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, +b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; +ERROR 42000: Multiple primary key defined +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY (a,b) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index 
Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +t1 0 PRIMARY 2 b A # NULL NULL LSMTREE +INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b'); +INSERT INTO t1 (a,b) VALUES (1,'b'); +ERROR 23000: Duplicate entry '1-b' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 (a INT KEY, +b CHAR(8), +KEY (b) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a); +ERROR 42000: Multiple primary key defined +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result new file mode 100644 index 00000000000..a604663954b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING BTREE (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b USING BTREE (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name 
Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING BTREE (a), +KEY USING BTREE (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) USING BTREE COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result new file mode 100644 index 00000000000..ae99badff14 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING HASH (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b USING HASH (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed 
Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING HASH (a), +KEY USING HASH (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) USING HASH COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result new file mode 100644 index 00000000000..d6177a3f019 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result @@ -0,0 +1,78 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +TYPE NAME VALUE +MAX_INDEX_ID MAX_INDEX_ID max_index_id +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +count(*) +3 +CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +TYPE NAME VALUE +BINLOG FILE master-bin.000001 +BINLOG POS 1066 +BINLOG GTID uuid:5 +MAX_INDEX_ID MAX_INDEX_ID max_index_id +CF_FLAGS 0 
default [0] +CF_FLAGS 1 __system__ [0] +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +count(*) +6 +CREATE INDEX tindex1 on t1 (i1); +CREATE INDEX tindex2 on t1 (i2); +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; +TYPE NAME VALUE +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +CREATE TABLE t2 ( +a int, +b int, +c int, +d int, +e int, +PRIMARY KEY (a) COMMENT "cf_a", +KEY (b) COMMENT "cf_b", +KEY (c) COMMENT "cf_c", +KEY (d) COMMENT "$per_index_cf", +KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; +TYPE NAME VALUE +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +CF_FLAGS 2 cf_a [0] +CF_FLAGS 3 cf_b [0] +CF_FLAGS 4 cf_c [0] +CF_FLAGS 5 test.t2.d [2] +CF_FLAGS 6 rev:cf_d [1] +CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB; +insert into t3 (a) values (1), (2), (3); +SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work ON +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work ON +DROP TABLE t3; +cf_id:0,index_id:268 +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +next line shouldn't cause assertion to fail +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK; +DROP TABLE t1; +DROP TABLE t2; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result new file mode 100644 index 00000000000..6d8d9685a79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -0,0 +1,120 @@ +SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; +trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_adaptive_hash_timeout trx_is_read_only trx_autocommit_non_locking +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_TRX but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; +FILE OPERATION REQUESTS SLOW BYTES BYTES/R SVC:SECS SVC:MSECS/R SVC:MAX_MSECS WAIT:SECS WAIT:MSECS/R WAIT:MAX_MSECS +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FILE_STATUS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; +lock_id lock_trx_id lock_mode lock_type lock_table lock_index lock_space lock_page lock_rec lock_data +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCKS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; +requesting_trx_id requested_lock_id blocking_trx_id blocking_lock_id +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCK_WAITS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; +page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops 
compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; +page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_RESET but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; +database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET; +database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM; +page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMPMEM but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET; +page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time +Warnings: +Warning 1012 InnoDB: SELECTing from 
INFORMATION_SCHEMA.INNODB_CMPMEM_RESET but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; +NAME SUBSYSTEM COUNT MAX_COUNT MIN_COUNT AVG_COUNT COUNT_RESET MAX_COUNT_RESET MIN_COUNT_RESET AVG_COUNT_RESET TIME_ENABLED TIME_DISABLED TIME_ELAPSED TIME_RESET STATUS TYPE COMMENT +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_METRICS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD; +value +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED; +DOC_ID +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DELETED but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED; +DOC_ID +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE; +WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE; +WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; +KEY VALUE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_CONFIG but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; +POOL_ID POOL_SIZE FREE_BUFFERS DATABASE_PAGES OLD_DATABASE_PAGES MODIFIED_DATABASE_PAGES PENDING_DECOMPRESS PENDING_READS PENDING_FLUSH_LRU PENDING_FLUSH_LIST PAGES_MADE_YOUNG 
PAGES_NOT_MADE_YOUNG PAGES_MADE_YOUNG_RATE PAGES_MADE_NOT_YOUNG_RATE NUMBER_PAGES_READ NUMBER_PAGES_CREATED NUMBER_PAGES_WRITTEN PAGES_READ_RATE PAGES_CREATE_RATE PAGES_WRITTEN_RATE NUMBER_PAGES_GET HIT_RATE YOUNG_MAKE_PER_THOUSAND_GETS NOT_YOUNG_MAKE_PER_THOUSAND_GETS NUMBER_PAGES_READ_AHEAD NUMBER_READ_AHEAD_EVICTED READ_AHEAD_RATE READ_AHEAD_EVICTED_RATE LRU_IO_TOTAL LRU_IO_CURRENT UNCOMPRESS_TOTAL UNCOMPRESS_CURRENT +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE; +POOL_ID BLOCK_ID SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE PAGE_STATE IO_FIX IS_OLD FREE_PAGE_CLOCK +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU; +POOL_ID LRU_POSITION SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE COMPRESSED IO_FIX IS_OLD FREE_PAGE_CLOCK +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; +TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLES but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; +TABLE_ID NAME STATS_INITIALIZED NUM_ROWS CLUST_INDEX_SIZE OTHER_INDEX_SIZE MODIFIED_COUNTER AUTOINC REF_COUNT +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS but the InnoDB storage engine is not installed +SELECT * FROM 
INFORMATION_SCHEMA.INNODB_SYS_INDEXES; +INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_INDEXES but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; +TABLE_ID NAME POS MTYPE PRTYPE LEN +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_COLUMNS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS; +INDEX_ID NAME POS +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FIELDS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; +ID FOR_NAME REF_NAME N_COLS TYPE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; +ID FOR_COL_NAME REF_COL_NAME POS +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; +SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE ZIP_PAGE_SIZE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; +SPACE PATH +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DATAFILES but the InnoDB storage engine is not installed +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; +INDEX_ID POS DOCUMENT_PATH DOCUMENT_TYPE +Warnings: +Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS but the InnoDB storage engine is not installed diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert.result b/storage/rocksdb/mysql-test/rocksdb/r/insert.result new file mode 100644 index 00000000000..a1fb3ae90be --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/insert.result @@ -0,0 +1,202 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106); +SELECT a,b FROM t1; +a b +1 a +100 foobar +2 b +3 c +4 d +5 e +INSERT t1 VALUE (10,'foo',107),(11,'abc',108); +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +INSERT INTO t1 (b,a) VALUES ('test',0); +SELECT a,b FROM t1; +a b +0 test +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL); +SELECT a,b FROM t1; +a b +0 test +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +NULL NULL +INSERT t1 (a) VALUE (10),(20); +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +INSERT INTO t1 SET a = 11, b = 'f'; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +INSERT t1 SET b = DEFAULT; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +NULL NULL +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 SELECT a,b,pk FROM t1; +INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo'; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +NULL NULL +INSERT t1 (a,b) SELECT a,b FROM t1; +SELECT a,b FROM t1; +a b +0 test +0 test +1 a +1 a +10 NULL +10 NULL +10 NULL +10 NULL +10 foo +10 foo +100 foobar +100 foobar +11 abc +11 abc +11 f +11 f +2 b +2 b +20 NULL +20 NULL +3 c +3 c +4 d +4 d +5 e +5 e +NULL NULL +NULL NULL +NULL NULL +NULL NULL +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +BEGIN; +INSERT INTO t1 (a,b) VALUES 
(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo'); +INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc'); +COMMIT; +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +BEGIN; +INSERT INTO t1 (b,a) VALUES ('test',0); +SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +RELEASE SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +ROLLBACK; +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +BEGIN; +INSERT t1 (a) VALUE (10),(20); +SAVEPOINT spt1; +INSERT INTO t1 SET a = 11, b = 'f'; +INSERT t1 SET b = DEFAULT; +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +INSERT INTO t1 (b,a) VALUES ('test1',10); +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result new file mode 100644 index 00000000000..ded48057854 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +create table t1( +id bigint not null primary key, +i1 bigint, #unique +i2 bigint, #repeating +c1 varchar(20), #unique +c2 varchar(20), #repeating +index t1_2(i1) +) engine=rocksdb; +select count(*), sum(id), sum(i1), sum(i2) from t1; +count(*) sum(id) sum(i1) sum(i2) +50000 1250025000 1250025000 124980000 +select count(*), sum(id), sum(i1), sum(i2) from t1; +count(*) sum(id) sum(i1) sum(i2) +50000 1250025000 1250025000 124980000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result new file mode 100644 index 
00000000000..9d0fef276e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +SELECT a,b FROM t1; +a b +0 +1 a +1 a +100 a +12345 z +2 b +29 n +3 a +3 c +30 m +4 d +5 e +6 f +DROP TABLE t1; +#---------------------------------------- +# UNIQUE KEYS are not supported currently +#----------------------------------------- +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +ERROR 23000: Duplicate entry '3' for key 'PRIMARY' +INSERT INTO t1 (a,b) VALUES (0,''); +SELECT a,b FROM t1; +a b +0 +1 a +100 a +2 b +29 n +3 c +30 m +4 d +5 e +6 f +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b); +SELECT a,b FROM t1; +a b +0 +1 aa +100 a +12345 zz +2 b +29 n +3 c +30 m +4 d +5 e +6 f +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result new file mode 100644 index 00000000000..ee73ac3e134 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result @@ -0,0 +1,23 @@ +create table t1 ( +id int, +value int, +primary key (id) +) engine=rocksdb; +insert into t1 values(1,1),(2,2); +set autocommit=0; +begin; +insert into t1 values (50,50); +select * from t1; 
+id value +1 1 +2 2 +50 50 +update t1 set id=id+100; +select * from t1; +id value +101 1 +102 2 +150 50 +rollback; +set autocommit=1; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result new file mode 100644 index 00000000000..9e55ebd006f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result @@ -0,0 +1,17 @@ +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; +create table t100(pk int primary key, a int, b int, key(a)); +insert into t100 select a,a,a from test.one_k; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; +num_rows entry_deletes entry_singledeletes +1000 0 0 +update t100 set a=a+1; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; +num_rows entry_deletes entry_singledeletes +1000 0 0 +1000 0 1000 +drop table ten, t100, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result new file mode 100644 index 00000000000..315d2d2b50b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result @@ -0,0 +1,32 @@ +create table t1 ( +pk int not null primary key, +col1 int not null, +col2 int not null, +key(col1) +) engine=rocksdb; +create table ten(a int primary key); +insert into ten values 
(0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; +insert into t1 select a,a,a from one_k; +# Start the transaction, get the snapshot +begin; +select * from t1 where col1<10; +pk col1 col2 +0 0 0 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +# Connect with another connection and make a conflicting change +begin; +update t1 set col2=123456 where pk=0; +commit; +update t1 set col2=col2+1 where col1 < 10 limit 5; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue290.result b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result new file mode 100644 index 00000000000..8b1a35648c0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result @@ -0,0 +1,28 @@ +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +set global rocksdb_force_flush_memtable_now=1; +insert into linktable (id1, link_type, id2) values (2, 1, 1); +insert into linktable (id1, link_type, id2) values (2, 1, 2); +insert into linktable (id1, link_type, id2) values (2, 1, 3); +insert into linktable (id1, link_type, id2) values (2, 1, 4); +insert into linktable (id1, link_type, id2) values (2, 1, 5); +insert into linktable (id1, 
link_type, id2) values (2, 1, 6); +insert into linktable (id1, link_type, id2) values (2, 1, 7); +insert into linktable (id1, link_type, id2) values (2, 1, 8); +insert into linktable (id1, link_type, id2) values (2, 1, 9); +insert into linktable (id1, link_type, id2) values (2, 1, 10); +explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where +drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue314.result b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result new file mode 100644 index 00000000000..eee90800286 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result @@ -0,0 +1,12 @@ +drop table if exists t1; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a int); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t1 VALUES(1); +select * from t1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. 
Please change from current isolation level SERIALIZABLE +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +select * from t1; +a +1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result new file mode 100644 index 00000000000..b68b37cf6c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result @@ -0,0 +1,111 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+100 FROM t1; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +1 +2 +COMMIT; +SELECT a FROM t1; +a +1 +2 +connection con1; +SELECT a FROM t1; +a +1 +2 +INSERT INTO t1 (a) SELECT a+200 FROM t1; +SELECT a FROM t1; +a +1 +2 +201 +202 +COMMIT; +SELECT a FROM t1; +a +1 +2 +201 +202 +connection con2; +SELECT a FROM t1; +a +1 +2 +201 +202 +connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; +connection con1; +BEGIN; +SELECT a from t2; +a +1 +INSERT INTO t2 (a) VALUES (1), (3); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +connection con2; +INSERT INTO t2 (a) VALUES (2); +COMMIT; +connection con1; +SELECT a from t2; +a +1 +2 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t3 ( +pk int unsigned PRIMARY KEY, +count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ 
COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +BEGIN; +SELECT * FROM t3; +pk count +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +connection con1; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +SELECT count FROM t3; +count +1 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result new file mode 100644 index 00000000000..68fbe5632cb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result @@ -0,0 +1,116 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +connection con1; +CREATE TABLE t1 (a ) ENGINE= ; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +1 +connection con2; +INSERT INTO t1 (a) VALUES (2); +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +1 +2 +INSERT INTO t1 (a) SELECT a+100 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +1 +101 +102 +2 +connection con2; +SELECT a FROM t1; +a +1 +101 +102 +2 +COMMIT; +SELECT a FROM t1; +a +1 +101 +102 +2 +connection con1; +SELECT a FROM t1; +a +1 +101 +102 +2 +INSERT INTO t1 (a) SELECT a+200 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. 
+# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +COMMIT; +SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +connection con2; +SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a ) ENGINE= ; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (READ UNCOMMITTED), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result new file mode 100644 index 00000000000..13da8a0ffeb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result @@ -0,0 +1,100 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+100 FROM t1; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +1 +2 +COMMIT; +SELECT a FROM t1; +a +1 +2 +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+200 FROM t1; +SELECT a FROM t1; +a +COMMIT; +SELECT a 
FROM t1; +a +1 +2 +connection con2; +SELECT a FROM t1; +a +1 +2 +connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; +connection con1; +BEGIN; +SELECT a from t2; +a +1 +INSERT INTO t2 (a) VALUES (1), (3); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +connection con2; +INSERT INTO t2 (a) VALUES (2); +COMMIT; +connection con1; +SELECT a from t2; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t3 ( +pk int unsigned PRIMARY KEY, +count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +BEGIN; +SELECT * FROM t3; +pk count +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +connection con1; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +SELECT count FROM t3; +count +0 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result new file mode 100644 index 00000000000..3f57395fa37 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +connection con1; +CREATE TABLE t1 (a ) ENGINE= ; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'. 
+# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+100 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +COMMIT; +SELECT a FROM t1; +a +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+200 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +COMMIT; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result new file mode 100644 index 00000000000..5f6df197c94 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result @@ -0,0 +1,239 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ',' (a,b); +SELECT a,b FROM t1; +a b +1 foo +2 bar +3 +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +SELECT a,b FROM t1; +a b +1 foo +1 foo +2 bar +2 bar +3 +3 +4 abc +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + (a) SET b='loaded'; +Warnings: +Warning 1262 Row 1 was truncated; it contained more data than there were input columns +Warning 1262 Row 2 was truncated; it 
contained more data than there were input columns +Warning 1262 Row 3 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 loaded +1 foo +1 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' +IGNORE 2 LINES (a,b); +Warnings: +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 aaa +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +2 bbb +3 +3 +3 +3 ccc +4 abc +4 abc +4 abc +4 ddd +5 loaded +7 test +DROP TABLE t1; +set session rocksdb_skip_unique_check=1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ',' (a,b); +SELECT a,b FROM t1; +a b +1 foo +2 bar +3 +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +SELECT a,b FROM 
t1; +a b +1 foo +1 foo +2 bar +2 bar +3 +3 +4 abc +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + (a) SET b='loaded'; +Warnings: +Warning 1262 Row 1 was truncated; it contained more data than there were input columns +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +Warning 1262 Row 3 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 loaded +1 foo +1 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' +IGNORE 2 LINES (a,b); +Warnings: +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1; +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. 
Query: LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1 +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock.result b/storage/rocksdb/mysql-test/rocksdb/r/lock.result new file mode 100644 index 00000000000..8c89fa1b934 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock.result @@ -0,0 +1,108 @@ +connect con1,localhost,root,,; +SET lock_wait_timeout=1; +connection default; +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3); +LOCK TABLE t1 LOW_PRIORITY WRITE; +Warnings: +Warning 1287 'LOW_PRIORITY WRITE' is deprecated and will be removed in a future release. Please use WRITE instead +SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2; +id2 COUNT(DISTINCT id) +1 1 +2 1 +3 1 +UPDATE t1 SET id=-1 WHERE id=1; +connection con1; +SELECT id,id2 FROM t1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +LOCK TABLE t1 READ; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +connection default; +LOCK TABLE t1 READ; +UPDATE t1 SET id=1 WHERE id=1; +ERROR HY000: Table 't1' was locked with a READ lock and can't be updated +connection con1; +SELECT COUNT(DISTINCT id) FROM t1; +COUNT(DISTINCT id) +1 +UPDATE t1 SET id=2 WHERE id=2; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table: test.t1 +LOCK TABLE t1 WRITE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +LOCK TABLE t1 READ; +UNLOCK TABLES; +connection default; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +CREATE TABLE t2 (id INT, 
id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1; +UPDATE t1 SET id=1 WHERE id=-1; +DROP TABLE t1,t2; +CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (nr,nm) VALUES (1,3); +INSERT INTO t2 (nr,nm) VALUES (2,4); +LOCK TABLES t1 WRITE, t2 READ; +INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3; +INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4; +UNLOCK TABLES; +LOCK TABLES t1 WRITE; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1; +ERROR HY000: Table 't1' was not locked with LOCK TABLES +UNLOCK TABLES; +LOCK TABLES t1 WRITE, t1 AS t1_alias READ; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias; +DROP TABLE t1,t2; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +DROP TABLE t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE; +DROP TABLE t2, t3, t1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ; +ALTER TABLE t2 ADD COLUMN c2 INT; +DROP TABLE t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +LOCK TABLE t1 READ, t2 READ; +FLUSH TABLE t1; +ERROR HY000: Table 't1' was locked with a READ lock and can't be updated +FLUSH TABLES; +ERROR HY000: Table 't2' was locked with a READ lock and can't be updated +FLUSH TABLES t1, t2 WITH READ LOCK; +ERROR HY000: Can't execute the given command because you 
have active locked tables or an active transaction +UNLOCK TABLES; +FLUSH TABLES t1, t2 WITH READ LOCK; +connection con1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +connection default; +UNLOCK TABLES; +FLUSH TABLES WITH READ LOCK; +connection con1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on global read: +connection default; +UNLOCK TABLES; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +FLUSH TABLES WITH READ LOCK; +DROP TABLE t1, t2; +ERROR HY000: Can't execute the query because you have a conflicting read lock +UNLOCK TABLES; +DROP TABLE t1, t2; +disconnect con1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +SELECT a,b FROM t1; +a b +UNLOCK TABLES; +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result new file mode 100644 index 00000000000..cf764f89581 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +id1 id2 id3 value +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; +id1 id2 id3 value +connection con1; 
+ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +connection con2; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +connection con2; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result new file mode 100644 index 00000000000..4b237dcb7aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result @@ -0,0 +1,490 @@ + +----------------------------------------------------------------------- +- Locking issues case 1.1: +- Locking rows that do not exist when using all primary key columns in +- a WHERE clause +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +id1 id2 value +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +INSERT INTO t0 VALUES (1,5,0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +COMMIT; 
+DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 1.1: +- Locking rows that do not exist when using all primary key columns in +- a WHERE clause +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +id1 id2 value +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +INSERT INTO t0 VALUES (1,5,0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 1.2: +- Locking rows that do not exist without using all primary key +- columns in a WHERE clause +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 FOR UPDATE; +id1 id2 value +1 1 0 +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +id1 id2 value +INSERT INTO t0 VALUES (1,5,0); +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 1.2: +- Locking rows that do not exist without using all primary key +- columns in a WHERE clause +- 
using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 FOR UPDATE; +id1 id2 value +1 1 0 +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +id1 id2 value +INSERT INTO t0 VALUES (1,5,0); +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using REPEATABLE READ transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +UPDATE t0 SET VALUE=10 WHERE id=1; +UPDATE t0 SET VALUE=10 WHERE id=5; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +SELECT * FROM t0 WHERE id=4 FOR UPDATE; +id value +4 0 +COMMIT; +SELECT * FROM t0; +id value +1 10 +2 1 +3 0 +4 0 +5 1 +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using READ COMMITTED transaction isolation level unless +- rocksdb_lock_scanned_rows is on 
+----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +UPDATE t0 SET VALUE=10 WHERE id=1; +UPDATE t0 SET VALUE=10 WHERE id=5; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +SELECT * FROM t0 WHERE id=4 FOR UPDATE; +id value +4 0 +COMMIT; +SELECT * FROM t0; +id value +1 10 +2 1 +3 0 +4 0 +5 1 +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using REPEATABLE READ transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +UPDATE t0 SET VALUE=10 WHERE id=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +COMMIT; +DROP TABLE t0; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not 
match the WHERE are not locked +- using READ COMMITTED transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +UPDATE t0 SET VALUE=10 WHERE id=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +COMMIT; +DROP TABLE t0; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 3: +- After creating a snapshot, other clients updating rows +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 3: +- After creating a snapshot, other clients updating rows +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +SET SESSION TRANSACTION ISOLATION 
LEVEL READ COMMITTED; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 4: +- Phantom rows +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +INSERT INTO t0 VALUES(200001,1), (-1,1); +id value +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 4: +- Phantom rows +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t0 VALUES(200001,1), (-1,1); +id value +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 5: +- Deleting primary key +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL 
REPEATABLE READ; +BEGIN; +DELETE FROM t0 WHERE id=190000; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 5: +- Deleting primary key +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +DELETE FROM t0 WHERE id=190000; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 6: +- Changing primary key +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 6: +- Changing primary key +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 
200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +COMMIT; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +lock_scanned_rows is 0 +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +UPDATE t2 SET value=value+100; +SELECT * FROM t2; +id value +1 101 +2 102 +3 103 +4 104 +5 105 +COMMIT; +DROP TABLE t1; +DROP TABLE t2; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), 
(3,3), (4,4), (5,5); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +lock_scanned_rows is 0 +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +UPDATE t2 SET value=value+100; +SELECT * FROM t2; +id value +1 101 +2 102 +3 103 +4 104 +5 105 +COMMIT; +DROP TABLE t1; +DROP TABLE t2; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +lock_scanned_rows is 1 +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +UPDATE t2 SET value=value+100 WHERE id=3; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY +UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); +SELECT * FROM t2; +id value +1 101 +2 102 +3 3 +4 104 +5 105 +COMMIT; +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT 
@@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +lock_scanned_rows is 1 +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +UPDATE t2 SET value=value+100 WHERE id=3; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY +UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); +SELECT * FROM t2; +id value +1 101 +2 102 +3 3 +4 104 +5 105 +COMMIT; +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_lock_scanned_rows=0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result new file mode 100644 index 00000000000..70c270d5538 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result @@ -0,0 +1,84 @@ +INSERT INTO mysql.event ( +db, +name, +body, +definer, +interval_value, +interval_field, +originator, +character_set_client, +collation_connection, +db_collation, +body_utf8) +values ( +database(), +"ev1", +"select 1", +user(), +100, +"SECOND_MICROSECOND", +1, +'utf8', +'utf8_general_ci', +'utf8_general_ci', +'select 1'); +SHOW EVENTS; +ERROR 42000: This version of MySQL doesn't yet support 'MICROSECOND' +DROP EVENT ev1; +SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; +TABLE_NAME COLUMN_NAME REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +columns_priv Column_name NULL NULL +columns_priv Db NULL NULL +columns_priv Host NULL NULL +columns_priv Table_name NULL NULL +columns_priv User NULL NULL +db Db NULL NULL +db Host NULL NULL +db User NULL NULL +event 
db NULL NULL +event name NULL NULL +func name NULL NULL +help_category help_category_id NULL NULL +help_category name NULL NULL +help_keyword help_keyword_id NULL NULL +help_keyword name NULL NULL +help_relation help_keyword_id NULL NULL +help_relation help_topic_id NULL NULL +help_topic help_topic_id NULL NULL +help_topic name NULL NULL +ndb_binlog_index epoch NULL NULL +ndb_binlog_index orig_epoch NULL NULL +ndb_binlog_index orig_server_id NULL NULL +plugin name NULL NULL +proc db NULL NULL +proc name NULL NULL +proc type NULL NULL +procs_priv Db NULL NULL +procs_priv Host NULL NULL +procs_priv Routine_name NULL NULL +procs_priv Routine_type NULL NULL +procs_priv User NULL NULL +proxies_priv Host NULL NULL +proxies_priv Proxied_host NULL NULL +proxies_priv Proxied_user NULL NULL +proxies_priv User NULL NULL +servers Server_name NULL NULL +slave_gtid_info Id NULL NULL +slave_master_info Host NULL NULL +slave_master_info Port NULL NULL +slave_relay_log_info Id NULL NULL +slave_worker_info Id NULL NULL +tables_priv Db NULL NULL +tables_priv Host NULL NULL +tables_priv Table_name NULL NULL +tables_priv User NULL NULL +time_zone Time_zone_id NULL NULL +time_zone_leap_second Transition_time NULL NULL +time_zone_name Name NULL NULL +time_zone_transition Time_zone_id NULL NULL +time_zone_transition Transition_time NULL NULL +time_zone_transition_type Time_zone_id NULL NULL +time_zone_transition_type Transition_type_id NULL NULL +user Host NULL NULL +user User NULL NULL diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result new file mode 100644 index 00000000000..835361eea35 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result @@ -0,0 +1,143 @@ +reset master; +set timestamp=1000000000; +set SESSION binlog_format = 'ROW'; +create database test2; +create database test3; +use test; +create 
table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +use test2; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +use test3; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +ROLLBACK/*!*/; +SET TIMESTAMP=1000000000/*!*/; +SET @@session.pseudo_thread_id=999999999/*!*/; +SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/; +SET @@session.sql_mode=1073741824/*!*/; +SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/; +/*!\C latin1 *//*!*/; +SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/; +SET @@session.lc_time_names=0/*!*/; +SET @@session.collation_database=DEFAULT/*!*/; +create database test2 +/*!*/; +use `test2`/*!*/; +SET 
TIMESTAMP=1000000000/*!*/; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb +/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +SET TIMESTAMP=1000000000/*!*/; +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb +/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test2; +start transaction; +insert into t2 values(5, 'e'); +insert into t2 values(6, 'f'); +use test; +insert into t2 values(7, 'g'); +insert into t2 values(8, 'h'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +==== DB changed in the middle of the transaction, which belongs to the selected database +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +SET TIMESTAMP=1000000000/*!*/; +SET @@session.pseudo_thread_id=999999999/*!*/; +SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/; +SET @@session.sql_mode=1073741824/*!*/; +SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/; +/*!\C latin1 *//*!*/; +SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/; +SET @@session.lc_time_names=0/*!*/; +SET @@session.collation_database=DEFAULT/*!*/; +BEGIN +/*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test; +start transaction; +insert into t2 values(9, 'i'); +insert into t2 values(10, 'j'); +use test2; +insert into t2 
values(11, 'k'); +insert into t2 values(12, 'l'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +==== DB changed in the middle of the transaction, which belongs to the non-selected database +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test; +drop table t1; +drop table if exists t2; +use test2; +drop table t1; +drop table if exists t2; +use test3; +drop table t1; +drop table if exists t2; +drop database test2; +drop database test3; +FLUSH LOGS; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result new file mode 100644 index 00000000000..849257d08fa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result @@ -0,0 +1,131 @@ +drop table if exists r1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb; +insert into r1 values (1,1,1,1,1,1,1,1); +insert into r1 values (1,1,1,2,2,2,2,2); +insert into r1 values (1,1,2,1,3,3,3,3); +insert into r1 values (1,1,2,2,4,4,4,4); +insert into r1 values (1,2,1,1,5,5,5,5); +insert into r1 values (1,2,1,2,6,6,6,6); +insert into r1 values (1,2,2,1,7,7,7,7); +insert into r1 values (1,2,2,2,8,8,8,8); +insert into r1 values (2,1,1,1,9,9,9,9); +insert into r1 values (2,1,1,2,10,10,10,10); +insert into r1 values (2,1,2,1,11,11,11,11); +insert into r1 values (2,1,2,2,12,12,12,12); +insert into r1 values (2,2,1,1,13,13,13,13); +insert into r1 values 
(2,2,1,2,14,14,14,14); +insert into r1 values (2,2,2,1,15,15,15,15); +insert into r1 values (2,2,2,2,16,16,16,16); +connection con2; +BEGIN; +insert into r1 values (5,5,5,5,5,5,5,5); +update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +/*!50601 SELECT count(*) INTO @is_rocksdb_supported FROM information_schema.SESSION_VARIABLES WHERE variable_name='rocksdb_bulk_load' */; +/*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */; +/*!50601 PREPARE s FROM @enable_bulk_load */; +/*!50601 EXECUTE s */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL DEFAULT '0', + `id2` int(11) NOT NULL DEFAULT '0', + `id3` varchar(100) NOT NULL DEFAULT '', + `id4` int(11) NOT NULL DEFAULT '0', + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY (DESC) : PRIMARY */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES 
(2,2,'2',2,16,16,16,16),(2,2,'2',1,15,15,15,15),(2,2,'1',2,14,14,14,14),(2,2,'1',1,13,13,13,13),(2,1,'2',2,12,12,12,12),(2,1,'2',1,11,11,11,11),(2,1,'1',2,10,10,10,10),(2,1,'1',1,9,9,9,9),(1,2,'2',2,8,8,8,8),(1,2,'2',1,7,7,7,7),(1,2,'1',2,6,6,6,6),(1,2,'1',1,5,5,5,5),(1,1,'2',2,4,4,4,4),(1,1,'2',1,3,3,3,3),(1,1,'1',2,2,2,2,2),(1,1,'1',1,1,1,1,1); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!50601 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=0', 'SET @dummy = 0') */; +/*!50601 PREPARE s FROM @disable_bulk_load */; +/*!50601 EXECUTE s */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +rollback; +connection con1; +1 +set @save_default_storage_engine=@@global.default_storage_engine; +SET GLOBAL default_storage_engine=rocksdb; + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 
*/; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL DEFAULT '0', + `id2` int(11) NOT NULL DEFAULT '0', + `id3` varchar(100) NOT NULL DEFAULT '', + `id4` int(11) NOT NULL DEFAULT '0', + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY : (null) */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +2 +SET GLOBAL binlog_format=statement; +SET GLOBAL binlog_format=row; +drop table r1; +reset master; +set @@global.default_storage_engine=@save_default_storage_engine; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result new file mode 100644 index 00000000000..11c1f370e7a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; 
+optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +case when variable_value - @a > 20 then 'true' else 'false' end +false +select count(*) from t1; +count(*) +50000 +select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +case when variable_value - @a > 100 then 'true' else 'false' end +true +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result new file mode 100644 index 00000000000..e45c5d6efc7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; +set session debug= "+d,myrocks_simulate_negative_stats"; +SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END +true +set session debug= "-d,myrocks_simulate_negative_stats"; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result new file mode 100644 index 00000000000..3a631d2925b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result @@ -0,0 +1,63 @@ +Warnings: +Note 1051 Unknown table 'test.ti_nk' +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true 
+skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +DROP TABLE ti_nk; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result new file mode 100644 index 00000000000..fa2062b415e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result @@ -0,0 +1,81 @@ +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +select count(*) from t1; +count(*) +10000 +select count(*) from t2; +count(*) +10000 +select count(*) from t3; +count(*) +10000 +select count(*) from t4; +count(*) +10000 +select count(*) from t5; +count(*) +10000 +select count(*) from t6; +count(*) +10000 +delete from t1 where id <= 9900; +delete from t2 where id <= 9900; +delete from t3 where id <= 9900; +delete from t4 where id <= 9900; +delete from t5 where id <= 9900; +delete from t6 
where id <= 9900; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +optimize table t3; +Table Op Msg_type Msg_text +test.t3 optimize status OK +optimize table t4; +Table Op Msg_type Msg_text +test.t4 optimize status OK +optimize table t6; +Table Op Msg_type Msg_text +test.t6 optimize status OK +select count(*) from t1; +count(*) +100 +select count(*) from t2; +count(*) +100 +select count(*) from t3; +count(*) +100 +select count(*) from t4; +count(*) +100 +select count(*) from t5; +count(*) +100 +select count(*) from t6; +count(*) +100 +checking sst file reduction on optimize table from 0 to 1.. +ok. +checking sst file reduction on optimize table from 1 to 2.. +ok. +checking sst file reduction on optimize table from 2 to 3.. +ok. +checking sst file reduction on optimize table from 3 to 4.. +ok. +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +optimize table t5; +Table Op Msg_type Msg_text +test.t5 optimize status OK +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP TABLE t5; +DROP TABLE t6; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result new file mode 100644 index 00000000000..76085cc1d27 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +SHOW TABLES; +Tables_in_test +TEMP0 +VAR_POP +VAR_SAMP +t1 +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +DROP TABLE t1; +DROP TABLE VAR_POP; +DROP TABLE TEMP0; +DROP TABLE VAR_SAMP; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result 
new file mode 100644 index 00000000000..2e8610d43bd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result @@ -0,0 +1,160 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level; +SET GLOBAL rocksdb_perf_context_level=3; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1'; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL USER_KEY_COMPARISON_COUNT # +test t1 NULL BLOCK_CACHE_HIT_COUNT # +test t1 NULL BLOCK_READ_COUNT # +test t1 NULL BLOCK_READ_BYTE # +test t1 NULL BLOCK_READ_TIME # +test t1 NULL BLOCK_CHECKSUM_TIME # +test t1 NULL BLOCK_DECOMPRESS_TIME # +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT # +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT # +test t1 NULL GET_SNAPSHOT_TIME # +test t1 NULL GET_FROM_MEMTABLE_TIME # +test t1 NULL GET_FROM_MEMTABLE_COUNT # +test t1 NULL GET_POST_PROCESS_TIME # +test t1 NULL GET_FROM_OUTPUT_FILES_TIME # +test t1 NULL SEEK_ON_MEMTABLE_TIME # +test t1 NULL SEEK_ON_MEMTABLE_COUNT # +test t1 NULL SEEK_CHILD_SEEK_TIME # +test t1 NULL SEEK_CHILD_SEEK_COUNT # +test t1 NULL SEEK_IN_HEAP_TIME # +test t1 NULL SEEK_INTERNAL_SEEK_TIME # +test t1 NULL FIND_NEXT_USER_ENTRY_TIME # +test t1 NULL WRITE_WAL_TIME # +test t1 NULL WRITE_MEMTABLE_TIME # +test t1 NULL WRITE_DELAY_TIME # +test t1 NULL WRITE_PRE_AND_POST_PROCESS_TIME # +test t1 NULL DB_MUTEX_LOCK_NANOS # +test t1 NULL DB_CONDITION_WAIT_NANOS # +test t1 NULL MERGE_OPERATOR_TIME_NANOS # +test t1 NULL READ_INDEX_BLOCK_NANOS # +test t1 NULL READ_FILTER_BLOCK_NANOS # +test t1 NULL NEW_TABLE_BLOCK_ITER_NANOS # +test t1 NULL NEW_TABLE_ITERATOR_NANOS # +test t1 NULL BLOCK_SEEK_NANOS # +test t1 NULL FIND_TABLE_NANOS # +test t1 NULL IO_THREAD_POOL_ID # +test t1 NULL IO_BYTES_WRITTEN # 
+test t1 NULL IO_BYTES_READ # +test t1 NULL IO_OPEN_NANOS # +test t1 NULL IO_ALLOCATE_NANOS # +test t1 NULL IO_WRITE_NANOS # +test t1 NULL IO_READ_NANOS # +test t1 NULL IO_RANGE_SYNC_NANOS # +test t1 NULL IO_LOGGER_NANOS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL; +STAT_TYPE VALUE +USER_KEY_COMPARISON_COUNT # +BLOCK_CACHE_HIT_COUNT # +BLOCK_READ_COUNT # +BLOCK_READ_BYTE # +BLOCK_READ_TIME # +BLOCK_CHECKSUM_TIME # +BLOCK_DECOMPRESS_TIME # +INTERNAL_KEY_SKIPPED_COUNT # +INTERNAL_DELETE_SKIPPED_COUNT # +GET_SNAPSHOT_TIME # +GET_FROM_MEMTABLE_TIME # +GET_FROM_MEMTABLE_COUNT # +GET_POST_PROCESS_TIME # +GET_FROM_OUTPUT_FILES_TIME # +SEEK_ON_MEMTABLE_TIME # +SEEK_ON_MEMTABLE_COUNT # +SEEK_CHILD_SEEK_TIME # +SEEK_CHILD_SEEK_COUNT # +SEEK_IN_HEAP_TIME # +SEEK_INTERNAL_SEEK_TIME # +FIND_NEXT_USER_ENTRY_TIME # +WRITE_WAL_TIME # +WRITE_MEMTABLE_TIME # +WRITE_DELAY_TIME # +WRITE_PRE_AND_POST_PROCESS_TIME # +DB_MUTEX_LOCK_NANOS # +DB_CONDITION_WAIT_NANOS # +MERGE_OPERATOR_TIME_NANOS # +READ_INDEX_BLOCK_NANOS # +READ_FILTER_BLOCK_NANOS # +NEW_TABLE_BLOCK_ITER_NANOS # +NEW_TABLE_ITERATOR_NANOS # +BLOCK_SEEK_NANOS # +FIND_TABLE_NANOS # +IO_THREAD_POOL_ID # +IO_BYTES_WRITTEN # +IO_BYTES_READ # +IO_OPEN_NANOS # +IO_ALLOCATE_NANOS # +IO_WRITE_NANOS # +IO_READ_NANOS # +IO_RANGE_SYNC_NANOS # +IO_LOGGER_NANOS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 0 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 +SELECT * FROM t1; +i j +1 1 +2 2 +3 3 +4 4 +5 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 5 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 
+SELECT * FROM t1 WHERE j BETWEEN 1 AND 5; +i j +1 1 +2 2 +3 3 +4 4 +5 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 10 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 +BEGIN; +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t2 VALUES (3), (4); +COMMIT; +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; +COUNT(*) +0 +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS' AND VALUE > 0; +COUNT(*) +1 +SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; +INSERT INTO t2 VALUES (5), (6), (7), (8); +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; +COUNT(*) +1 +SELECT VALUE INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; +SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END; +CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END +true +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result new file mode 100644 index 00000000000..b83f0a474cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select variable_value into @s from information_schema.global_status 
where variable_name='rocksdb_number_sst_entry_singledelete'; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 734 uuid:1-3 +select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +case when variable_value-@p < 1000 then 'true' else variable_value-@p end +true +select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s < 100 then 'true' else variable_value-@s end +true +SELECT * FROM t1; +id value +1 1 +INSERT INTO t1 values (2, 2); +ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT. +ROLLBACK; +SELECT * FROM t1; +id value +1 10001 +INSERT INTO t1 values (2, 2); +SELECT * FROM t1 ORDER BY id; +id value +1 10001 +2 2 +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +9998 +COMMIT; +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +DROP TABLE t1; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result new file mode 100644 index 00000000000..e165e117a99 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result @@ -0,0 +1,210 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT, +a INT, +b INT, +PRIMARY KEY (i), +KEY ka(a), +KEY kb(b) comment 'rev:cf1' +) ENGINE = rocksdb; +explain extended select * from t1 where a> 500 and a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and 
(`test`.`t1`.`a` < 750)) +explain extended select * from t1 where a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +explain extended select * from t1 where a> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500) +explain extended select * from t1 where a>=0 and a<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) +explain extended select * from t1 where b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +explain extended select * from t1 where b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) +explain extended select * from t1 where b> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 
SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) +explain extended select * from t1 where b>=0 and b<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) +set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range; +set rocksdb_records_in_range = 15000; +explain extended select a from t1 where a < 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 15000 100.00 Using where; Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` < 750) +explain extended select a, b from t1 where a < 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 75.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +explain extended select a from t1 where a = 700; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ref ka ka 5 const 15000 100.00 Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` = 700) +explain extended select a,b from t1 where a = 700; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ref ka ka 5 const 15000 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` = 700) +explain extended 
select a from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index ka ka 5 NULL 20000 100.00 Using where; Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +explain extended select a,b from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +set rocksdb_records_in_range=8000; +explain extended select a from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 16000 100.00 Using where; Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +explain extended select a,b from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 80.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +set rocksdb_records_in_range = @save_rocksdb_records_in_range; +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a> 500 and a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750)) +explain extended select * from t1 where a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 
range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +explain extended select * from t1 where a> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500) +explain extended select * from t1 where a>=0 and a<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) +explain extended select * from t1 where b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +explain extended select * from t1 where b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) +explain extended select * from t1 where b> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS 
`a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) +explain extended select * from t1 where b>=0 and b<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) +explain extended select * from t1 where a>= 500 and a<= 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 500) and (`test`.`t1`.`a` <= 500)) +explain extended select * from t1 where b>= 500 and b<= 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 500) and (`test`.`t1`.`b` <= 500)) +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka,kb ka 5 NULL 1000 100.00 Using index condition; Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +drop index ka on t1; +drop index kb on t1; +create index kab on t1(a,b); +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 
range kab kab 5 NULL 1000 100.00 Using where; Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +set rocksdb_records_in_range=444; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kab kab 5 NULL 444 100.00 Using where; Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +set rocksdb_records_in_range=0; +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY,id1_type 
PRIMARY 24 NULL 2 Using where +drop table linktable; +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where +drop table linktable; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result new file mode 100644 index 00000000000..f227d200bcc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 
(a,b) VALUES (4,'d'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine for the table doesn't support repair +INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f'); +REPAIR LOCAL TABLE t2; +Table Op Msg_type Msg_text +test.t2 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); +INSERT INTO t2 (a,b) VALUES (9,'i'); +REPAIR LOCAL TABLE t2, t1 EXTENDED; +Table Op Msg_type Msg_text +test.t2 repair note The storage engine for the table doesn't support repair +test.t1 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (10,'j'); +INSERT INTO t2 (a,b) VALUES (11,'k'); +REPAIR TABLE t1, t2 QUICK USE_FRM; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (12,'l'); +INSERT INTO t2 (a,b) VALUES (13,'m'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine for the table doesn't support repair +FLUSH TABLE t1; +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/replace.result b/storage/rocksdb/mysql-test/rocksdb/r/replace.result new file mode 100644 index 00000000000..f8f61a3f8c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/replace.result @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +SELECT a,b FROM t1; +a b +1 a +2 b +3 c +4 d +5 e +REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo'); +SELECT a,b FROM t1; +a b +1 a +10 foo +10 foo +2 b +3 c +4 d +5 e +DROP 
TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +INSERT INTO t1 (a,b) VALUES (4,'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +REPLACE INTO t1 (a,b) VALUES (4,'b'); +SELECT a,b FROM t1; +a b +1 a +3 c +4 b +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result new file mode 100644 index 00000000000..55388c65b99 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -0,0 +1,2456 @@ +select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb'; +ENGINE COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20; +drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29; +drop table if exists t30,t31,t32,t33,t34,t35,t36,t37,t38,t39; +drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49; +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +# +# Issue #1: Don't update indexes if index values have not changed +# +create table t1 ( +pk int primary key, +a int, +b int, +key(a) +) engine=rocksdb; +insert into t1 values +(1,1,1), (2,2,2), (3,3,3), (4,4,4); +set @var1=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# Do an update that doesn't change the key 'a'. 
+update t1 set b=3334341 where a=2; +set @var2=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# The following should produce 1 +select @var2 - @var1; +@var2 - @var1 +1 +# Do an update that sets the key to the same value +update t1 set a=pk where a=3; +set @var3=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# We have 'updated' column to the same value, so the following must return 0: +select @var3 - @var2; +@var3 - @var2 +0 +drop table t1; +create table t0 (a int primary key) engine=rocksdb; +show create table t0; +Table Create Table +t0 CREATE TABLE `t0` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +drop table t0; +create table t1 (a int primary key, b int) engine=rocksdb; +insert into t1 values (1,1); +insert into t1 values (2,2); +select * from t1; +a b +1 1 +2 2 +# Check that we can create another table and insert there +create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb; +insert into t2 value ('abc','def'); +insert into t2 value ('hijkl','mnopq'); +select * from t2; +a b +abc def +hijkl mnopq +# Select again from t1 to see that records from different tables dont mix +select * from t1; +a b +1 1 +2 2 +explain select * from t2 where a='no-such-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +explain select * from t2 where a='abc'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 const PRIMARY PRIMARY 12 const # NULL +select * from t2 where a='abc'; +a b +abc def +# Try a composite PK +create table t3 ( +pk1 int, +pk2 varchar(10), +col1 varchar(10), +primary key(pk1, pk2) +) engine=rocksdb; +insert into t3 values (2,'two', 'row#2'); +insert into t3 values (3,'three', 'row#3'); +insert into t3 values (1,'one', 
'row#1'); +select * from t3; +pk1 pk2 col1 +1 one row#1 +2 two row#2 +3 three row#3 +select * from t3 where pk1=3 and pk2='three'; +pk1 pk2 col1 +3 three row#3 +drop table t1, t2, t3; +# +# Test blob values +# +create table t4 (a int primary key, b blob) engine=rocksdb; +insert into t4 values (1, repeat('quux-quux', 60)); +insert into t4 values (10, repeat('foo-bar', 43)); +insert into t4 values (5, repeat('foo-bar', 200)); +insert into t4 values (2, NULL); +select +a, +(case a +when 1 then b=repeat('quux-quux', 60) +when 10 then b=repeat('foo-bar', 43) +when 5 then b=repeat('foo-bar', 200) +when 2 then b is null +else 'IMPOSSIBLE!' end) as CMP +from t4; +a CMP +1 1 +2 1 +5 1 +10 1 +drop table t4; +# +# Test blobs of various sizes +# +# TINYBLOB +create table t5 (a int primary key, b tinyblob) engine=rocksdb; +insert into t5 values (1, repeat('quux-quux', 6)); +insert into t5 values (10, repeat('foo-bar', 4)); +insert into t5 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('quux-quux', 6) +when 10 then b=repeat('foo-bar', 4) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' end) as CMP +from t5; +a CMP +1 1 +5 1 +10 1 +drop table t5; +# MEDIUMBLOB +create table t6 (a int primary key, b mediumblob) engine=rocksdb; +insert into t6 values (1, repeat('AB', 65000)); +insert into t6 values (10, repeat('bbb', 40000)); +insert into t6 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('AB', 65000) +when 10 then b=repeat('bbb', 40000) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' 
end) as CMP +from t6; +a CMP +1 1 +5 1 +10 1 +drop table t6; +# LONGBLOB +create table t7 (a int primary key, b longblob) engine=rocksdb; +insert into t7 values (1, repeat('AB', 65000)); +insert into t7 values (10, repeat('bbb', 40000)); +insert into t7 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('AB', 65000) +when 10 then b=repeat('bbb', 40000) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' end) as CMP +from t7; +a CMP +1 1 +5 1 +10 1 +drop table t7; +# +# Check if DELETEs work +# +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +insert into t8 values +('one', 'eins'), +('two', 'zwei'), +('three', 'drei'), +('four', 'vier'), +('five', 'funf'); +# Delete by PK +explain delete from t8 where a='three'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t8 range PRIMARY PRIMARY 12 const # Using where +delete from t8 where a='three'; +select * from t8; +a col1 +five funf +four vier +one eins +two zwei +# Delete while doing a full table scan +delete from t8 where col1='eins' or col1='vier'; +select * from t8; +a col1 +five funf +two zwei +# delete w/o WHERE: +delete from t8; +select * from t8; +a col1 +# +# Test UPDATEs +# +insert into t8 values +('one', 'eins'), +('two', 'zwei'), +('three', 'drei'), +('four', 'vier'), +('five', 'funf'); +update t8 set col1='dva' where a='two'; +update t8 set a='fourAAA' where col1='vier'; +select * from t8; +a col1 +five funf +fourAAA vier +one eins +three drei +two dva +delete from t8; +# +# Basic transactions tests +# +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +rollback; +select * from t8; +a col1 +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +commit; +select * from t8; +a col1 +trx1-val1 data +trx1-val2 data +drop table t8; +# +# Check if DROP TABLE works +# +create table t8 (a varchar(10) primary key, col1 varchar(12)) 
engine=rocksdb; +select * from t8; +a col1 +insert into t8 values ('foo','foo'); +drop table t8; +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +a col1 +drop table t8; +# +# MDEV-3961: Assertion ... on creating a TEMPORARY RocksDB table +# +CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB; +ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY' +# +# MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work +# +CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t10 VALUES (1),(3); +CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t11 VALUES (1),(4); +select * from t10; +i +1 +3 +select * from t11; +j +1 +4 +EXPLAIN +SELECT * FROM t10, t11 WHERE i=j; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t10 index PRIMARY PRIMARY 4 NULL # Using index +1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.i # Using index +SELECT * FROM t10, t11 WHERE i=j; +i j +1 1 +DROP TABLE t10,t11; +# +# MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122 +# +CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t12 VALUES (2),(1); +SELECT * FROM t12 ORDER BY pk; +pk +1 +2 +DROP TABLE t12; +# +# MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ... +# +create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2; +drop table t14; +# +# MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table +# +create table t9 (i int primary key) engine=rocksdb; +alter table t9 discard tablespace; +ERROR HY000: Table storage engine for 't9' doesn't have this option +drop table t9; +# +# MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ... 
+# on accessing a table after ALTER +# +CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t15 VALUES (1,1),(5,2); +ALTER TABLE t15 DROP COLUMN a; +DROP TABLE t15; +# +# MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table +# +create table t16 (pk int primary key, a char(8)) engine=RocksDB; +insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d'); +update t16 set pk=100, a = 'updated' where a in ('b','c'); +ERROR 23000: Duplicate entry '100' for key 'PRIMARY' +select * from t16; +pk a +1 a +2 b +3 c +4 d +drop table t16; +# +# MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table +# +drop table if exists t_very_long_table_name; +CREATE TABLE `t_very_long_table_name` ( +`c` char(1) NOT NULL, +`c0` char(0) NOT NULL, +`c1` char(1) NOT NULL, +`c20` char(20) NOT NULL, +`c255` char(255) NOT NULL, +PRIMARY KEY (`c255`) +) ENGINE=RocksDB DEFAULT CHARSET=latin1; +INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255)); +drop table t_very_long_table_name; +# +# Test table locking and read-before-write checks. 
+# +create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb; +insert into t17 values ('row1', 'val1'); +insert into t17 values ('row1', 'val1-try2'); +ERROR 23000: Duplicate entry 'row1' for key 'PRIMARY' +insert into t17 values ('ROW1', 'val1-try2'); +ERROR 23000: Duplicate entry 'ROW1' for key 'PRIMARY' +insert into t17 values ('row2', 'val2'); +insert into t17 values ('row3', 'val3'); +# This is ok +update t17 set pk='row4' where pk='row1'; +# This will try to overwrite another row: +update t17 set pk='row3' where pk='row2'; +ERROR 23000: Duplicate entry 'row3' for key 'PRIMARY' +select * from t17; +pk col1 +row2 val2 +row3 val3 +row4 val1 +# +# Locking tests +# +# First, make sure there's no locking when transactions update different rows +set autocommit=0; +update t17 set col1='UPD1' where pk='row2'; +update t17 set col1='UPD2' where pk='row3'; +commit; +select * from t17; +pk col1 +row2 UPD1 +row3 UPD2 +row4 val1 +# Check the variable +show variables like 'rocksdb_lock_wait_timeout'; +Variable_name Value +rocksdb_lock_wait_timeout 1 +set rocksdb_lock_wait_timeout=2; +show variables like 'rocksdb_lock_wait_timeout'; +Variable_name Value +rocksdb_lock_wait_timeout 2 +# Try updating the same row from two transactions +begin; +update t17 set col1='UPD2-AA' where pk='row2'; +update t17 set col1='UPD2-BB' where pk='row2'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t17.PRIMARY +set rocksdb_lock_wait_timeout=1000; +update t17 set col1='UPD2-CC' where pk='row2'; +rollback; +select * from t17 where pk='row2'; +pk col1 +row2 UPD2-CC +drop table t17; +# +# MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable) +# +create table t18 (pk int primary key, i int) engine=RocksDB; +begin; +select * from t18; +pk i +select * from t18 where pk = 1; +pk i +connect con1,localhost,root,,; +insert into t18 values (1,100); +connection default; +select * from t18; +pk i 
+select * from t18 where pk = 1; +pk i +commit; +drop table t18; +# +# MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY +# +create table t19 (pk int primary key, i int) engine=RocksDB; +insert into t19 values (1,1); +insert into t19 values (1,100) on duplicate key update i = 102; +select * from t19; +pk i +1 102 +drop table t19; +# MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY +create table t20 (pk int primary key, i int) engine=RocksDB; +insert into t20 values (1,1); +replace into t20 values (1,100); +select * from t20; +pk i +1 100 +drop table t20; +# +# MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT +# +create table t21 (v varbinary(16) primary key, i int) engine=RocksDB; +insert into t21 values ('a',1); +select * from t21; +v i +a 1 +drop table t21; +# +# MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE +# +CREATE TABLE t22 (a int primary key) ENGINE=RocksDB; +INSERT INTO t22 VALUES (1),(2); +CREATE TABLE t23 (b int primary key) ENGINE=RocksDB; +INSERT INTO t23 SELECT * FROM t22; +DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a; +DROP TABLE t22,t23; +# +# MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT +# +CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB; +INSERT INTO t24 VALUES (1),(2); +CREATE TABLE t25 LIKE t24; +INSERT INTO t25 SELECT * FROM t24; +DELETE t25.* FROM t24, t25; +DROP TABLE t24,t25; +# +# MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself +# +create table t26 (pk int primary key, c char(1)) engine=RocksDB; +insert into t26 values (1,'a'),(2,'b'); +update t26 set c = 'x' order by pk limit 1; +delete from t26 order by pk limit 1; +select * from t26; +pk c +2 b +drop table t26; +# +# Test whether SELECT ... 
FOR UPDATE puts locks +# +create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB; +insert into t27 values +('row1', 'row1data'), +('row2', 'row2data'), +('row3', 'row3data'); +connection con1; +begin; +select * from t27 where pk='row3' for update; +pk col1 +row3 row3data +connection default; +set rocksdb_lock_wait_timeout=1; +update t27 set col1='row2-modified' where pk='row3'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t27.PRIMARY +connection con1; +rollback; +connection default; +disconnect con1; +drop table t27; +# +# MDEV-4060: RocksDB: Assertion `! trx->batch' fails in +# +create table t28 (pk int primary key, a int) engine=RocksDB; +insert into t28 values (1,10),(2,20); +begin; +update t28 set a = 100 where pk = 3; +rollback; +select * from t28; +pk a +1 10 +2 20 +drop table t28; +# +# Secondary indexes +# +create table t30 ( +pk varchar(16) not null primary key, +key1 varchar(16) not null, +col1 varchar(16) not null, +key(key1) +) engine=rocksdb; +insert into t30 values ('row1', 'row1-key', 'row1-data'); +insert into t30 values ('row2', 'row2-key', 'row2-data'); +insert into t30 values ('row3', 'row3-key', 'row3-data'); +explain +select * from t30 where key1='row2-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 ref key1 key1 18 const # Using index condition +select * from t30 where key1='row2-key'; +pk key1 col1 +row2 row2-key row2-data +explain +select * from t30 where key1='row1'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 ref key1 key1 18 const # Using index condition +# This will produce nothing: +select * from t30 where key1='row1'; +pk key1 col1 +explain +select key1 from t30; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 18 NULL # Using index +select key1 from t30; +key1 +row1-key +row2-key +row3-key +# Create a duplicate record +insert into t30 
values ('row2a', 'row2-key', 'row2a-data'); +# Can we see it? +select * from t30 where key1='row2-key'; +pk key1 col1 +row2 row2-key row2-data +row2a row2-key row2a-data +delete from t30 where pk='row2'; +select * from t30 where key1='row2-key'; +pk key1 col1 +row2a row2-key row2a-data +# +# Range scans on secondary index +# +delete from t30; +insert into t30 values +('row1', 'row1-key', 'row1-data'), +('row2', 'row2-key', 'row2-data'), +('row3', 'row3-key', 'row3-data'), +('row4', 'row4-key', 'row4-data'), +('row5', 'row5-key', 'row5-data'); +analyze table t30; +Table Op Msg_type Msg_text +test.t30 analyze status OK +explain +select * from t30 where key1 <='row3-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 <='row3-key'; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 where key1 between 'row2-key' and 'row4-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 between 'row2-key' and 'row4-key'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +row4 row4-key row4-data +explain +select * from t30 where key1 in ('row2-key','row4-key'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 in ('row2-key','row4-key'); +pk key1 col1 +row2 row2-key row2-data +row4 row4-key row4-data +explain +select key1 from t30 where key1 in ('row2-key','row4-key'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using where; Using index +select key1 from t30 where key1 in ('row2-key','row4-key'); +key1 +row2-key +row4-key +explain +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 order by key1 limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 18 NULL # NULL +select * from t30 order by key1 limit 3; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 order by key1 desc limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 18 NULL # NULL +select * from t30 order by key1 desc limit 3; +pk key1 col1 +row5 row5-key row5-data +row4 row4-key row4-data +row3 row3-key row3-data +# +# Range scans on primary key +# +explain +select * from t30 where pk <='row3'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk <='row3'; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 where pk between 'row2' and 'row4'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk between 'row2' and 'row4'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +row4 row4-key row4-data +explain +select * from t30 where pk in ('row2','row4'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk in ('row2','row4'); +pk key1 col1 +row2 row2-key row2-data +row4 row4-key row4-data +explain +select * from t30 order by pk limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL PRIMARY 18 NULL # NULL +select * from t30 order 
by pk limit 3; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +drop table t30; +# +# MDEV-3841: RocksDB: Reading by PK prefix does not work +# +create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB; +insert into t31 values (1,10,100),(2,20,200); +select * from t31 where i = 1; +i j k +1 10 100 +select * from t31 where j = 10; +i j k +1 10 100 +select * from t31 where k = 100; +i j k +1 10 100 +select * from t31 where i = 1 and j = 10; +i j k +1 10 100 +select * from t31 where i = 1 and k = 100; +i j k +1 10 100 +select * from t31 where j = 10 and k = 100; +i j k +1 10 100 +select * from t31 where i = 1 and j = 10 and k = 100; +i j k +1 10 100 +drop table t31; +# +# MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work +# +create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB; +insert into t32 values +(1,10,100,''), +(2,20,200,''); +select * from t32 where i = 1 and j = 10 and k = 100; +i j k a +1 10 100 +update t32 set a = 'updated' where i = 1 and j = 10 and k = 100; +select * from t32; +i j k a +1 10 100 updated +2 20 200 +drop table t32; +# +# MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. 
DESC +# +CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB; +INSERT INTO t33 VALUES (1,'a'),(2,'b'); +SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC; +pk a +2 b +1 a +DROP TABLE t33; +# +# MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index +# +# Unique indexes can be created, but uniqueness won't be enforced +create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB; +drop table t33; +# +# MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range +# +CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t34 VALUES (10),(11); +SELECT pk FROM t34 WHERE pk > 5 AND pk < 15; +pk +10 +11 +SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15; +pk +10 +11 +SELECT pk FROM t34 WHERE pk > 5; +pk +10 +11 +SELECT pk FROM t34 WHERE pk < 15; +pk +10 +11 +drop table t34; +# +# MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC +# +create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB; +insert into t35 values (1,1,1,1,1),(2,2,2,2,2); +select * from t35 where a = 1 and c = 1 and d = 1 order by e desc; +a b c d e +1 1 1 1 1 +drop table t35; +# +# MDEV-4084: RocksDB: Wrong result on IN subquery with index +# +CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t36 VALUES (1,10),(2,20); +SELECT 3 IN ( SELECT a FROM t36 ); +3 IN ( SELECT a FROM t36 ) +0 +drop table t36; +# +# MDEV-4084: RocksDB: Wrong result on IN subquery with index +# +CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b)) +ENGINE=RocksDB; +INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y'); +SELECT MAX(a) FROM t37 WHERE a < 100; +MAX(a) +20 +DROP TABLE t37; +# +# MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC +# +CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t38 VALUES (1,10), (2,20); +SELECT i FROM t38 WHERE 
i NOT IN (8) ORDER BY i DESC; +i +20 +10 +drop table t38; +# +# MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys +# with a multi-part key and ORDER BY .. DESC +# +CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB; +INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y'); +CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t41 VALUES (1),(2); +SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC; +pk1 a b pk2 +DROP TABLE t40,t41; +# +# MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL +# +CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t42 VALUES (1, NULL),(2, 8); +SELECT ( 3 ) NOT IN ( SELECT a FROM t42 ); +( 3 ) NOT IN ( SELECT a FROM t42 ) +NULL +DROP TABLE t42; +# +# MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on +# DELETE with search by NULL-able secondary key ... +# +CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB; +INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x'); +UPDATE t43 SET pk = 10 WHERE a = 8; +REPLACE INTO t43 ( a ) VALUES ( 8 ); +Warnings: +Warning 1364 Field 'pk' doesn't have a default value +REPLACE INTO t43 ( b ) VALUES ( 'y' ); +Warnings: +Warning 1364 Field 'pk' doesn't have a default value +SELECT * FROM t43 WHERE a = 8; +pk a b +10 8 g +DELETE FROM t43 WHERE a = 8; +DROP TABLE t43; +# +# Basic AUTO_INCREMENT tests +# +create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +insert into t44 (col1) values ('row1'); +insert into t44 (col1) values ('row2'); +insert into t44 (col1) values ('row3'); +select * from t44; +pk col1 +1 row1 +2 row2 +3 row3 +drop table t44; +# +# ALTER TABLE tests +# +create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t45 values (1, 'row1'); +insert into t45 values (2, 'row2'); +alter table t45 rename t46; +select * from t46; +pk col1 +1 
row1 +2 row2 +drop table t46; +drop table t45; +ERROR 42S02: Unknown table 'test.t45' +# +# Check Bulk loading +# Bulk loading used to overwrite existing data +# Now it fails if there is data overlap with what +# already exists +# +show variables like 'rocksdb%'; +Variable_name Value +rocksdb_access_hint_on_compaction_start 1 +rocksdb_advise_random_on_open ON +rocksdb_allow_concurrent_memtable_write OFF +rocksdb_allow_mmap_reads OFF +rocksdb_allow_mmap_writes OFF +rocksdb_allow_os_buffer ON +rocksdb_background_sync OFF +rocksdb_base_background_compactions 1 +rocksdb_block_cache_size 8388608 +rocksdb_block_restart_interval 16 +rocksdb_block_size 4096 +rocksdb_block_size_deviation 10 +rocksdb_bulk_load OFF +rocksdb_bulk_load_size 1000 +rocksdb_bytes_per_sync 0 +rocksdb_cache_index_and_filter_blocks ON +rocksdb_checksums_pct 100 +rocksdb_collect_sst_properties ON +rocksdb_commit_in_the_middle OFF +rocksdb_compact_cf +rocksdb_compaction_readahead_size 0 +rocksdb_compaction_sequential_deletes 0 +rocksdb_compaction_sequential_deletes_count_sd OFF +rocksdb_compaction_sequential_deletes_file_size 0 +rocksdb_compaction_sequential_deletes_window 0 +rocksdb_create_checkpoint +rocksdb_create_if_missing ON +rocksdb_create_missing_column_families OFF +rocksdb_datadir ./.rocksdb +rocksdb_db_write_buffer_size 0 +rocksdb_debug_optimizer_no_zero_cardinality ON +rocksdb_default_cf_options +rocksdb_delete_obsolete_files_period_micros 21600000000 +rocksdb_disable_2pc ON +rocksdb_disabledatasync OFF +rocksdb_enable_bulk_load_api ON +rocksdb_enable_thread_tracking OFF +rocksdb_enable_write_thread_adaptive_yield OFF +rocksdb_error_if_exists OFF +rocksdb_flush_memtable_on_analyze ON +rocksdb_force_flush_memtable_now OFF +rocksdb_force_index_records_in_range 0 +rocksdb_hash_index_allow_collision ON +rocksdb_index_type kBinarySearch +rocksdb_info_log_level error_level +rocksdb_is_fd_close_on_exec ON +rocksdb_keep_log_file_num 1000 +rocksdb_lock_scanned_rows OFF +rocksdb_lock_wait_timeout 1 
+rocksdb_log_file_time_to_roll 0 +rocksdb_manifest_preallocation_size 4194304 +rocksdb_max_background_compactions 1 +rocksdb_max_background_flushes 1 +rocksdb_max_log_file_size 0 +rocksdb_max_manifest_file_size 18446744073709551615 +rocksdb_max_open_files -1 +rocksdb_max_row_locks 1073741824 +rocksdb_max_subcompactions 1 +rocksdb_max_total_wal_size 0 +rocksdb_merge_buf_size 67108864 +rocksdb_merge_combine_read_size 1073741824 +rocksdb_new_table_reader_for_compaction_inputs OFF +rocksdb_no_block_cache OFF +rocksdb_override_cf_options +rocksdb_paranoid_checks ON +rocksdb_pause_background_work ON +rocksdb_perf_context_level 0 +rocksdb_pin_l0_filter_and_index_blocks_in_cache ON +rocksdb_rate_limiter_bytes_per_sec 0 +rocksdb_read_free_rpl_tables +rocksdb_records_in_range 50 +rocksdb_rpl_skip_tx_api OFF +rocksdb_seconds_between_stat_computes 3600 +rocksdb_signal_drop_index_thread OFF +rocksdb_skip_bloom_filter_on_read OFF +rocksdb_skip_fill_cache OFF +rocksdb_skip_unique_check OFF +rocksdb_skip_unique_check_tables .* +rocksdb_stats_dump_period_sec 600 +rocksdb_store_checksums OFF +rocksdb_strict_collation_check OFF +rocksdb_strict_collation_exceptions +rocksdb_table_cache_numshardbits 6 +rocksdb_table_stats_sampling_pct 10 +rocksdb_unsafe_for_binlog OFF +rocksdb_use_adaptive_mutex OFF +rocksdb_use_fsync OFF +rocksdb_validate_tables 1 +rocksdb_verify_checksums OFF +rocksdb_wal_bytes_per_sync 0 +rocksdb_wal_dir +rocksdb_wal_recovery_mode 2 +rocksdb_wal_size_limit_mb 0 +rocksdb_wal_ttl_seconds 0 +rocksdb_whole_key_filtering ON +rocksdb_write_disable_wal OFF +rocksdb_write_ignore_missing_column_families OFF +rocksdb_write_sync OFF +create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t47 values (1, 'row1'); +insert into t47 values (2, 'row2'); +set rocksdb_bulk_load=1; +insert into t47 values (3, 'row3'),(4, 'row4'); +set rocksdb_bulk_load=0; +select * from t47; +pk col1 +1 row1 +2 row2 +3 row3 +4 row4 +drop table t47; +# +# Fix TRUNCATE over 
empty table (transaction is committed when it wasn't +# started) +# +create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +set autocommit=0; +truncate table t48; +set autocommit=1; +drop table t48; +# +# MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded +# +create table t49 (pk int primary key, a int) engine=RocksDB; +insert into t49 values (1,10),(2,20); +begin; +update t49 set a = 100 where pk = 1; +connect con1,localhost,root,,; +set rocksdb_lock_wait_timeout=5000; +set @var1= to_seconds(now()); +update t49 set a = 1000 where pk = 1; +connect con2,localhost,root,,; +kill query $con1_id; +connection con1; +ERROR 70100: Query execution was interrupted +set @var2= to_seconds(now()); +"[Jay Edgar] I've updated this query to help determine why it is sometimes failing" +"(t13541934). If you get an error here (i.e. not 'passed') notify me." +select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result'; +result +passed +connection default; +disconnect con1; +commit; +drop table t49; +# +# Index-only tests for INT-based columns +# +create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,2,2); +insert into t1 values (-5,-5,-5); +# INT column uses index-only: +explain +select key1 from t1 where key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 5 const # Using index +select key1 from t1 where key1=2; +key1 +2 +select key1 from t1 where key1=-5; +key1 +-5 +drop table t1; +create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb; +insert into t2 values (1,1,1), (2,2,2); +# INT UNSIGNED column uses index-only: +explain +select key1 from t2 where key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref key1 key1 5 const # Using index +select key1 from t2 where key1=2; +key1 +2 +drop 
table t2; +create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb; +insert into t3 values (1,1,1), (2,2,2); +# BIGINT uses index-only: +explain +select key1 from t3 where key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref key1 key1 9 const # Using index +select key1 from t3 where key1=2; +key1 +2 +drop table t3; +# +# Index-only reads for string columns +# +create table t1 ( +pk int primary key, +key1 char(10) character set binary, +col1 int, +key (key1) +) engine=rocksdb; +insert into t1 values(1, 'one',11), (2,'two',22); +explain +select key1 from t1 where key1='one'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index +# The following will produce no rows. This looks like a bug, +# but it is actually correct behavior. Binary strings are end-padded +# with \0 character (and not space). Comparison does not ignore +# the tail of \0. +select key1 from t1 where key1='one'; +key1 +explain +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +hex(key1) +6F6E6500000000000000 +drop table t1; +create table t2 ( +pk int primary key, +key1 char(10) collate latin1_bin, +col1 int, +key (key1) +) engine=rocksdb; +insert into t2 values(1, 'one',11), (2,'two',22); +explain +select key1 from t2 where key1='one'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref key1 key1 11 const # Using where; Using index +select key1 from t2 where key1='one'; +key1 +one +drop table t2; +create table t3 ( +pk int primary key, +key1 char(10) collate utf8_bin, +col1 int, +key (key1) +) engine=rocksdb; +insert into t3 values(1, 'one',11), (2,'two',22); +explain +select key1 from t3 where key1='one'; +id select_type table 
type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref key1 key1 31 const # Using where; Using index +select key1 from t3 where key1='one'; +key1 +one +drop table t3; +# a VARCHAR column +create table t4 ( +pk int primary key, +key1 varchar(10) collate latin1_bin, +key(key1) +) engine=rocksdb; +insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five'); +explain +select key1 from t4 where key1='two'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref key1 key1 13 const # Using where; Using index +select key1 from t4 where key1='two'; +key1 +two +select key1 from t4 where key1='fifty-five'; +key1 +fifty-five +explain +select key1 from t4 where key1 between 's' and 'u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 range key1 key1 13 NULL # Using where; Using index +select key1 from t4 where key1 between 's' and 'u'; +key1 +threee +two +drop table t4; +# +# MDEV-4305: RocksDB: Assertion `((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len +# +CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8); +SELECT * FROM t1 WHERE i != 3 OR pk1 > 9; +pk1 pk2 i +1 test1 6 +2 test2 8 +DROP TABLE t1; +# +# MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort +# +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1),(2,2); +BEGIN; +UPDATE t1 SET i = 100; +connect con1,localhost,root,,test; +DELETE IGNORE FROM t1 ORDER BY i; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +disconnect con1; +connection default; +COMMIT; +DROP TABLE t1; +# +# MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field +# (testcase only) +# +CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB; +INSERT INTO t1 VALUES 
(1,'foo'), (2,'bar'); +DROP TABLE t1; +# +# MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols +# +CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin; +INSERT INTO t1 VALUES (1,'h','h'); +SELECT * FROM t1; +pk c1 c2 +1 h h +SELECT c1 FROM t1; +c1 +h +DROP TABLE t1; +# +# MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. FOR UPDATE +# +CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,4),(2,5); +SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE; +1 +DROP TABLE t2; +# +# MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record +# +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e'); +SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h'; +MAX( pk ) +NULL +DROP TABLE t1; +# +# MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field +# +create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB; +insert into t1 values ('2',2),('6',6); +select * from t1 where c = i; +c i +2 2 +6 6 +select * from t1 ignore index (i) where c = i; +c i +2 2 +6 6 +drop table t1; +# +# Test statement rollback inside a transaction +# +create table t1 (pk varchar(12) primary key) engine=rocksdb; +insert into t1 values ('old-val1'),('old-val2'); +create table t2 (pk varchar(12) primary key) engine=rocksdb; +insert into t2 values ('new-val2'),('old-val1'); +begin; +insert into t1 values ('new-val1'); +insert into t1 select * from t2; +ERROR 23000: Duplicate entry 'old-val1' for key 'PRIMARY' +commit; +select * from t1; +pk +new-val1 +old-val1 +old-val2 +drop table t1, t2; +# +# MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. 
LIMIT: +# rows that should be deleted remain in the table +# +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t1 (pk) VALUES (NULL),(NULL); +BEGIN; +INSERT INTO t2 (pk) VALUES (NULL),(NULL); +INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +SELECT * FROM t1 ORDER BY pk LIMIT 9; +pk +1 +2 +3 +4 +5 +6 +7 +8 +affected rows: 8 +DELETE FROM t1 ORDER BY pk LIMIT 9; +affected rows: 8 +SELECT * FROM t1 ORDER BY pk LIMIT 9; +pk +affected rows: 0 +DROP TABLE t1,t2; +# +# MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on +# inserting into a varchar column +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); +DROP TABLE t1; +# +# MDEV-4061: RocksDB: Changes from an interrupted query are still applied +# +create table t1 (pk int primary key, a int) engine=RocksDB; +insert into t1 values (1,10),(2,20); +set autocommit = 1; +update t1 set a = sleep(100) where pk = 1; +connect con1,localhost,root,,; +kill query $con_id; +connection default; +ERROR 70100: Query execution was interrupted +select * from t1; +pk a +1 10 +2 20 +disconnect con1; +drop table t1; +# +# MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB; +INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0); +SELECT * FROM t1; +pk a b +1 157 0 +2 1898 -504403 +SELECT pk FROM t1; +pk +1 +2 +SELECT * FROM t1 WHERE a != 97; +pk a b +1 157 0 +2 1898 -504403 +DROP TABLE t1; +# +# Test @@rocksdb_max_row_locks +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB; +set @a=-1; +insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; +set @tmp1= @@rocksdb_max_row_locks; +set rocksdb_max_row_locks= 20; +update t1 set a=a+10; +ERROR HY000: 
Internal error: Operation aborted: Number of locks held by the transaction exceeded @@rocksdb_max_row_locks +DROP TABLE t1; +# +# Test AUTO_INCREMENT behavior problem, +# "explicit insert into an auto-inc column is not noticed by RocksDB" +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null); +insert into t1 values (null); +select * from t1; +i +1 +2 +drop table t1; +create table t2 (i int primary key auto_increment) engine=RocksDB; +insert into t2 values (1); +select * from t2; +i +1 +# this fails (ie. used to fail), RocksDB engine did not notice use of '1' above +insert into t2 values (null); +select * from t2; +i +1 +2 +# but then this succeeds, so previous statement must have incremented next number counter +insert into t2 values (null); +select * from t2; +i +1 +2 +3 +drop table t2; +# +# Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null); +insert into t1 values (null); +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +insert into t1 values (null); +select * from t1; +i +1 +2 +3 +drop table t1; +# +# Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0 +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null),(null); +show table status like 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 # 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL +drop table t1; +# +# Fix Issue #4: Crash when using pseudo-unique keys +# +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, +UNIQUE KEY b_t 
(b,t) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +DROP TABLE t1; +# +# Fix issue #5: Transaction rollback doesn't undo all changes. +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (id int auto_increment primary key, value int) engine=rocksdb; +set autocommit=0; +begin; +set @a:=0; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +rollback; +select count(*) from t1; +count(*) +0 +set autocommit=1; +drop table t0, t1; +# +# Check status variables +# +show status like 'rocksdb%'; +Variable_name Value +rocksdb_rows_deleted # +rocksdb_rows_inserted # +rocksdb_rows_read # +rocksdb_rows_updated # +rocksdb_system_rows_deleted # +rocksdb_system_rows_inserted # +rocksdb_system_rows_read # +rocksdb_system_rows_updated # +rocksdb_block_cache_add # +rocksdb_block_cache_data_hit # +rocksdb_block_cache_data_miss # +rocksdb_block_cache_filter_hit # +rocksdb_block_cache_filter_miss # +rocksdb_block_cache_hit # +rocksdb_block_cache_index_hit # +rocksdb_block_cache_index_miss # +rocksdb_block_cache_miss # +rocksdb_block_cachecompressed_hit # +rocksdb_block_cachecompressed_miss # +rocksdb_bloom_filter_prefix_checked # +rocksdb_bloom_filter_prefix_useful # +rocksdb_bloom_filter_useful # +rocksdb_bytes_read # +rocksdb_bytes_written # +rocksdb_compact_read_bytes # +rocksdb_compact_write_bytes # +rocksdb_compaction_key_drop_new # 
+rocksdb_compaction_key_drop_obsolete # +rocksdb_compaction_key_drop_user # +rocksdb_flush_write_bytes # +rocksdb_getupdatessince_calls # +rocksdb_git_date # +rocksdb_git_hash # +rocksdb_l0_num_files_stall_micros # +rocksdb_l0_slowdown_micros # +rocksdb_memtable_compaction_micros # +rocksdb_memtable_hit # +rocksdb_memtable_miss # +rocksdb_no_file_closes # +rocksdb_no_file_errors # +rocksdb_no_file_opens # +rocksdb_num_iterators # +rocksdb_number_block_not_compressed # +rocksdb_number_deletes_filtered # +rocksdb_number_keys_read # +rocksdb_number_keys_updated # +rocksdb_number_keys_written # +rocksdb_number_merge_failures # +rocksdb_number_multiget_bytes_read # +rocksdb_number_multiget_get # +rocksdb_number_multiget_keys_read # +rocksdb_number_reseeks_iteration # +rocksdb_number_sst_entry_delete # +rocksdb_number_sst_entry_merge # +rocksdb_number_sst_entry_other # +rocksdb_number_sst_entry_put # +rocksdb_number_sst_entry_singledelete # +rocksdb_number_stat_computes # +rocksdb_number_superversion_acquires # +rocksdb_number_superversion_cleanups # +rocksdb_number_superversion_releases # +rocksdb_rate_limit_delay_millis # +rocksdb_sequence_number # +rocksdb_snapshot_conflict_errors # +rocksdb_wal_bytes # +rocksdb_wal_synced # +rocksdb_write_other # +rocksdb_write_self # +rocksdb_write_timedout # +rocksdb_write_wal # +select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%'; +VARIABLE_NAME +ROCKSDB_ROWS_DELETED +ROCKSDB_ROWS_INSERTED +ROCKSDB_ROWS_READ +ROCKSDB_ROWS_UPDATED +ROCKSDB_SYSTEM_ROWS_DELETED +ROCKSDB_SYSTEM_ROWS_INSERTED +ROCKSDB_SYSTEM_ROWS_READ +ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_DATA_HIT +ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_HIT +ROCKSDB_BLOCK_CACHE_FILTER_MISS +ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_HIT +ROCKSDB_BLOCK_CACHE_INDEX_MISS +ROCKSDB_BLOCK_CACHE_MISS +ROCKSDB_BLOCK_CACHECOMPRESSED_HIT +ROCKSDB_BLOCK_CACHECOMPRESSED_MISS 
+ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED +ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL +ROCKSDB_BLOOM_FILTER_USEFUL +ROCKSDB_BYTES_READ +ROCKSDB_BYTES_WRITTEN +ROCKSDB_COMPACT_READ_BYTES +ROCKSDB_COMPACT_WRITE_BYTES +ROCKSDB_COMPACTION_KEY_DROP_NEW +ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE +ROCKSDB_COMPACTION_KEY_DROP_USER +ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_GIT_DATE +ROCKSDB_GIT_HASH +ROCKSDB_L0_NUM_FILES_STALL_MICROS +ROCKSDB_L0_SLOWDOWN_MICROS +ROCKSDB_MEMTABLE_COMPACTION_MICROS +ROCKSDB_MEMTABLE_HIT +ROCKSDB_MEMTABLE_MISS +ROCKSDB_NO_FILE_CLOSES +ROCKSDB_NO_FILE_ERRORS +ROCKSDB_NO_FILE_OPENS +ROCKSDB_NUM_ITERATORS +ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DELETES_FILTERED +ROCKSDB_NUMBER_KEYS_READ +ROCKSDB_NUMBER_KEYS_UPDATED +ROCKSDB_NUMBER_KEYS_WRITTEN +ROCKSDB_NUMBER_MERGE_FAILURES +ROCKSDB_NUMBER_MULTIGET_BYTES_READ +ROCKSDB_NUMBER_MULTIGET_GET +ROCKSDB_NUMBER_MULTIGET_KEYS_READ +ROCKSDB_NUMBER_RESEEKS_ITERATION +ROCKSDB_NUMBER_SST_ENTRY_DELETE +ROCKSDB_NUMBER_SST_ENTRY_MERGE +ROCKSDB_NUMBER_SST_ENTRY_OTHER +ROCKSDB_NUMBER_SST_ENTRY_PUT +ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE +ROCKSDB_NUMBER_STAT_COMPUTES +ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES +ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS +ROCKSDB_NUMBER_SUPERVERSION_RELEASES +ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_SEQUENCE_NUMBER +ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_WAL_BYTES +ROCKSDB_WAL_SYNCED +ROCKSDB_WRITE_OTHER +ROCKSDB_WRITE_SELF +ROCKSDB_WRITE_TIMEDOUT +ROCKSDB_WRITE_WAL +# RocksDB-SE's status variables are global internally +# but they are shown as both session and global, like InnoDB's status vars. 
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%'; +VARIABLE_NAME +ROCKSDB_ROWS_DELETED +ROCKSDB_ROWS_INSERTED +ROCKSDB_ROWS_READ +ROCKSDB_ROWS_UPDATED +ROCKSDB_SYSTEM_ROWS_DELETED +ROCKSDB_SYSTEM_ROWS_INSERTED +ROCKSDB_SYSTEM_ROWS_READ +ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_DATA_HIT +ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_HIT +ROCKSDB_BLOCK_CACHE_FILTER_MISS +ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_HIT +ROCKSDB_BLOCK_CACHE_INDEX_MISS +ROCKSDB_BLOCK_CACHE_MISS +ROCKSDB_BLOCK_CACHECOMPRESSED_HIT +ROCKSDB_BLOCK_CACHECOMPRESSED_MISS +ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED +ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL +ROCKSDB_BLOOM_FILTER_USEFUL +ROCKSDB_BYTES_READ +ROCKSDB_BYTES_WRITTEN +ROCKSDB_COMPACT_READ_BYTES +ROCKSDB_COMPACT_WRITE_BYTES +ROCKSDB_COMPACTION_KEY_DROP_NEW +ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE +ROCKSDB_COMPACTION_KEY_DROP_USER +ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_GIT_DATE +ROCKSDB_GIT_HASH +ROCKSDB_L0_NUM_FILES_STALL_MICROS +ROCKSDB_L0_SLOWDOWN_MICROS +ROCKSDB_MEMTABLE_COMPACTION_MICROS +ROCKSDB_MEMTABLE_HIT +ROCKSDB_MEMTABLE_MISS +ROCKSDB_NO_FILE_CLOSES +ROCKSDB_NO_FILE_ERRORS +ROCKSDB_NO_FILE_OPENS +ROCKSDB_NUM_ITERATORS +ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DELETES_FILTERED +ROCKSDB_NUMBER_KEYS_READ +ROCKSDB_NUMBER_KEYS_UPDATED +ROCKSDB_NUMBER_KEYS_WRITTEN +ROCKSDB_NUMBER_MERGE_FAILURES +ROCKSDB_NUMBER_MULTIGET_BYTES_READ +ROCKSDB_NUMBER_MULTIGET_GET +ROCKSDB_NUMBER_MULTIGET_KEYS_READ +ROCKSDB_NUMBER_RESEEKS_ITERATION +ROCKSDB_NUMBER_SST_ENTRY_DELETE +ROCKSDB_NUMBER_SST_ENTRY_MERGE +ROCKSDB_NUMBER_SST_ENTRY_OTHER +ROCKSDB_NUMBER_SST_ENTRY_PUT +ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE +ROCKSDB_NUMBER_STAT_COMPUTES +ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES +ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS +ROCKSDB_NUMBER_SUPERVERSION_RELEASES +ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_SEQUENCE_NUMBER 
+ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_WAL_BYTES +ROCKSDB_WAL_SYNCED +ROCKSDB_WRITE_OTHER +ROCKSDB_WRITE_SELF +ROCKSDB_WRITE_TIMEDOUT +ROCKSDB_WRITE_WAL +# +# Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int primary key, +col1 varchar(255), +key(col1) +) engine=rocksdb; +insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0; +select * from t1 where pk=3; +pk col1 +3 123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF- +drop table t0, t1; +# +# Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t1 ( +id1 bigint(20) unsigned NOT NULL DEFAULT '0', +id2 bigint(20) unsigned NOT NULL DEFAULT '0', +link_type bigint(20) unsigned NOT NULL DEFAULT '0', +visibility tinyint(3) NOT NULL DEFAULT '0', +data varchar(255) NOT NULL DEFAULT '', +time bigint(20) unsigned NOT NULL DEFAULT '0', +version int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type,id1,id2) +) engine=rocksdb; +insert into t1 select a,a,a,1,a,a,a from t0; +alter table t1 add index id1_type (id1,link_type,visibility,time,version,data); +select * from t1 where id1 = 3; +id1 id2 link_type visibility data time version +3 3 3 1 3 3 3 +drop table t0,t1; +# +# Test column families +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key(col1) comment 'cf3', +key(col2) comment 'cf4' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); +explain +select * from t1 where col1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const # NULL +select * from 
t1 where col1=2; +pk col1 col2 +2 2 2 +explain +select * from t1 where col2=3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col2 col2 5 const # NULL +select * from t1 where col2=3; +pk col1 col2 +3 3 3 +select * from t1 where pk=4; +pk col1 col2 +4 4 4 +drop table t1; +# +# Try primary key in a non-default CF: +# +create table t1 ( +pk int, +col1 int, +col2 int, +key(col1) comment 'cf3', +key(col2) comment 'cf4', +primary key (pk) comment 'cf5' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); +explain +select * from t1 where col1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const # NULL +select * from t1 where col1=2; +pk col1 col2 +2 2 2 +select * from t1 where pk=4; +pk col1 col2 +4 4 4 +drop table t1; +# +# Issue #15: SIGSEGV from reading in blob data +# +CREATE TABLE t1 ( +id int not null, +blob_col text, +PRIMARY KEY (id) +) ENGINE=ROCKSDB CHARSET=latin1; +INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +DROP TABLE t1; +# +# Issue #17: Automatic per-index column families +# +create table t1 ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment '$per_index_cf' +) engine=rocksdb; +#Same CF ids with different CF flags +create table t1_err ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment 'test.t1.key1' +) engine=rocksdb; +ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag. 
+create table t1_err ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment 'test.t1.key2' +) engine=rocksdb; +drop table t1_err; +# Unfortunately there is no way to check which column family everything goes to +insert into t1 values (1,1); +select * from t1; +id key1 +1 1 +# Check that ALTER and RENAME are disallowed +alter table t1 add col2 int; +ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' +rename table t1 to t2; +ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' +drop table t1; +# Check detection of typos in $per_index_cf +create table t1 ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment '$per_idnex_cf' +)engine=rocksdb; +ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf' +# +# Issue #22: SELECT ... FOR UPDATE takes a long time +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +id1 int, +id2 int, +value1 int, +value2 int, +primary key(id1, id2) COMMENT 'new_column_family', +key(id2) +) engine=rocksdb default charset=latin1 collate=latin1_bin; +insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B; +explain +select * from t1 where id1=30 and value1=30 for update; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref PRIMARY PRIMARY 4 const # Using where +set @var1=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_read'); +select * from t1 where id1=3 and value1=3 for update; +id1 id2 value1 value2 +set @var2=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_read'); +# The following must return true (before the fix, the difference was 70): +select if((@var2 - @var1) < 30, 1, @var2-@var1); +if((@var2 - @var1) < 30, 1, @var2-@var1) +1 +drop table t0,t1; +# +# 
Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting +# +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +# The following must not use 'Using filesort': +explain select * from t1 ORDER BY id; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # NULL +drop table t1; +# +# Issue #26: Index-only scans for DATETIME and TIMESTAMP +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +# Try a DATETIME column: +create table t1 ( +pk int auto_increment primary key, +kp1 datetime, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 12:34:56 0 NULL +2 2015-01-02 12:34:56 1 NULL +3 2015-01-03 12:34:56 2 NULL +4 2015-01-04 12:34:56 3 NULL +5 2015-01-05 12:34:56 4 NULL +6 2015-01-06 12:34:56 5 NULL +7 2015-01-07 12:34:56 6 NULL +8 2015-01-08 12:34:56 7 NULL +9 2015-01-09 12:34:56 8 NULL +10 2015-01-10 12:34:56 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 6 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 datetime not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and 
'2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +drop table t1,t2; +# Try a DATE column: +create table t1 ( +pk int auto_increment primary key, +kp1 date, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 0 NULL +2 2015-01-02 1 NULL +3 2015-01-03 2 NULL +4 2015-01-04 3 NULL +5 2015-01-05 4 NULL +6 2015-01-06 5 NULL +7 2015-01-07 6 NULL +8 2015-01-08 7 NULL +9 2015-01-09 8 NULL +10 2015-01-10 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; +kp1 kp2 +2015-01-01 0 +2015-01-02 1 +2015-01-03 2 +2015-01-04 3 +2015-01-05 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 date not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 0 +2015-01-02 1 +2015-01-03 2 +2015-01-04 3 +2015-01-05 4 +drop table 
t1,t2; +# +# Try a TIMESTAMP column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 timestamp, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 12:34:56 0 NULL +2 2015-01-02 12:34:56 1 NULL +3 2015-01-03 12:34:56 2 NULL +4 2015-01-04 12:34:56 3 NULL +5 2015-01-05 12:34:56 4 NULL +6 2015-01-06 12:34:56 5 NULL +7 2015-01-07 12:34:56 6 NULL +8 2015-01-08 12:34:56 7 NULL +9 2015-01-09 12:34:56 8 NULL +10 2015-01-10 12:34:56 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 5 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 timestamp not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +drop table t1,t2; +# +# Try a TIME column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 time, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; 
+insert into t1 (kp1,kp2) +select date_add('2015-01-01 09:00:00', interval a minute), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 09:00:00 0 NULL +2 09:01:00 1 NULL +3 09:02:00 2 NULL +4 09:03:00 3 NULL +5 09:04:00 4 NULL +6 09:05:00 5 NULL +7 09:06:00 6 NULL +8 09:07:00 7 NULL +9 09:08:00 8 NULL +10 09:09:00 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +kp1 kp2 +09:01:00 1 +09:02:00 2 +09:03:00 3 +09:04:00 4 +09:05:00 5 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 time not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +kp1 kp2 +09:01:00 1 +09:02:00 2 +09:03:00 3 +09:04:00 4 +09:05:00 5 +drop table t1,t2; +# +# Try a YEAR column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 year, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) select 2015+a, a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015 0 NULL +2 2016 1 NULL +3 2017 2 NULL +4 2018 3 NULL +5 2019 4 NULL +6 2020 5 NULL +7 2021 6 NULL +8 2022 7 NULL +9 2023 8 NULL +10 2024 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 2 NULL # Using 
where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; +kp1 kp2 +2016 1 +2017 2 +2018 3 +2019 4 +2020 5 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 year not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 1 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; +kp1 kp2 +2016 1 +2017 2 +2018 3 +2019 4 +2020 5 +drop table t1,t2; +# +# Issue #57: Release row locks on statement errors +# +create table t1 (id int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +begin; +insert into t1 values (4), (5), (6); +insert into t1 values (7), (8), (2), (9); +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +select * from t1; +id +1 +2 +3 +4 +5 +6 +begin; +select * from t1 where id=4 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +select * from t1 where id=7 for update; +id +select * from t1 where id=9 for update; +id +drop table t1; +#Index on blob column +SET @old_mode = @@sql_mode; +SET sql_mode = 'strict_all_tables'; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb; +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb; +insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde'); +select * from t1; +a b c +1 1abcde 1abcde +2 2abcde 2abcde +3 3abcde 3abcde +explain select * from t1 where b like '1%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range b b 1258 NULL # Using where +explain select b, a from t1 
where b like '1%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range b b 1258 NULL # Using where +update t1 set b= '12345' where b = '2abcde'; +select * from t1; +a b c +1 1abcde 1abcde +2 12345 2abcde +3 3abcde 3abcde +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; +ERROR 42000: Specified key was too long; max key length is 2048 bytes +SET sql_mode = @old_mode; +drop table t0; +# +# Fix assertion failure (attempt to overrun the key buffer) for prefix indexes +# +create table t1 ( +pk int primary key, +col1 varchar(100), +key (col1(10)) +) engine=rocksdb; +insert into t1 values (1, repeat('0123456789', 9)); +drop table t1; +# +# Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*) +# +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk; +INSERT INTO t1 VALUES (1,1); +REPLACE INTO t1 VALUES (1,2); +SELECT * FROM t1; +pk f1 +1 2 +DROP TABLE t1, t2; +# +# Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error +# +create table t1(a int primary key); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 ( +a varchar(32) primary key, +col1 int +) engine=rocksdb; +insert into t2 +select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B; +update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004'; +drop table t1,t2; +# +# Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed +# +CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1,1,1,1,1,1); +SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +c1 c2 c3 c4 c5 c6 c7 +EXPLAIN SELECT * FROM t2 
WHERE c1 > 4294967295 ORDER BY c1,c6; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 50 Using where +drop table t2; +# +# Issue #135: register transaction was not being called for statement +# +DROP DATABASE IF EXISTS test_db; +CREATE DATABASE test_db; +CREATE TABLE test_db.t1(c1 INT PRIMARY KEY); +LOCK TABLES test_db.t1 READ; +SET AUTOCOMMIT=0; +SELECT c1 FROM test_db.t1; +c1 +START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY; +DROP DATABASE test_db; +# +# Issue #143: Split rocksdb_bulk_load option into two +# +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +SET rocksdb_skip_unique_check=1; +INSERT INTO t1 VALUES(1, 1); +INSERT INTO t1 VALUES(1, 2); +INSERT INTO t1 VALUES(1, 3); +SELECT * FROM t1; +id value +REPLACE INTO t1 VALUES(4, 4); +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: REPLACE INTO t1 VALUES(4, 4) +INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. 
Query: INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1 +TRUNCATE TABLE t1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +SET rocksdb_skip_unique_check=0; +SET rocksdb_commit_in_the_middle=1; +SET rocksdb_bulk_load_size=10; +BEGIN; +INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10), +(11),(12),(13),(14),(15),(16),(17),(18),(19); +ROLLBACK; +SELECT * FROM t1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +6 NULL +7 NULL +8 NULL +9 NULL +10 NULL +INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15); +BEGIN; +UPDATE t1 SET value=100; +ROLLBACK; +SELECT * FROM t1; +id value +1 100 +2 100 +3 100 +4 100 +5 100 +6 100 +7 100 +8 100 +9 100 +10 100 +11 NULL +12 NULL +13 NULL +14 NULL +15 NULL +BEGIN; +DELETE FROM t1; +ROLLBACK; +SELECT * FROM t1; +id value +11 NULL +12 NULL +13 NULL +14 NULL +15 NULL +SET rocksdb_commit_in_the_middle=0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +DROP TABLE t1; +# +# Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance() +# +CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY; +INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar"); +Warnings: +Warning 1366 Incorrect integer value: 'long varchar' for column 'data' at row 2 +Warning 1366 Incorrect integer value: 'varchar' for column 'data' at row 3 +Warning 1366 Incorrect integer value: 'long long long varchar' for column 'data' at row 4 +create TABLE t1 (a int not null, b int not null, primary key(a,b)); +INSERT INTO t1 VALUES (1,1); +SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4)); +a +1 +1 +1 +1 +DROP TABLE t1, t2; +# +# Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't release +# +create table r1 (id int auto_increment primary key, value int); +insert into r1 (id) values (null), (null), (null), (null), (null); +create table r2 like r1; +show create table r2; 
+Table Create Table +r2 CREATE TABLE `r2` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `value` int(11) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +begin; +insert into r1 values (10, 1); +commit; +begin; +select * from r1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +10 1 +commit; +drop table r1, r2; +create table r1 (id int auto_increment, value int, index i(id)); +insert into r1 (id) values (null), (null), (null), (null), (null); +create table r2 like r1; +show create table r2; +Table Create Table +r2 CREATE TABLE `r2` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `value` int(11) DEFAULT NULL, + KEY `i` (`id`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +begin; +insert into r1 values (10, 1); +commit; +begin; +select * from r1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +10 1 +commit; +drop table r1, r2; +# +# Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT +# +CREATE TABLE t1(c1 INT); +lock TABLE t1 read local; +SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM''); +1 +set AUTOCOMMIT=0; +start transaction with consistent snapshot; +SELECT * FROM t1; +c1 +COMMIT; +UNLOCK TABLES; +DROP TABLE t1; +# +# Issue#213 Crash on LOCK TABLES + partitions +# +CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2; +INSERT INTO t1(a)VALUES (20010101101010.999949); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +lock tables t1 write,t1 as t0 write,t1 as t2 write; +SELECT a FROM t1 ORDER BY a; +a +2147483647 +truncate t1; +INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020'); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +Warning 1264 Out of range value for column 'b' at row 2 +UNLOCK TABLES; +DROP TABLE t1; +# +# Issue#250: MyRocks/Innodb different output from query with order by on table 
with index and decimal type +# (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue +# needs a datype that doesn't support index-inly) +# +CREATE TABLE t1( +c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL, +c2 varchar(10) character set utf8 collate utf8_general_ci, +c3 INT, +INDEX idx(c1,c2) +); +INSERT INTO t1 VALUES ('c1-val1','c2-val1',5); +INSERT INTO t1 VALUES ('c1-val2','c2-val3',6); +INSERT INTO t1 VALUES ('c1-val3','c2-val3',7); +SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC; +c1 c2 c3 +c1-val3 c2-val3 7 +c1-val1 c2-val1 5 +explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 32 NULL # Using where +drop table t1; +# +# Issue#267: MyRocks issue with no matching min/max row and count(*) +# +CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2)); +INSERT INTO t1 VALUES(1,null); +INSERT INTO t1 VALUES(2,null); +SELECT count(*) as total_rows, min(c2) as min_value FROM t1; +total_rows min_value +2 NULL +DROP TABLE t1; +# +# Issue#263: MyRocks auto_increment skips values if you insert a negative value +# +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(-1),(0); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL +SELECT * FROM t1; +a +-1 +1 +2 +DROP TABLE t1; +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(10),(0); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 0 
0 0 0 12 NULL NULL NULL latin1_swedish_ci NULL +SELECT * FROM t1; +a +1 +10 +11 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result new file mode 100644 index 00000000000..09d251ccbe6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result @@ -0,0 +1,61 @@ +create table t1 (a int, +primary key (a) comment 'cf1') engine=rocksdb; +create table t2 (a int, +primary key (a) comment 'cf2') engine=rocksdb; +create table t3 (a int, +primary key (a) comment 'z') engine=rocksdb; +insert into t1 values (1); +insert into t2 values (2); +insert into t3 values (2); + +Default options for all column families: + +select cf_name, option_type, value +from information_schema.rocksdb_cf_options +where option_type in ('WRITE_BUFFER_SIZE', +'TARGET_FILE_SIZE_BASE', +'MAX_BYTES_FOR_LEVEL_MULTIPLIER') +order by cf_name, option_type; +cf_name option_type value +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf1 TARGET_FILE_SIZE_BASE 1048576 +cf1 WRITE_BUFFER_SIZE 12582912 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf2 TARGET_FILE_SIZE_BASE 1048576 +cf2 WRITE_BUFFER_SIZE 12582912 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +default TARGET_FILE_SIZE_BASE 1048576 +default WRITE_BUFFER_SIZE 12582912 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +z TARGET_FILE_SIZE_BASE 1048576 +z WRITE_BUFFER_SIZE 12582912 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +__system__ TARGET_FILE_SIZE_BASE 1048576 +__system__ WRITE_BUFFER_SIZE 12582912 + +Individualized options for column families: + +select cf_name, option_type, value +from information_schema.rocksdb_cf_options +where option_type in ('WRITE_BUFFER_SIZE', +'TARGET_FILE_SIZE_BASE', +'MAX_BYTES_FOR_LEVEL_MULTIPLIER') +order by cf_name, option_type; +cf_name option_type value +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf1 TARGET_FILE_SIZE_BASE 2097152 +cf1 WRITE_BUFFER_SIZE 
8388608 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8 +cf2 TARGET_FILE_SIZE_BASE 1048576 +cf2 WRITE_BUFFER_SIZE 16777216 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +default TARGET_FILE_SIZE_BASE 1048576 +default WRITE_BUFFER_SIZE 12582912 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +z TARGET_FILE_SIZE_BASE 4194304 +z WRITE_BUFFER_SIZE 12582912 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +__system__ TARGET_FILE_SIZE_BASE 1048576 +__system__ WRITE_BUFFER_SIZE 12582912 + +drop table t1,t2,t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result new file mode 100644 index 00000000000..1c85343cabb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result @@ -0,0 +1,120 @@ +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int primary key, +a int not null, +b int not null, +key(a) comment 'rev:foo', +key(b) comment 'bar' +) engine=rocksdb; +insert into t1 select a,a,a from t0; +insert into t1 select a+10,a+10,a+10 from t0; +# Primary key is not in a reverse-ordered CF, so full table scan +# returns rows in ascending order: +select * from t1; +pk a b +0 0 0 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +12 12 12 +13 13 13 +14 14 14 +15 15 15 +16 16 16 +17 17 17 +18 18 18 +19 19 19 +explain +select a from t1 order by a limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 4 NULL # Using index +select a from t1 order by a limit 5; +a +0 +1 +2 +3 +4 +explain +select b from t1 order by b limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL b 4 NULL # Using index +select a from t1 order by a limit 5; +a +0 +1 +2 +3 +4 +explain +select a from t1 order by a desc limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 4 NULL # 
Using index +select a from t1 order by a desc limit 5; +a +19 +18 +17 +16 +15 +explain +select b from t1 order by b desc limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL b 4 NULL # Using index +select b from t1 order by b desc limit 5; +b +19 +18 +17 +16 +15 +drop table t1; +# +# Try a primary key in a reverse-ordered CF. +# +create table t2 ( +pk int, +a int not null, +primary key(pk) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select a,a from t0; +# Primary key is in a reverse-ordered CF, so full table scan +# returns rows in descending order: +select * from t2; +pk a +9 9 +8 8 +7 7 +6 6 +5 5 +4 4 +3 3 +2 2 +1 1 +0 0 +set autocommit=0; +begin; +delete from t2 where a=3 or a=7; +select * from t2; +pk a +9 9 +8 8 +6 6 +5 5 +4 4 +2 2 +1 1 +0 0 +rollback; +set autocommit=1; +drop table t2; +drop table t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result new file mode 100644 index 00000000000..a8908edada5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result @@ -0,0 +1,129 @@ +set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; +set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; +set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; +drop table if exists t1,t2,t3; +show variables like 'rocksdb_%checksum%'; +Variable_name Value +rocksdb_checksums_pct 100 +rocksdb_store_checksums OFF +rocksdb_verify_checksums OFF +create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3); +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK + CHECKTABLE t1: Checking table t1 + CHECKTABLE t1: Checking index a + CHECKTABLE t1: ... 3 index entries checked (0 had checksums) + CHECKTABLE t1: Checking index b + CHECKTABLE t1: ... 
3 index entries checked (0 had checksums) + CHECKTABLE t1: 0 table records had checksums +drop table t1; +set session rocksdb_store_checksums=on; +create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t2 values (1,1,1),(2,2,2),(3,3,3); +check table t2; +Table Op Msg_type Msg_text +test.t2 check status OK + CHECKTABLE t2: Checking table t2 + CHECKTABLE t2: Checking index a + CHECKTABLE t2: ... 3 index entries checked (3 had checksums) + CHECKTABLE t2: Checking index b + CHECKTABLE t2: ... 3 index entries checked (3 had checksums) + CHECKTABLE t2: 3 table records had checksums +# Now, make a table that has both rows with checksums and without +create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t3 values (1,1,1),(2,2,2),(3,3,3); +set session rocksdb_store_checksums=off; +update t3 set b=3 where a=2; +set session rocksdb_store_checksums=on; +check table t3; +Table Op Msg_type Msg_text +test.t3 check status OK + CHECKTABLE t3: Checking table t3 + CHECKTABLE t3: Checking index a + CHECKTABLE t3: ... 3 index entries checked (3 had checksums) + CHECKTABLE t3: Checking index b + CHECKTABLE t3: ... 3 index entries checked (2 had checksums) + CHECKTABLE t3: 2 table records had checksums +set session rocksdb_store_checksums=on; +set session rocksdb_checksums_pct=5; +create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +check table t4; +Table Op Msg_type Msg_text +test.t4 check status OK +10000 index entries had around 500 checksums +10000 index entries had around 500 checksums +Around 500 table records had checksums +set session rocksdb_checksums_pct=100; +# +# Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches. +# +insert into mtr.test_suppressions values +('Checksum mismatch in key of key-value pair for index'), +('Checksum mismatch in value of key-value pair for index'), +('Data with incorrect checksum'); +# 1. 
Start with mismatch in key checksum of the PK. +set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session rocksdb_verify_checksums=off; +select * from t3; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session rocksdb_verify_checksums=on; +select * from t3; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4; +ERROR HY000: Internal error: Record checksum mismatch +set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; +# 2. Continue with mismatch in pk value checksum. +set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session rocksdb_verify_checksums=off; +select * from t3; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session rocksdb_verify_checksums=on; +select * from t3; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4; +ERROR HY000: Internal error: Record checksum mismatch +set session debug= "-d,myrocks_simulate_bad_pk_checksum2"; +# 3. Check if we catch checksum mismatches for secondary indexes +explain +select * from t3 force index(a) where a<4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 range a a 5 NULL # Using index condition +select * from t3 force index(a) where a<4; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +select * from t3 force index(a) where a<4; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4 force index(a) where a<1000000; +ERROR HY000: Internal error: Record checksum mismatch +set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +# 4. The same for index-only reads? 
+explain +select a from t3 force index(a) where a<4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 index a a 5 NULL # Using where; Using index +select a from t3 force index(a) where a<4; +a +1 +2 +3 +set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +select a from t3 force index(a) where a<4; +ERROR HY000: Internal error: Record checksum mismatch +select a from t4 force index(a) where a<1000000; +ERROR HY000: Internal error: Record checksum mismatch +set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; +set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; +set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; +drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result new file mode 100644 index 00000000000..9d6d368c686 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result @@ -0,0 +1,56 @@ +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 1; +SET debug_sync='now SIGNAL go'; +pk a +2 2 +3 3 +set debug_sync='RESET'; +drop table t1; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 3; +SET debug_sync='now SIGNAL go'; +pk a +2 2 +1 1 +set debug_sync='RESET'; +drop table t1; +SET debug_sync='RESET'; +DROP TABLE 
IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 1; +SET debug_sync='now SIGNAL go'; +pk a +2 2 +3 3 +set debug_sync='RESET'; +drop table t1; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 3; +SET debug_sync='now SIGNAL go'; +pk a +2 2 +1 1 +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result new file mode 100644 index 00000000000..7910e98b198 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result @@ -0,0 +1,2 @@ +Check for the number of MANIFEST files +1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result new file mode 100644 index 00000000000..51841f174af --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result @@ -0,0 +1,227 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) comment 'cf1' +) engine=rocksdb; +insert into t2 select 
a,a,a,a from t1; +# Try a basic case: +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +pk kp1 kp2 col1 +2 2 2 2 +4 4 4 4 +6 6 6 6 +8 8 8 8 +10 10 10 10 +# Check that ICP doesnt work for columns where column value +# cant be restored from mem-comparable form: +create table t3 ( +pk int primary key, +kp1 int, +kp2 varchar(10) collate utf8_general_ci, +col1 int, +key (kp1,kp2) comment 'cf1' +) engine=rocksdb; +insert into t3 select a,a/10,a,a from t1; +# This must not use ICP: +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref kp1 kp1 5 const # Using where +explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "range", + "possible_keys": [ + "kp1" + ], + "key": "kp1", + "used_key_parts": [ + "kp1" + ], + "key_length": "5", + "rows": 1000, + "filtered": 100, + "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))", + "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')" + } + } +} +Warnings: +Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%')) +# Check that we handle the case where out-of-range is encountered sooner +# than matched index condition +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and 
kp2+1>50000; +pk kp1 kp2 col1 +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +# Try doing backwards scans +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +10 10 10 10 +8 8 8 8 +6 6 6 6 +4 4 4 4 +2 2 2 2 +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +998 998 998 998 +996 996 996 996 +994 994 994 994 +992 992 992 992 +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +pk kp1 kp2 col1 +drop table t0,t1,t2,t3; +# +# Check how ICP affects counters +# +# First, some preparations +# +create procedure save_read_stats() +select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +into @rr, @rq, @rif, @rin +from information_schema.table_statistics +where table_name='t4' and table_schema=database(); +create procedure get_read_stats() +select +ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin +from information_schema.table_statistics +where table_name='t4' and table_schema=database(); +create table t4 ( +id int, +id1 int, +id2 int, +value int, +value2 varchar(100), +primary key (id), +key id1_id2 (id1, id2) comment 'cf1' +) 
engine=rocksdb charset=latin1 collate latin1_bin; +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); +# +# Now, the test itself +# +call save_read_stats(); +call get_read_stats(); +ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin +0 0 0 0 +# ============== index-only query ============== +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id1 id2 +1 1 +call get_read_stats(); +ROWS_READ-@rr 10 +ROWS_REQUESTED-@rq 11 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 9 +# ============== Query without ICP ============== +set optimizer_switch='index_condition_pushdown=off'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ-@rr 10 +ROWS_REQUESTED-@rq 11 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 9 +# ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ-@rr 1 +ROWS_REQUESTED-@rq 1 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 0 +drop table t4; +drop 
procedure save_read_stats; +drop procedure get_read_stats; +# +# Issue #67: Inefficient index condition pushdown +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int not null primary key, +key1 bigint(20) unsigned, +col1 int, +key (key1) +) engine=rocksdb; +insert into t1 +select +A.a+10*B.a+100*C.a, +A.a+10*B.a+100*C.a, +1234 +from t0 A, t0 B, t0 C; +set @count=0; +explain +select * from t1 where key1=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 9 const # Using index condition +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context +where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); +select * from t1 where key1=1; +pk key1 col1 +1 1 1234 +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context +where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); +# The following must be =1, or in any case not 999: +select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment"; +INTERNAL_KEY_SKIPPED_COUNT increment +1 +drop table t0,t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result new file mode 100644 index 00000000000..d368da16a60 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result @@ -0,0 +1,193 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key 
(kp1,kp2) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +# Try a basic case: +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +pk kp1 kp2 col1 +2 2 2 2 +4 4 4 4 +6 6 6 6 +8 8 8 8 +10 10 10 10 +# Check that ICP doesnt work for columns where column value +# cant be restored from mem-comparable form: +create table t3 ( +pk int primary key, +kp1 int, +kp2 varchar(10) collate utf8_general_ci, +col1 int, +key (kp1,kp2) comment 'rev:cf1' +) engine=rocksdb; +insert into t3 select a,a/10,a,a from t1; +# This must not use ICP: +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref kp1 kp1 5 const # Using where +explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "range", + "possible_keys": [ + "kp1" + ], + "key": "kp1", + "used_key_parts": [ + "kp1" + ], + "key_length": "5", + "rows": 1000, + "filtered": 100, + "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))", + "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')" + } + } +} +Warnings: +Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%')) +# Check that we handle the case where out-of-range is encountered sooner +# than matched index condition +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range 
kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +# Try doing backwards scans +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +10 10 10 10 +8 8 8 8 +6 6 6 6 +4 4 4 4 +2 2 2 2 +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +998 998 998 998 +996 996 996 996 +994 994 994 994 +992 992 992 992 +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +pk kp1 kp2 col1 +drop table t0,t1,t2,t3; +# +# Check how ICP affects counters +# +# First, some preparations +# +create procedure save_read_stats() +select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +into @rr, @rq, @rif, @rin +from information_schema.table_statistics +where table_name='t4' and table_schema=database(); +create procedure get_read_stats() +select +ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin +from information_schema.table_statistics +where table_name='t4' and table_schema=database(); +create table t4 ( +id int, +id1 int, +id2 int, +value int, +value2 
varchar(100), +primary key (id), +key id1_id2 (id1, id2) comment 'rev:cf1' +) engine=rocksdb charset=latin1 collate latin1_bin; +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); +# +# Now, the test itself +# +call save_read_stats(); +call get_read_stats(); +ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin +0 0 0 0 +# ============== index-only query ============== +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id1 id2 +1 1 +call get_read_stats(); +ROWS_READ-@rr 10 +ROWS_REQUESTED-@rq 11 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 9 +# ============== Query without ICP ============== +set optimizer_switch='index_condition_pushdown=off'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ-@rr 10 +ROWS_REQUESTED-@rq 11 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 9 +# ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ-@rr 1 
+ROWS_REQUESTED-@rq 1 +ROWS_INDEX_FIRST-@rif 1 +ROWS_INDEX_NEXT-@rin 0 +drop table t4; +drop procedure save_read_stats; +drop procedure get_read_stats; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result new file mode 100644 index 00000000000..e4d11960e6e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result @@ -0,0 +1,63 @@ +create table t1 (pk int not null primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +set autocommit=0; +begin; +select * from t1 where pk=1 for update; +pk +1 +connect con1,localhost,root,,; +connection con1; +### Connection con1 +set @@rocksdb_lock_wait_timeout=500; +set autocommit=0; +begin; +select * from t1 where pk=1 for update;; +connection default; +### Connection default +rollback; +connection con1; +pk +1 +rollback; +connection default; +begin; +select * from t1 where pk=1 for update; +pk +1 +connection con1; +### Connection con1 +set @@rocksdb_lock_wait_timeout=2; +set autocommit=0; +begin; +select * from t1 where pk=1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +connection default; +rollback; +set autocommit=1; +connection con1; +drop table t1; +connection default; +# +# Now, test what happens if another transaction modified the record and committed +# +CREATE TABLE t1 ( +id int primary key, +value int +) engine=rocksdb collate latin1_bin; +insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +connection con1; +BEGIN; +SELECT * FROM t1 WHERE id=3; +id value +3 3 +connection default; +BEGIN; +UPDATE t1 SET value=30 WHERE id=3; +COMMIT; +connection con1; +SELECT * FROM t1 WHERE id=3 FOR UPDATE; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ROLLBACK; +disconnect con1; +connection default; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result 
b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result new file mode 100644 index 00000000000..acf62d0bb70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -0,0 +1,123 @@ +drop table if exists t1,t2; +# Tests for MyRocks + partitioning +# +# MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple +# +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB +PARTITION BY HASH(pk) PARTITIONS 2; +INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 1); +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1, 1), (2, 1); +SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 ); +f1 +NULL +drop table t1,t2; +# +# Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables +# +CREATE TABLE t1 ( +id INT PRIMARY KEY, +a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8, +b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null, +c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null, +INDEX (a), +INDEX (b), +INDEX (c) +) ENGINE=RocksDB PARTITION BY key (id) partitions 2; +INSERT INTO t1 (id, b) VALUES (28, 3); +Warnings: +Warning 1364 Field 'c' doesn't have a default value +UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +# +# Issue #105, another testcase +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key (col1) comment 'rev:cf_issue105' +) engine=rocksdb partition by hash(pk) partitions 2; +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +explain select * from t1 force index(col1) where col1=10; 
+id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const 2000 NULL +select * from t1 force index(col1) where col1=10; +pk col1 col2 +2 10 10 +1 10 10 +select * from t1 use index () where col1=10; +pk col1 col2 +2 10 10 +1 10 10 +drop table t1; +# +# Issue #108: Index-only scans do not work for partitioned tables and extended keys +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key (col1) +) engine=rocksdb partition by hash(pk) partitions 2; +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +# The following must use "Using index" +explain select pk from t1 force index(col1) where col1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const 2000 Using index +drop table t1; +# +# Issue #214: subqueries cause crash +# +create TABLE t1(a int,b int,c int,primary key(a,b)) +partition by list (b*a) (partition x1 values in (1) tablespace ts1, +partition x2 values in (3,11,5,7) tablespace ts2, +partition x3 values in (16,8,5+19,70-43) tablespace ts3); +create table t2(b binary(2)); +set session optimizer_switch=5; +insert into t1(a,b) values(1,7); +select a from t1 where a in (select a from t1 where a in (select b from t2)); +a +drop table t1, t2; +# +# Issue #260: altering name to invalid value leaves table unaccessible +# +CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; +INSERT INTO t1 VALUES(1,'a'); +RENAME TABLE t1 TO db3.t3; +ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: 122 - Internal (unspecified) error in handler) +SELECT * FROM t1; +c1 c2 +1 a +SHOW TABLES; +Tables_in_test +t1 +RENAME TABLE t1 TO test.t3; +SELECT * FROM t3; +c1 c2 +1 a +SHOW TABLES; +Tables_in_test +t3 +CREATE DATABASE db3; +USE test; +RENAME TABLE t3 to db3.t2; +USE db3; +SELECT * FROM t2; +c1 c2 +1 a +SHOW TABLES; +Tables_in_db3 +t2 +DROP TABLE 
t2; +use test; +DROP DATABASE db3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result new file mode 100644 index 00000000000..7a17dabf294 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result @@ -0,0 +1,37 @@ +create table t1 (pk int primary key, c char(8)) engine=RocksDB; +insert into t1 values (1,'new'),(2,'new'); +select * from t1; +pk c +1 new +2 new +connect con1,localhost,root,,; +update t1 set c = 'updated'; +connection default; +flush status; +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +show global status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk c +1 updated +2 updated +select sql_no_cache * from t1; +pk c +1 updated +2 updated +select * from t1 where pk = 1; +pk c +1 updated +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +show status like 'Qcache_not_cached'; +Variable_name Value +Qcache_not_cached 3 +show global status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result new file mode 100644 index 00000000000..d20bbc9b775 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result @@ -0,0 +1,290 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3,t4,t5; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int not null, +a int not null, +b int not null, +primary key(pk), +key(a) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 
A; +# +# HA_READ_KEY_EXACT tests +# +# Original failure was here: +explain +select * from t2 force index (a) where a=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 4 const # NULL +select * from t2 force index (a) where a=0; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# The rest are for code coverage: +explain +select * from t2 force index (a) where a=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 4 const # NULL +select * from t2 force index (a) where a=2; +pk a b +20 2 20 +21 2 21 +22 2 22 +23 2 23 +24 2 24 +25 2 25 +26 2 26 +27 2 27 +28 2 28 +29 2 29 +explain +select * from t2 force index (a) where a=3 and pk=33; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 const a a 8 const,const # NULL +select * from t2 force index (a) where a=3 and pk=33; +pk a b +33 3 33 +select * from t2 force index (a) where a=99 and pk=99; +pk a b +select * from t2 force index (a) where a=0 and pk=0; +pk a b +0 0 0 +select * from t2 force index (a) where a=-1; +pk a b +select * from t2 force index (a) where a=-1 and pk in (101,102); +pk a b +select * from t2 force index (a) where a=100 and pk in (101,102); +pk a b +# +# #36: Range in form tbl.key >= const doesn't work in reverse column family +# +explain +select count(*) from t2 force index (a) where a>=0 and a <=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>=0 and a <=1; +count(*) +20 +explain +select count(*) from t2 force index (a) where a>=-1 and a <=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>=-1 and a <=1; +count(*) +20 +explain +select * from t2 force index (a) where a=0 and pk>=3; +id select_type table type 
possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using index condition +select * from t2 force index (a) where a=0 and pk>=3; +pk a b +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# Try edge cases where we fall over the end of the table +create table t3 like t2; +insert into t3 select * from t2; +select * from t3 where pk>=1000000; +pk a b +select * from t2 where pk>=1000000; +pk a b +# +# #42: Range in form tbl.key > const doesn't work in reverse column family +# +explain +select count(*) from t2 force index (a) where a>0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>0; +count(*) +990 +explain +select count(*) from t2 force index (a) where a>99; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>99; +count(*) +0 +select * from t2 where pk>1000000; +pk a b +select * from t3 where pk>1000000; +pk a b +explain +select count(*) from t2 force index (a) where a=2 and pk>25; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using where; Using index +select count(*) from t2 force index (a) where a=2 and pk>25; +count(*) +4 +select * from t2 force index (a) where a>-10 and a < 1; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +select * from t3 force index (a) where a>-10 and a < 1; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# +# #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family +# +select max(a) from t2 where a < 2; +max(a) +1 +select max(a) from t2 where a < -1; +max(a) +NULL +select max(pk) from t2 where a=3 and pk < 6; +max(pk) +NULL +select max(pk) from t2 where pk < 200000; +max(pk) +999 +select max(pk) from t2 where pk < 20; +max(pk) +19 +select 
max(a) from t3 where a < 2; +max(a) +1 +select max(a) from t3 where a < -1; +max(a) +NULL +select max(pk) from t3 where pk < 200000; +max(pk) +999 +select max(pk) from t3 where pk < 20; +max(pk) +19 +select max(pk) from t2 where a=3 and pk < 33; +max(pk) +32 +select max(pk) from t3 where a=3 and pk < 33; +max(pk) +32 +# +# #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF +# +# Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +explain +select * from t2 where a between 99 and 2000 order by a desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using index condition +select * from t2 where a between 99 and 2000 order by a desc; +pk a b +999 99 999 +998 99 998 +997 99 997 +996 99 996 +995 99 995 +994 99 994 +993 99 993 +992 99 992 +991 99 991 +990 99 990 +select max(a) from t2 where a <=10; +max(a) +10 +select max(a) from t2 where a <=-4; +max(a) +NULL +select max(pk) from t2 where a=5 and pk <=55; +max(pk) +55 +select max(pk) from t2 where a=5 and pk <=55555; +max(pk) +59 +select max(pk) from t2 where a=5 and pk <=0; +max(pk) +NULL +select max(pk) from t2 where pk <=-1; +max(pk) +NULL +select max(pk) from t2 where pk <=999999; +max(pk) +999 +select max(pk) from t3 where pk <=-1; +max(pk) +NULL +select max(pk) from t3 where pk <=999999; +max(pk) +999 +# +# Tests for search_flag=HA_READ_PREFIX_LAST +# +create table t4 ( +pk int primary key, +a int, +b int, +c int, +key(a,b,c) +) engine=rocksdb; +insert into t4 select pk,pk,pk,pk from t2 where pk < 100; +explain +select * from t4 where a=1 and b in (1) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref a a 10 const,const # Using where; Using index +select * from t4 where a=1 and b in (1) order by c desc; +pk a b c +1 1 1 1 +explain +select * from t4 where a=5 and b in (4) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref a a 10 const,const # Using 
where; Using index +select * from t4 where a=5 and b in (4) order by c desc; +pk a b c +# HA_READ_PREFIX_LAST for reverse-ordered CF +create table t5 ( +pk int primary key, +a int, +b int, +c int, +key(a,b,c) comment 'rev:cf2' +) engine=rocksdb; +insert into t5 select pk,pk,pk,pk from t2 where pk < 100; +explain +select * from t5 where a=1 and b in (1) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t5 ref a a 10 const,const # Using where; Using index +select * from t5 where a=1 and b in (1) order by c desc; +pk a b c +1 1 1 1 +explain +select * from t5 where a=5 and b in (4) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t5 ref a a 10 const,const # Using where; Using index +select * from t5 where a=5 and b in (4) order by c desc; +pk a b c +drop table t0,t1,t2,t3,t4,t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result new file mode 100644 index 00000000000..d7a4f9dd065 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result @@ -0,0 +1,11 @@ +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +select count(*) from t1; +count(*) +10000 +explain select c1 from t1 where c1 > 5 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range i i 9 NULL 9900 Using where; Using index +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result new file mode 100644 index 00000000000..8c02de98c90 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result @@ -0,0 +1,66 @@ +create table t1 (a int primary key) engine=rocksdb; +Verify 
rocksdb_rows_inserted +select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +select @new_rows_inserted - @old_rows_inserted; +@new_rows_inserted - @old_rows_inserted +1 +Verify rocksdb_rows_updated +select variable_value into @old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +select @new_rows_updated - @old_rows_updated; +@new_rows_updated - @old_rows_updated +1 +Verify rocksdb_rows_read +select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select * from t1; +a +2 +select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select @new_rows_read - @old_rows_read; +@new_rows_read - @old_rows_read +1 +Verify rocksdb_rows_deleted +select variable_value into @old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +delete from t1; +select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +select @new_rows_deleted - @old_rows_deleted; +@new_rows_deleted - @old_rows_deleted +1 +use mysql; +create table t1(a int primary key) engine=rocksdb; +Verify rocksdb_system_rows_inserted +select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 
'rocksdb_system_rows_inserted'; +select @new_system_rows_inserted - @old_system_rows_inserted; +@new_system_rows_inserted - @old_system_rows_inserted +1 +Verify rocksdb_system_rows_updated +select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +select @new_system_rows_updated - @old_system_rows_updated; +@new_system_rows_updated - @old_system_rows_updated +1 +Verify rocksdb_system_rows_read +select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select * from t1; +a +2 +select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select @new_system_rows_read - @old_system_rows_read; +@new_system_rows_read - @old_system_rows_read +1 +Verify rocksdb_system_rows_deleted +select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +delete from t1; +select variable_value into @new_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +select @new_system_rows_deleted - @old_system_rows_deleted; +@new_system_rows_deleted - @old_system_rows_deleted +1 +drop table t1; +use test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result new file mode 100644 index 00000000000..1e8aa5787a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result @@ -0,0 +1,23 @@ +drop table if exists t1; +SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT; 
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; +create table t1 (pk int primary key) engine=rocksdb; +set global rocksdb_force_flush_memtable_now = true; +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't1'; +table_rows +10000 +drop table t1; +drop table if exists t2; +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10; +create table t2 (pk int primary key) engine=rocksdb; +set global rocksdb_force_flush_memtable_now = true; +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't2'; +table_rows +10000 +select table_name from information_schema.tables where table_schema = database() and table_name = 't2'; +table_name +t2 +drop table t2; +SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result new file mode 100644 index 00000000000..82609f46423 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result @@ -0,0 +1,321 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists t1; +create procedure save_read_stats() +begin +select rows_requested into @rq from information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value into @rr from information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value into @ru from information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value into @rd from information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create procedure get_read_stats() +begin +select rows_requested - @rq as rows_requested from +information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value - @rr as rows_read from +information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value - @ru as rows_updated from +information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value - @rd as rows_deleted from +information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4); +include/sync_slave_sql_with_master.inc + +# regular update/delete. 
With rocks_read_free_rpl_tables=.*, rocksdb_rows_read does not increase on slaves + +call save_read_stats(); +update t1 set value=value+1 where id=1; +delete from t1 where id=4; +select * from t1; +id value +1 2 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +1 +rows_deleted +1 +select * from t1; +id value +1 2 +2 2 +3 3 + +# "rocks_read_free_rpl_tables=.*" makes "row not found error" not happen anymore + +include/stop_slave.inc +delete from t1 where id in (2, 3); +include/start_slave.inc +call save_read_stats(); +update t1 set value=value+1 where id=3; +delete from t1 where id=2; +select * from t1; +id value +1 2 +3 4 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +1 +rows_deleted +1 +select * from t1; +id value +1 2 +3 4 + +## tables without primary key -- read free replication should be disabled + + +#no index + +drop table t1; +create table t1 (c1 int, c2 int); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +include/sync_slave_sql_with_master.inc +call save_read_stats(); +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +5 +rows_read +5 +rows_updated +1 +rows_deleted +2 +select * from t1; +c1 c2 +3 100 +4 4 +5 5 + +#secondary index only + +drop table t1; +create table t1 (c1 int, c2 int, index i(c1)); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +include/sync_slave_sql_with_master.inc +call save_read_stats(); +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +3 +rows_read +3 +rows_updated +1 +rows_deleted +2 +select * from t1; +c1 c2 +3 100 +4 4 +5 5 + +## large row operations -- primary key modification, secondary key modification + +drop table t1; +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 
bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +include/sync_slave_sql_with_master.inc +call save_read_stats(); + +#updating all seconary keys by 1 + +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating all primary keys by 2 + +call save_read_stats(); +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating secondary keys after truncating t1 on slave + +truncate table t1; +call save_read_stats(); +update t1 set c2=c2+10; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating primary keys after truncating t1 on slave + +truncate table t1; +call save_read_stats(); +update t1 set id2=id2+10; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#deleting half rows + +call save_read_stats(); +delete from t1 where id1 <= 5000; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +0 +rows_deleted +5000 +include/diff_tables.inc [master:t1, slave:t1] +[on master] +create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +include/sync_slave_sql_with_master.inc +[on slave] +delete from t2 where id <= 2; +delete from u2 where id <= 2; +[on master] +update t2 set i2=100, value=100 where id=1; +update u2 set 
i2=100, value=100 where id=1; +[on slave] +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 'u2'.*"); +include/wait_for_slave_sql_error.inc [errno=1032] +select count(*) from t2 force index(primary); +count(*) +2 +select count(*) from t2 force index(i1); +count(*) +1 +select count(*) from t2 force index(i2); +count(*) +2 +select * from t2 where id=1; +id i1 i2 value +1 1 100 100 +select i1 from t2 where i1=1; +i1 +select i2 from t2 where i2=100; +i2 +100 +select count(*) from u2 force index(primary); +count(*) +1 +select count(*) from u2 force index(i1); +count(*) +1 +select count(*) from u2 force index(i2); +count(*) +1 +select * from u2 where id=1; +id i1 i2 value +select i1 from u2 where i1=1; +i1 +select i2 from u2 where i2=100; +i2 +include/wait_for_slave_sql_to_start.inc + +# some tables with read-free replication on and some with it off +# secondary keys have extra rows + +[on master] +create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +include/sync_slave_sql_with_master.inc +[on slave] +update t3 set i1=100 where id=1; +update u3 set i1=100 where id=1; +[on master] +delete from t3 where id=1; +delete from u3 where id=1; +include/sync_slave_sql_with_master.inc +[on slave] +select count(*) from t3 force index(primary); +count(*) +2 +select count(*) from t3 force index(i1); +count(*) +3 +select count(*) from t3 force index(i2); +count(*) +2 +select i1 from t3 where i1=100; +i1 +100 +select count(*) from u3 force index(primary); +count(*) +2 +select count(*) from u3 force index(i1); +count(*) +2 +select count(*) from u3 force index(i2); +count(*) +2 +select i1 from u3 where i1=100; +i1 +drop table t1, t2, t3, u2, u3; +drop 
procedure save_read_stats; +drop procedure get_read_stats; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result new file mode 100644 index 00000000000..8cdfa910739 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result @@ -0,0 +1,56 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +drop table if exists t1; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; +include/sync_slave_sql_with_master.inc +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t2 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 4 +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; +call mtr.add_suppression("Deadlock found 
when trying to get lock"); +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t3 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 100 +drop table t0, t1, t2, t3; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result new file mode 100644 index 00000000000..50905527447 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result @@ -0,0 +1,42 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists t1; +select @@binlog_format; +@@binlog_format +ROW +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +include/sync_slave_sql_with_master.inc +select * from t1; +pk +1 +2 +3 +drop table t1; +# +# Issue #18: slave crash on update with row based binary logging +# +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; +include/sync_slave_sql_with_master.inc +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where id=2; +id value value2 +2 1 200 +select * from t1 where id=3; +id value value2 +3 1 300 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result new file mode 100644 index 00000000000..a95642096f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result @@ -0,0 +1,88 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists t1; +create procedure save_read_stats() +begin +select rows_requested into @rq from information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value into @rr from information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value into @ru from information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value into @rd from information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create procedure get_read_stats() +begin +select rows_requested - @rq as rows_requested from +information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value - @rr as rows_read from +information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value - @ru as rows_updated from +information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value - @rd as rows_deleted from +information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5); +include/sync_slave_sql_with_master.inc +call save_read_stats(); +update t1 set value=value+1 where id=1; +update t1 set value=value+1 where id=3; +select * from t1; +id value +1 2 +2 2 +3 4 +4 4 +5 5 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +2 +rows_read +2 +rows_updated +2 +rows_deleted +0 +select * from t1; +id value +1 2 +2 2 +3 4 +4 4 +5 5 +call save_read_stats(); +delete from t1 where id in (4,5); +select * from t1; +id value +1 2 +2 2 +3 4 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +2 +rows_read +2 +rows_updated +0 +rows_deleted +2 +select * from t1; +id value +1 2 +2 2 +3 4 +drop table t1; +drop procedure save_read_stats; +drop procedure get_read_stats; 
+include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result new file mode 100644 index 00000000000..1d3cd7db641 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result @@ -0,0 +1,242 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +# Test of row replication with triggers on the slave side +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +C1 C2 +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +C1 C2 +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values +('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), +('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), +('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_db before delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0'; +create trigger t1_cnt_ib before insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row 
+update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers test +insert into t1 values ('a','b'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 a +i1 1 a +u0 0 +u1 0 +# UPDATE triggers test +update t1 set C1= 'd'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 a +i1 1 a +u0 1 a d +u1 1 a d +# DELETE triggers test +delete from t1 where C1='d'; +SELECT * FROM t2 order by id; +id cnt o n +d0 1 d +d1 1 d +i0 1 a +i1 1 a +u0 1 a d +u1 1 a d +# INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 1 d +d1 1 d +i0 2 0 +i1 2 0 +u0 1 a d +u1 1 a d +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 1 d +d1 1 d +i0 3 0 +i1 3 0 +u0 2 0 0 +u1 2 0 0 +# INSERT triggers which cause also DELETE test +# (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); +drop table if exists t1; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; +CREATE TABLE t1 (i INT); +CREATE TABLE t2 (i INT); +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr=YES; +CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW +INSERT INTO t2 VALUES (new.i); +BEGIN; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (2); +COMMIT; +select * from t2; +i +1 +2 +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop tables t2,t1; +# Triggers on slave do not work if master has some +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +C1 C2 +create trigger t1_dummy before delete on t1 for each row +set @dummy= 1; +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET 
@@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +C1 C2 +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values +('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), +('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), +('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_ib before insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers test +insert into t1 values ('a','b'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# UPDATE triggers test +update t1 set C1= 'd'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# DELETE triggers test +delete from t1 where C1='d'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 0 +i1 1 0 +u0 0 +u1 0 +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 0 +i1 1 0 +u0 0 +u1 0 +# INSERT triggers which cause also DELETE test +# (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); +drop table if exists t1; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= 
@old_slave_run_triggers_for_rbr; +drop table t2; +# +# MDEV-5513: Trigger is applied to the rows after first one +# +create table t1 (a int, b int); +create table tlog (a int auto_increment primary key); +set sql_log_bin=0; +create trigger tr1 after insert on t1 for each row insert into tlog values (null); +set sql_log_bin=1; +set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr; +set global slave_run_triggers_for_rbr=1; +create trigger tr2 before insert on t1 for each row set new.b = new.a; +insert into t1 values (1,10),(2,20),(3,30); +select * from t1; +a b +1 10 +2 20 +3 30 +# +# Verify slave skips running triggers if master ran and logged the row events for triggers +# +create table t4(a int, b int); +delete from tlog; +create trigger tr4 before insert on t4 for each row insert into tlog values (null); +insert into t4 values (1, 10),(2, 20); +select * from tlog; +a +4 +5 +select * from t4; +a b +1 10 +2 20 +select * from tlog; +a +4 +5 +set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved; +drop table t1, tlog, t4; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result new file mode 100644 index 00000000000..9424238da93 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result @@ -0,0 +1,103 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists t1; +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3); +begin; +insert into t1 values (11, 1); +savepoint a; +insert into t1 values (12, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +commit; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +select * from t1; +id value +1 1 +2 2 +3 3 +begin; +insert into t1 values (21, 1); +savepoint a; +insert into t1 values (22, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +insert into t1 values (23, 1); +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +select * from t1; +id value +1 1 +2 2 +3 3 +begin; +insert into t1 values (31, 1); +savepoint a; +insert into t1 values (32, 1); +savepoint b; +insert into t1 values (33, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +insert into t1 values (34, 1); +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. 
+rollback; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +select * from t1; +id value +1 1 +2 2 +3 3 +SET autocommit=off; +select * from t1; +id value +1 1 +2 2 +3 3 +SAVEPOINT A; +select * from t1; +id value +1 1 +2 2 +3 3 +SAVEPOINT A; +insert into t1 values (35, 35); +ROLLBACK TO SAVEPOINT A; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +START TRANSACTION; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +select * from t1; +id value +1 1 +2 2 +3 3 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result new file mode 100644 index 00000000000..315f040899e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result @@ -0,0 +1,54 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +drop table if exists t1; +select @@binlog_format; +@@binlog_format +STATEMENT +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +ERROR HY000: Can't execute updates on master with binlog_format != ROW. 
+set session rocksdb_unsafe_for_binlog=on; +insert into t1 values (1),(2),(3); +select * from t1; +pk +1 +2 +3 +delete from t1; +set session rocksdb_unsafe_for_binlog=off; +insert into t1 values (1),(2),(3); +ERROR HY000: Can't execute updates on master with binlog_format != ROW. +set binlog_format=row; +insert into t1 values (1),(2),(3); +include/sync_slave_sql_with_master.inc +select * from t1; +pk +1 +2 +3 +drop table t1; +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; +include/sync_slave_sql_with_master.inc +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where id=2; +id value value2 +2 1 200 +select * from t1 where id=3; +id value value2 +3 1 300 +drop table t1; +set binlog_format=row; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result new file mode 100644 index 00000000000..8cdfa910739 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result @@ -0,0 +1,56 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists t1; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; +include/sync_slave_sql_with_master.inc +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t2 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 4 +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; +call mtr.add_suppression("Deadlock found when trying to get lock"); +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t3 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 100 +drop table t0, t1, t2, t3; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result new file mode 100644 index 00000000000..766795932b0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result @@ -0,0 +1,3 @@ +CREATE DATABASE IF NOT EXISTS rqg_examples; +Running test with grammar file example.yy 
+DROP DATABASE rqg_examples; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result new file mode 100644 index 00000000000..b0a1c408006 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result @@ -0,0 +1,29 @@ +call mtr.add_suppression("Did not write failed "); +call mtr.add_suppression("Can't open and lock privilege tables"); +SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; +CREATE TABLE mysql.user_temp LIKE mysql.user; +INSERT mysql.user_temp SELECT * FROM mysql.user; +CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv; +INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv_temp; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file alter_online.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file concurrency_1.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file connect_kill_sql.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file metadata_stability.yy +DROP DATABASE rqg_runtime; +DELETE FROM mysql.tables_priv; +DELETE FROM mysql.user; +INSERT mysql.user SELECT * FROM mysql.user_temp; +INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp; +DROP TABLE mysql.user_temp; +DROP TABLE mysql.tables_priv_temp; +DROP TABLE IF EXISTS test.executors; +DROP DATABASE IF EXISTS testdb_N; +DROP DATABASE IF EXISTS testdb_S; +SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result new file mode 100644 index 00000000000..23705d493e7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result @@ -0,0 +1,11 @@ +call mtr.add_suppression("Deadlock found when trying to get lock"); +CREATE DATABASE IF NOT EXISTS rqg_transactions; +Running 
test with grammar file transactions.yy +Running test with grammar file repeatable_read.yy +Running test with grammar file transaction_durability.yy +Running test with grammar file transactions-flat.yy +Running test with grammar file combinations.yy +Running test with grammar file repeatable_read.yy +Running test with grammar file transaction_durability.yy +Running test with grammar file transactions-flat.yy +DROP DATABASE rqg_transactions; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select.result b/storage/rocksdb/mysql-test/rocksdb/r/select.result new file mode 100644 index 00000000000..22a6ca9bc87 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select.result @@ -0,0 +1,373 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar'); +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a,b) SELECT a, b FROM t1; +INSERT INTO t1 (a,b) SELECT a, b FROM t2; +SELECT * FROM t1; +a b pk +1 z 2 +1 z 5 +100 foobar 1 +100 foobar 4 +200 bar 3 +200 bar 6 +SELECT DISTINCT a FROM t1; +a +1 +100 +200 +SELECT ALL b, a FROM t1; +b a +bar 200 +bar 200 +foobar 100 +foobar 100 +z 1 +z 1 +SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a; +a b pk +1 z 2 +1 z 2 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2; +a +1 +1 +1 +1 +1 +1 +100 +100 +100 +100 +100 +100 +200 +200 +200 +200 +200 +200 +SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a) +FROM t1 t1_1, t2, t1 t1_2; +a +1 +100 +200 +SELECT FOUND_ROWS(); +FOUND_ROWS() +3 +SET GLOBAL query_cache_size = 1024*1024; +SELECT SQL_CACHE * FROM t1, t2; +a b pk a b pk +1 z 2 1 z 2 +1 z 2 100 foobar 1 +1 z 2 200 bar 3 +1 z 5 1 z 2 +1 z 5 100 foobar 1 +1 z 5 200 bar 3 +100 foobar 1 1 z 2 +100 foobar 1 100 foobar 1 +100 
foobar 1 200 bar 3 +100 foobar 4 1 z 2 +100 foobar 4 100 foobar 1 +100 foobar 4 200 bar 3 +200 bar 3 1 z 2 +200 bar 3 100 foobar 1 +200 bar 3 200 bar 3 +200 bar 6 1 z 2 +200 bar 6 100 foobar 1 +200 bar 6 200 bar 3 +SET GLOBAL query_cache_size = 1048576; +SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1 +WHERE b > 'b' AND a IS NOT NULL +GROUP BY 2 DESC, field1 ASC +HAVING field1 < 1000 +ORDER BY field2, 1 DESC, field1*2 +LIMIT 5 OFFSET 1; +field1 field2 +11 z:z +110 foobar:foobar +SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP; +SUM(a) MAX(a) b +2 1 z +200 100 foobar +400 200 bar +602 200 NULL +SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE(); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t2.a 1 200 1 3 0 0 100.3333 81.2418 ENUM('1','100','200') NOT NULL +test.t2.b bar z 1 6 0 0 3.3333 NULL ENUM('bar','foobar','z') NOT NULL +test.t2.pk 1 3 1 1 0 0 2.0000 0.8165 ENUM('1','2','3') NOT NULL +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a +INTO OUTFILE '/select.out' +CHARACTER SET utf8 +FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY ''''; +200,'bar' +200,'bar' +100,'foobar' +100,'foobar' +1,'z' +1,'z' +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a +INTO DUMPFILE '/select.dump'; +ERROR 42000: Result consisted of more than one row +SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1 +INTO DUMPFILE '/select.dump'; +1z2200bar3 +SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max; +SELECT @min, @max; +@min @max +1 200 +SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2 +WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a; +a b pk a b pk +1 z 2 1 z 2 +1 z 2 1 z 2 +1 z 5 1 z 2 +1 z 5 1 z 2 +100 foobar 1 100 foobar 1 +100 foobar 1 100 foobar 1 +100 foobar 4 100 foobar 1 +100 foobar 4 100 foobar 1 +200 bar 3 200 bar 3 +200 bar 3 200 bar 3 +200 bar 6 200 bar 3 +200 bar 6 200 bar 3 +SELECT alias1.* FROM ( SELECT a,b FROM t1 ) 
alias1, t2 WHERE t2.a IN (100,200); +a b +1 z +1 z +1 z +1 z +100 foobar +100 foobar +100 foobar +100 foobar +200 bar +200 bar +200 bar +200 bar +SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 }; +a +1 +1 +100 +100 +200 +200 +SELECT t1.* FROM t2 INNER JOIN t1; +a b pk +1 z 2 +1 z 2 +1 z 2 +1 z 5 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +200 bar 6 +SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b; +a b pk +1 z 2 +1 z 2 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b; +a b +1 bar +1 bar +1 foobar +1 foobar +100 bar +100 bar +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b; +a b +1 bar +1 bar +1 foobar +1 foobar +100 bar +100 bar +SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1; +a b pk +1 z 2 +SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = t2.a WHERE t1.a IS NOT NULL; +a b pk +1 z 2 +1 z 2 +100 foobar 1 +100 foobar 1 +200 bar 3 +200 bar 3 +SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b; +SUM(t2.a) +602 +SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a); +MIN(t2.a) +1 +SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > ''; +b +bar +bar +bar +bar +foobar +foobar +foobar +foobar +z +z +z +z +SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL; +b +bar +bar +foobar +foobar +z +z +SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2; +a b pk a b pk +1 z 2 1 z 2 +1 z 5 NULL NULL NULL +100 foobar 1 100 foobar 1 +100 foobar 4 NULL NULL NULL +200 bar 3 200 bar 3 +200 bar 6 NULL NULL NULL +SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 ); +a b pk +1 z 2 +100 foobar 1 +200 bar 3 +SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER 
JOIN t2; +b +bar +bar +bar +bar +bar +bar +foobar +foobar +foobar +foobar +foobar +foobar +z +z +z +z +z +z +SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2; +min_a +1 +1 +1 +SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 ); +a b +1 z +SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 ); +a b +200 bar +SELECT t2.* FROM t1 t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a ); +a b pk +1 z 2 +1 z 2 +100 foobar 1 +100 foobar 1 +200 bar 3 +200 bar 3 +SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 ); +a b +100 foobar +200 bar +SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' ); +a b +1 z +100 foobar +SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 ); +a b +1 z +1 z +SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b ); +a b +100 foobar +100 foobar +200 bar +200 bar +SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC; +a b pk +1 z 2 +1 z 2 +1 z 2 +1 z 5 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +200 bar 6 +SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b ); +a b +1 z +1 z +100 foobar +100 foobar +200 bar +200 bar +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1; +a b +1 z +100 foobar +200 bar +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1; +a b +1 z +1 z +1 z +100 foobar +100 foobar +100 foobar +200 bar +200 bar +200 bar +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result new file mode 100644 index 00000000000..713f5e85fe0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a 
INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +connect con1,localhost,root,,; +BEGIN; +SELECT a,b FROM t1 WHERE b='a' FOR UPDATE; +a b +1 a +3 a +connection default; +SET lock_wait_timeout = 1; +SELECT a,b FROM t1 WHERE b='a'; +a b +1 a +3 a +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +UPDATE t1 SET b='c' WHERE b='a'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +connection con1; +COMMIT; +SELECT a,b FROM t1; +a b +1 a +2 b +3 a +disconnect con1; +connection default; +UPDATE t1 SET b='c' WHERE b='a'; +SELECT a,b FROM t1; +a b +1 c +2 b +3 c +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result new file mode 100644 index 00000000000..044aa4d6fc7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result @@ -0,0 +1,28 @@ +drop table if exists t1; +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +Should succeed since no table gets involved +select 1 for update skip locked; +1 +1 +select * from nonexistence for update skip locked; +ERROR 42S02: Table 'test.nonexistence' doesn't exist +select * from t1 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +select * from t1 where a > 1 and a < 3 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +insert into t1 select * from t1 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +Should succeed since no table gets involved +select 1 for update nowait; +1 +1 +select * from nonexistence for update nowait; +ERROR 42S02: Table 
'test.nonexistence' doesn't exist +select * from t1 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +select * from t1 where a > 1 and a < 3 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +insert into t1 select * from t1 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result new file mode 100644 index 00000000000..e6433dcbeef --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +connect con1,localhost,root,,; +BEGIN; +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +a b +1 a +3 a +connection default; +SET lock_wait_timeout = 1; +SELECT a,b FROM t1 WHERE b='a'; +a b +1 a +3 a +# +# Currently, SELECT ... 
LOCK IN SHARE MODE works like +# SELECT FOR UPDATE +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +UPDATE t1 SET b='c' WHERE b='a'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +connection con1; +COMMIT; +SELECT a,b FROM t1; +a b +1 a +2 b +3 a +disconnect con1; +connection default; +UPDATE t1 SET b='c' WHERE b='a'; +SELECT a,b FROM t1; +a b +1 c +2 b +3 c +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result new file mode 100644 index 00000000000..69b927ba5a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -0,0 +1,416 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; +CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB +PARTITION BY KEY(l) PARTITIONS 4; +SHOW ENGINE rocksdb STATUS; +Type Name Status +DBSTATS rocksdb # +CF_COMPACTION __system__ # +CF_COMPACTION cf_t1 # +CF_COMPACTION cf_t4 # +CF_COMPACTION default # +CF_COMPACTION rev:cf_t2 # +Memory_Stats rocksdb # +INSERT INTO t1 VALUES (1), (2), (3); +SELECT COUNT(*) FROM t1; +COUNT(*) +3 +INSERT INTO t2 VALUES (1), (2), (3), (4); +SELECT COUNT(*) FROM t2; +COUNT(*) +4 +INSERT INTO t4 VALUES (1), (2), (3), (4), (5); +SELECT COUNT(*) FROM t4; +COUNT(*) +5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS; +CF_NAME STAT_TYPE VALUE +__system__ NUM_IMMUTABLE_MEM_TABLE # +__system__ MEM_TABLE_FLUSH_PENDING # +__system__ COMPACTION_PENDING # +__system__ CUR_SIZE_ACTIVE_MEM_TABLE # +__system__ CUR_SIZE_ALL_MEM_TABLES # +__system__ 
NUM_ENTRIES_ACTIVE_MEM_TABLE # +__system__ NUM_ENTRIES_IMM_MEM_TABLES # +__system__ NON_BLOCK_CACHE_SST_MEM_USAGE # +__system__ NUM_LIVE_VERSIONS # +cf_t1 NUM_IMMUTABLE_MEM_TABLE # +cf_t1 MEM_TABLE_FLUSH_PENDING # +cf_t1 COMPACTION_PENDING # +cf_t1 CUR_SIZE_ACTIVE_MEM_TABLE # +cf_t1 CUR_SIZE_ALL_MEM_TABLES # +cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE # +cf_t1 NUM_ENTRIES_IMM_MEM_TABLES # +cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE # +cf_t1 NUM_LIVE_VERSIONS # +cf_t4 NUM_IMMUTABLE_MEM_TABLE # +cf_t4 MEM_TABLE_FLUSH_PENDING # +cf_t4 COMPACTION_PENDING # +cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE # +cf_t4 CUR_SIZE_ALL_MEM_TABLES # +cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE # +cf_t4 NUM_ENTRIES_IMM_MEM_TABLES # +cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE # +cf_t4 NUM_LIVE_VERSIONS # +default NUM_IMMUTABLE_MEM_TABLE # +default MEM_TABLE_FLUSH_PENDING # +default COMPACTION_PENDING # +default CUR_SIZE_ACTIVE_MEM_TABLE # +default CUR_SIZE_ALL_MEM_TABLES # +default NUM_ENTRIES_ACTIVE_MEM_TABLE # +default NUM_ENTRIES_IMM_MEM_TABLES # +default NON_BLOCK_CACHE_SST_MEM_USAGE # +default NUM_LIVE_VERSIONS # +rev:cf_t2 NUM_IMMUTABLE_MEM_TABLE # +rev:cf_t2 MEM_TABLE_FLUSH_PENDING # +rev:cf_t2 COMPACTION_PENDING # +rev:cf_t2 CUR_SIZE_ACTIVE_MEM_TABLE # +rev:cf_t2 CUR_SIZE_ALL_MEM_TABLES # +rev:cf_t2 NUM_ENTRIES_ACTIVE_MEM_TABLE # +rev:cf_t2 NUM_ENTRIES_IMM_MEM_TABLES # +rev:cf_t2 NON_BLOCK_CACHE_SST_MEM_USAGE # +rev:cf_t2 NUM_LIVE_VERSIONS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS; +STAT_TYPE VALUE +DB_BACKGROUND_ERRORS # +DB_NUM_SNAPSHOTS # +DB_OLDEST_SNAPSHOT_TIME # +DB_BLOCK_CACHE_USAGE # +SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE) +FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_SCHEMA = 'test' +GROUP BY TABLE_NAME, PARTITION_NAME; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME COUNT(STAT_TYPE) +test t1 NULL 43 +test t2 NULL 43 +test t4 p0 43 +test t4 p1 43 +test t4 p2 43 +test t4 p3 43 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; +CF_NAME OPTION_TYPE VALUE +__system__ 
COMPARATOR # +__system__ MERGE_OPERATOR # +__system__ COMPACTION_FILTER # +__system__ COMPACTION_FILTER_FACTORY # +__system__ WRITE_BUFFER_SIZE # +__system__ MAX_WRITE_BUFFER_NUMBER # +__system__ MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +__system__ NUM_LEVELS # +__system__ LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +__system__ LEVEL0_SLOWDOWN_WRITES_TRIGGER # +__system__ LEVEL0_STOP_WRITES_TRIGGER # +__system__ MAX_MEM_COMPACTION_LEVEL # +__system__ TARGET_FILE_SIZE_BASE # +__system__ TARGET_FILE_SIZE_MULTIPLIER # +__system__ MAX_BYTES_FOR_LEVEL_BASE # +__system__ LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER # +__system__ SOFT_RATE_LIMIT # +__system__ HARD_RATE_LIMIT # +__system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS # +__system__ ARENA_BLOCK_SIZE # +__system__ DISABLE_AUTO_COMPACTIONS # +__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH # +__system__ VERIFY_CHECKSUM_IN_COMPACTION # +__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +__system__ MEMTABLE_FACTORY # +__system__ INPLACE_UPDATE_SUPPORT # +__system__ INPLACE_UPDATE_NUM_LOCKS # +__system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +__system__ BLOOM_LOCALITY # +__system__ MAX_SUCCESSIVE_MERGES # +__system__ MIN_PARTIAL_MERGE_OPERANDS # +__system__ OPTIMIZE_FILTERS_FOR_HITS # +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +__system__ COMPRESSION_TYPE # +__system__ COMPRESSION_PER_LEVEL # +__system__ COMPRESSION_OPTS # +__system__ BOTTOMMOST_COMPRESSION # +__system__ PREFIX_EXTRACTOR # +__system__ COMPACTION_STYLE # +__system__ COMPACTION_OPTIONS_UNIVERSAL # +__system__ COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +__system__ BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +__system__ BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +__system__ BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +__system__ BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +__system__ BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +__system__ 
BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +__system__ BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +__system__ BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +cf_t1 COMPARATOR # +cf_t1 MERGE_OPERATOR # +cf_t1 COMPACTION_FILTER # +cf_t1 COMPACTION_FILTER_FACTORY # +cf_t1 WRITE_BUFFER_SIZE # +cf_t1 MAX_WRITE_BUFFER_NUMBER # +cf_t1 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +cf_t1 NUM_LEVELS # +cf_t1 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +cf_t1 LEVEL0_SLOWDOWN_WRITES_TRIGGER # +cf_t1 LEVEL0_STOP_WRITES_TRIGGER # +cf_t1 MAX_MEM_COMPACTION_LEVEL # +cf_t1 TARGET_FILE_SIZE_BASE # +cf_t1 TARGET_FILE_SIZE_MULTIPLIER # +cf_t1 MAX_BYTES_FOR_LEVEL_BASE # +cf_t1 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER # +cf_t1 SOFT_RATE_LIMIT # +cf_t1 HARD_RATE_LIMIT # +cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS # +cf_t1 ARENA_BLOCK_SIZE # +cf_t1 DISABLE_AUTO_COMPACTIONS # +cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH # +cf_t1 VERIFY_CHECKSUM_IN_COMPACTION # +cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +cf_t1 MEMTABLE_FACTORY # +cf_t1 INPLACE_UPDATE_SUPPORT # +cf_t1 INPLACE_UPDATE_NUM_LOCKS # +cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +cf_t1 BLOOM_LOCALITY # +cf_t1 MAX_SUCCESSIVE_MERGES # +cf_t1 MIN_PARTIAL_MERGE_OPERANDS # +cf_t1 OPTIMIZE_FILTERS_FOR_HITS # +cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +cf_t1 COMPRESSION_TYPE # +cf_t1 COMPRESSION_PER_LEVEL # +cf_t1 COMPRESSION_OPTS # +cf_t1 BOTTOMMOST_COMPRESSION # +cf_t1 PREFIX_EXTRACTOR # +cf_t1 COMPACTION_STYLE # +cf_t1 COMPACTION_OPTIONS_UNIVERSAL # +cf_t1 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +cf_t1 
BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +cf_t1 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +cf_t1 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +cf_t1 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +cf_t4 COMPARATOR # +cf_t4 MERGE_OPERATOR # +cf_t4 COMPACTION_FILTER # +cf_t4 COMPACTION_FILTER_FACTORY # +cf_t4 WRITE_BUFFER_SIZE # +cf_t4 MAX_WRITE_BUFFER_NUMBER # +cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +cf_t4 NUM_LEVELS # +cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER # +cf_t4 LEVEL0_STOP_WRITES_TRIGGER # +cf_t4 MAX_MEM_COMPACTION_LEVEL # +cf_t4 TARGET_FILE_SIZE_BASE # +cf_t4 TARGET_FILE_SIZE_MULTIPLIER # +cf_t4 MAX_BYTES_FOR_LEVEL_BASE # +cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER # +cf_t4 SOFT_RATE_LIMIT # +cf_t4 HARD_RATE_LIMIT # +cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS # +cf_t4 ARENA_BLOCK_SIZE # +cf_t4 DISABLE_AUTO_COMPACTIONS # +cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH # +cf_t4 VERIFY_CHECKSUM_IN_COMPACTION # +cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +cf_t4 MEMTABLE_FACTORY # +cf_t4 INPLACE_UPDATE_SUPPORT # +cf_t4 INPLACE_UPDATE_NUM_LOCKS # +cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +cf_t4 BLOOM_LOCALITY # +cf_t4 MAX_SUCCESSIVE_MERGES # +cf_t4 MIN_PARTIAL_MERGE_OPERANDS # +cf_t4 OPTIMIZE_FILTERS_FOR_HITS # +cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +cf_t4 COMPRESSION_TYPE # +cf_t4 COMPRESSION_PER_LEVEL # +cf_t4 COMPRESSION_OPTS # +cf_t4 BOTTOMMOST_COMPRESSION # +cf_t4 PREFIX_EXTRACTOR # +cf_t4 COMPACTION_STYLE # 
+cf_t4 COMPACTION_OPTIONS_UNIVERSAL # +cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +default COMPARATOR # +default MERGE_OPERATOR # +default COMPACTION_FILTER # +default COMPACTION_FILTER_FACTORY # +default WRITE_BUFFER_SIZE # +default MAX_WRITE_BUFFER_NUMBER # +default MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +default NUM_LEVELS # +default LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +default LEVEL0_SLOWDOWN_WRITES_TRIGGER # +default LEVEL0_STOP_WRITES_TRIGGER # +default MAX_MEM_COMPACTION_LEVEL # +default TARGET_FILE_SIZE_BASE # +default TARGET_FILE_SIZE_MULTIPLIER # +default MAX_BYTES_FOR_LEVEL_BASE # +default LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +default MAX_BYTES_FOR_LEVEL_MULTIPLIER # +default SOFT_RATE_LIMIT # +default HARD_RATE_LIMIT # +default RATE_LIMIT_DELAY_MAX_MILLISECONDS # +default ARENA_BLOCK_SIZE # +default DISABLE_AUTO_COMPACTIONS # +default PURGE_REDUNDANT_KVS_WHILE_FLUSH # +default VERIFY_CHECKSUM_IN_COMPACTION # +default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +default MEMTABLE_FACTORY # +default INPLACE_UPDATE_SUPPORT # +default INPLACE_UPDATE_NUM_LOCKS # +default MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +default BLOOM_LOCALITY # +default MAX_SUCCESSIVE_MERGES # +default MIN_PARTIAL_MERGE_OPERANDS # +default 
OPTIMIZE_FILTERS_FOR_HITS # +default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +default COMPRESSION_TYPE # +default COMPRESSION_PER_LEVEL # +default COMPRESSION_OPTS # +default BOTTOMMOST_COMPRESSION # +default PREFIX_EXTRACTOR # +default COMPACTION_STYLE # +default COMPACTION_OPTIONS_UNIVERSAL # +default COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +default BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +default BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +default BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +default BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +default BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +default BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +default BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +default BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +rev:cf_t2 COMPARATOR # +rev:cf_t2 MERGE_OPERATOR # +rev:cf_t2 COMPACTION_FILTER # +rev:cf_t2 COMPACTION_FILTER_FACTORY # +rev:cf_t2 WRITE_BUFFER_SIZE # +rev:cf_t2 MAX_WRITE_BUFFER_NUMBER # +rev:cf_t2 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +rev:cf_t2 NUM_LEVELS # +rev:cf_t2 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +rev:cf_t2 LEVEL0_SLOWDOWN_WRITES_TRIGGER # +rev:cf_t2 LEVEL0_STOP_WRITES_TRIGGER # +rev:cf_t2 MAX_MEM_COMPACTION_LEVEL # +rev:cf_t2 TARGET_FILE_SIZE_BASE # +rev:cf_t2 TARGET_FILE_SIZE_MULTIPLIER # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_BASE # +rev:cf_t2 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER # +rev:cf_t2 SOFT_RATE_LIMIT # +rev:cf_t2 HARD_RATE_LIMIT # +rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS # +rev:cf_t2 ARENA_BLOCK_SIZE # +rev:cf_t2 DISABLE_AUTO_COMPACTIONS # +rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH # +rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION # +rev:cf_t2 
MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +rev:cf_t2 MEMTABLE_FACTORY # +rev:cf_t2 INPLACE_UPDATE_SUPPORT # +rev:cf_t2 INPLACE_UPDATE_NUM_LOCKS # +rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +rev:cf_t2 BLOOM_LOCALITY # +rev:cf_t2 MAX_SUCCESSIVE_MERGES # +rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS # +rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +rev:cf_t2 COMPRESSION_TYPE # +rev:cf_t2 COMPRESSION_PER_LEVEL # +rev:cf_t2 COMPRESSION_OPTS # +rev:cf_t2 BOTTOMMOST_COMPRESSION # +rev:cf_t2 PREFIX_EXTRACTOR # +rev:cf_t2 COMPACTION_STYLE # +rev:cf_t2 COMPACTION_OPTIONS_UNIVERSAL # +rev:cf_t2 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +SHOW ENGINE rocksdb MUTEX; +Type Name Status +SHOW ENGINE ALL MUTEX; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +----------------------------------------- +END OF ROCKSDB 
TRANSACTION MONITOR OUTPUT +========================================= + +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +---SNAPSHOT, ACTIVE NUM sec +MySQL thread id TID, OS thread handle PTR +lock count 0, write count 0 +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +ROLLBACK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result new file mode 100644 index 00000000000..407a8b103bd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t2 (a,b) VALUES (1,'bar'); +set global rocksdb_force_flush_memtable_now = true; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; +SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' ); +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 2 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 ROCKSDB 10 Fixed 1 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 ROCKSDB 10 Fixed 1000 # # 0 0 0 NULL NULL NULL NULL utf8_general_ci NULL +SHOW TABLE STATUS WHERE name LIKE 't2'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free 
Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t2 ROCKSDB 10 Fixed 10000 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL +DROP TABLE t1, t2, t3; +CREATE DATABASE `db_new..............................................end`; +USE `db_new..............................................end`; +CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb; +INSERT INTO `t1_new..............................................end` VALUES (1); +SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end'; +TABLE_SCHEMA db_new..............................................end +TABLE_NAME t1_new..............................................end +DROP DATABASE `db_new..............................................end`; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result new file mode 100644 index 00000000000..f40aceffd79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; +SHOW GLOBAL VARIABLES LIKE "log_bin"; +Variable_name Value +log_bin ON +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result new file mode 100644 index 00000000000..ef9fafc852a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result @@ -0,0 +1,66 @@ +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where 
variable_name='rocksdb_number_sst_entry_delete'; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d > 9000 then 'true' else 'false' end +true +CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t3 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t3; 
+Table Op Msg_type Msg_text +test.t3 optimize status OK +select case when variable_value-@s = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s = 0 then 'true' else 'false' end +true +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d > 9000 then 'true' else 'false' end +true +CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t4 VALUES (1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t4; +Table Op Msg_type Msg_text +test.t4 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB; +INSERT INTO t5 VALUES (1, 1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t5; +Table Op Msg_type Msg_text +test.t5 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' 
else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +DROP TABLE t1, t2, t3, t4, t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result new file mode 100644 index 00000000000..e8a11363dba --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result @@ -0,0 +1,10 @@ +SET @cur_long_query_time = @@long_query_time; +SET @@long_query_time = 600; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB; +SET @@long_query_time = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +7500 +SET @@long_query_time = @cur_long_query_time; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result new file mode 100644 index 00000000000..1798563f328 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result @@ -0,0 +1,69 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +create table t1( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t1_1(b) +) engine=rocksdb; +create table t2( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t2_1(b) comment 'cf_t3' +) engine=rocksdb; +create table t3( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t3_1(b) comment 'rev:cf_t4' +) engine=rocksdb; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; +table_name table_rows +t2 
4999 +t3 4999 +SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1'; +CASE WHEN table_rows < 100000 then 'true' else 'false' end +true +set global rocksdb_force_flush_memtable_now = true; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +analyze table t1,t2,t3,t4,t5; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +test.t4 analyze Error Table 'test.t4' doesn't exist +test.t4 analyze status Operation failed +test.t5 analyze Error Table 'test.t5' doesn't exist +test.t5 analyze status Operation failed +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result new file mode 100644 index 00000000000..e0520f5a31b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) 
+1000 +SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; +TABLE_SCHEMA TABLE_NAME TABLE_ENGINE ROWS_INSERTED ROWS_UPDATED ROWS_DELETED ROWS_READ ROWS_REQUESTED COMPRESSED_PAGE_SIZE COMPRESS_PADDING COMPRESS_OPS COMPRESS_OPS_OK COMPRESS_PRIMARY_OPS COMPRESS_PRIMARY_OPS_OK COMPRESS_USECS COMPRESS_OK_USECS COMPRESS_PRIMARY_USECS COMPRESS_PRIMARY_OK_USECS UNCOMPRESS_OPS UNCOMPRESS_USECS ROWS_INDEX_FIRST ROWS_INDEX_NEXT IO_READ_BYTES IO_READ_REQUESTS IO_READ_SVC_USECS IO_READ_SVC_USECS_MAX IO_READ_WAIT_USECS IO_READ_WAIT_USECS_MAX IO_READ_SLOW_IOS IO_WRITE_BYTES IO_WRITE_REQUESTS IO_WRITE_SVC_USECS IO_WRITE_SVC_USECS_MAX IO_WRITE_WAIT_USECS IO_WRITE_WAIT_USECS_MAX IO_WRITE_SLOW_IOS IO_READ_BYTES_BLOB IO_READ_REQUESTS_BLOB IO_READ_SVC_USECS_BLOB IO_READ_SVC_USECS_MAX_BLOB IO_READ_WAIT_USECS_BLOB IO_READ_WAIT_USECS_MAX_BLOB IO_READ_SLOW_IOS_BLOB IO_READ_BYTES_PRIMARY IO_READ_REQUESTS_PRIMARY IO_READ_SVC_USECS_PRIMARY IO_READ_SVC_USECS_MAX_PRIMARY IO_READ_WAIT_USECS_PRIMARY IO_READ_WAIT_USECS_MAX_PRIMARY IO_READ_SLOW_IOS_PRIMARY IO_READ_BYTES_SECONDARY IO_READ_REQUESTS_SECONDARY IO_READ_SVC_USECS_SECONDARY IO_READ_SVC_USECS_MAX_SECONDARY IO_READ_WAIT_USECS_SECONDARY IO_READ_WAIT_USECS_MAX_SECONDARY IO_READ_SLOW_IOS_SECONDARY IO_INDEX_INSERTS QUERIES_USED QUERIES_EMPTY COMMENT_BYTES INNODB_ROW_LOCK_WAITS INNODB_ROW_LOCK_WAIT_TIMEOUTS INNODB_PAGES_READ INNODB_PAGES_READ_INDEX INNODB_PAGES_READ_BLOB INNODB_PAGES_WRITTEN INNODB_PAGES_WRITTEN_INDEX INNODB_PAGES_WRITTEN_BLOB +test t1 ROCKSDB 1000 0 0 1000 1001 0 0 0 0 0 0 0 0 0 0 0 0 1 999 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1001 0 0 0 0 0 0 0 0 0 0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result new file mode 100644 index 00000000000..7cc0cc7cd98 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE 
t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; +a +10 +ALTER TABLE t1 AUTO_INCREMENT=100; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=100 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; +a +10 +100 +ALTER TABLE t1 AUTO_INCREMENT=50; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=101 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; +a +10 +100 +101 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result new file mode 100644 index 00000000000..f904c04e0fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300 +ALTER TABLE t1 AVG_ROW_LENGTH=30000000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result new file mode 100644 index 00000000000..d9cc69ee2a1 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CHECKSUM=1 +ALTER TABLE t1 CHECKSUM=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result new file mode 100644 index 00000000000..0beddd9f6e3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t1; +CREATE DATABASE test_remote; +CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection' +ALTER TABLE t1 CONNECTION='test_connection2'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection2' +DROP TABLE t1; +DROP SERVER test_connection; +DROP SERVER test_connection2; +DROP DATABASE test_remote; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result 
b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result new file mode 100644 index 00000000000..5821369ae57 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '' INDEX DIRECTORY = ''; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 INDEX DIRECTORY = ''; +Warnings: +Warning 1618 option ignored +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result new file mode 100644 index 00000000000..c5d1ad8ace9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1 +ALTER TABLE t1 DELAY_KEY_WRITE=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result new file mode 100644 index 00000000000..bd5e65f59c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 
(a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 INSERT_METHOD=NO; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result new file mode 100644 index 00000000000..6c34d08b7eb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) KEY_BLOCK_SIZE=8 +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result new file mode 100644 index 00000000000..679e00e0771 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=10000000 +ALTER TABLE t1 MAX_ROWS=30000000; +SHOW CREATE TABLE 
t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=30000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result new file mode 100644 index 00000000000..bc650434b7a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=1 +ALTER TABLE t1 MIN_ROWS=10000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=10000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result new file mode 100644 index 00000000000..b42d3f4d450 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=1 +ALTER TABLE t1 PACK_KEYS=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result new file mode 100644 
index 00000000000..80ec79497ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 PASSWORD='new_password'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result new file mode 100644 index 00000000000..a0c480eaede --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) NOT NULL, + PRIMARY KEY (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) NOT NULL, + PRIMARY KEY (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result new file mode 100644 index 00000000000..fb021dbda18 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1, child1, child2; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb UNION(child1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY 
(`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 UNION = (child1,child2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1, child1, child2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result new file mode 100644 index 00000000000..ecfc72932cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb +DEFAULT CHARACTER SET = utf8 +COLLATE = utf8_general_ci +COMMENT = 'standard table options' +; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='standard table options' +ALTER TABLE t1 COMMENT = 'table altered'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 ENGINE=MEMORY; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=MEMORY DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) CHARACTER SET utf8 DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='table altered' +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result new file mode 100644 index 00000000000..c24ef9fc1c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result @@ -0,0 +1,936 @@ +create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10); +begin; +insert into t1 values (11,11,11); +select * from t1 where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 1 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +11 +rollback; +begin; +insert into t1 values (7,7,7); +select * from t1 where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 1 +2 2 2 +select value from t1 where 
value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +rollback; +begin; +update t1 set value2=100 where id=1; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where id=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value 
from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +100 +rollback; +begin; +update t1 set id=100 where id=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value value2 +100 1 1 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +100 1 1 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value2=100 where value=1; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where value=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 
+select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +100 +rollback; +begin; +update t1 set id=100 where value=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value value2 +100 1 1 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +100 1 1 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value2=100 where value2=1; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 
5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where value2=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +1 100 1 +2 2 2 +select * from t1; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +100 +rollback; +begin; +update t1 set id=100 where value2=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value 
value2 +100 1 1 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +100 1 1 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +delete from t1 where id=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +delete from t1 where value=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +delete from 
t1 where value2=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3; +id value value2 +2 2 2 +select * from t1 where value < 3; +id value value2 +2 2 2 +select value from t1 where value < 3; +value +2 +select * from t1 where value2 < 3; +id value value2 +2 2 2 +select * from t1; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +insert into t1 values (11,11,11); +insert into t1 values (12,12,12); +insert into t1 values (13,13,13); +delete from t1 where id=9; +delete from t1 where value=8; +update t1 set id=100 where value2=5; +update t1 set value=103 where value=4; +update t1 set id=115 where id=3; +select * from t1 where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +select * from t1 where value=5; +id value value2 +100 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +100 5 5 +select * from t1 where id < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3; +id value value2 +1 1 1 +2 2 2 +select value from t1 where value < 3; +value +1 +2 +select * from t1 where value2 < 3; +id value value2 +1 1 1 +2 2 2 +select * from t1; +id value value2 +1 1 1 +2 2 2 +4 103 4 +6 6 6 +10 10 10 +11 11 11 +12 12 12 +13 13 13 +100 5 5 +115 3 3 +select value from t1; +value +1 +2 +3 +5 +6 +10 +11 +12 +13 +103 +rollback; +drop table t1; diff 
--git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result new file mode 100644 index 00000000000..1544256f194 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +TRUNCATE TABLE t1; +INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c'); +TRUNCATE TABLE t1; +SELECT a,b FROM t1; +a b +DROP TABLE t1; +CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb; +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 (c) VALUES ('a'),('b'),('c'); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +TRUNCATE TABLE t1; +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 (c) VALUES ('d'); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 2 NULL NULL NULL latin1_swedish_ci NULL +SELECT a,c FROM t1; +a c +1 d +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) 
VALUES (1,'a'),(2,'b'),(3,'c'); +HANDLER t1 OPEN AS h1; +ERROR HY000: Table storage engine for 'h1' doesn't have this option +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result new file mode 100644 index 00000000000..7adf50f9ff3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +c varchar(500) not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +truncate table t1; +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +case when variable_value-@a < 500000 then 'true' else 'false' end +true +DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result new file mode 100644 index 00000000000..bbdd6d210fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY , +b0 BINARY(0) , +b1 BINARY(1) , +b20 BINARY(20) , +b255 BINARY(255) , +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. 
First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY 
pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) ; +ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES 
NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result new file mode 100644 index 00000000000..c5cffdc1a0d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result @@ -0,0 +1,80 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (b BINARY, +b20 BINARY(20) PRIMARY KEY, +v16 VARBINARY(16), +v128 VARBINARY(128) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b20 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); +EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT HEX(b20) FROM t1 ORDER BY b20; +HEX(b20) +6368617231000000000000000000000000000000 +6368617232000000000000000000000000000000 +6368617233000000000000000000000000000000 +6368617234000000000000000000000000000000 +EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +SELECT HEX(b20) FROM t1 ORDER BY b20 DESC; +HEX(b20) +6368617234000000000000000000000000000000 +6368617233000000000000000000000000000000 +6368617232000000000000000000000000000000 +6368617231000000000000000000000000000000 +DROP TABLE t1; +CREATE TABLE t1 (b BINARY, +b20 BINARY(20), +v16 VARBINARY(16), +v128 VARBINARY(128), +pk VARBINARY(10) PRIMARY KEY, +INDEX (v16(10)) +) 
ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 v16 1 v16 A 500 10 NULL YES LSMTREE +INSERT INTO t1 (b,b20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6); +INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1; +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v16 v16 13 NULL # Using where +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +HEX(SUBSTRING(v16,7,3)) +723161 +723161 +723161 +723161 +723261 +723261 +723261 +723261 +723361 +723361 +723461 +723461 +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v16 v16 13 NULL # Using where +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +HEX(SUBSTRING(v16,7,3)) +723161 +723161 +723161 +723161 +723261 +723261 +723261 +723261 +723361 +723361 +723461 +723461 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result new file mode 100644 index 00000000000..d385c0d4670 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT , +b BIT(20) , +c BIT(64) , +d BIT(1) , +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI 
b'0' +d bit(1) YES NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) ; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI b'0' +d bit(1) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) ) ENGINE=rocksdb; +ERROR 42000: Display width out of range for column 'a' (max = 64) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result new file mode 100644 index 00000000000..8da878eb0f2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result @@ -0,0 +1,58 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT, +b BIT(20) PRIMARY KEY, +c BIT(32), +d BIT(64) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 
PRIMARY 1 b A 1000 NULL NULL LSMTREE +INSERT INTO t1 (a,b,c,d) VALUES +(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13), +(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF); +EXPLAIN SELECT b+0 FROM t1 ORDER BY b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 3 NULL # Using index +SELECT b+0 FROM t1 ORDER BY b; +b+0 +11 +12 +13 +16 +100 +101 +256 +1000 +65535 +1048575 +DROP TABLE t1; +# TODO: Unique indexes are not enforced +CREATE TABLE t1 ( +a BIT, +b BIT(20), +c BIT(32), +d BIT(64), +pk BIT(10) PRIMARY KEY, +INDEX(a) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 a 1 a A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); +EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary; Using filesort +SELECT DISTINCT a+0 FROM t1 ORDER BY a; +a+0 +0 +1 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result new file mode 100644 index 00000000000..e36c91658fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b BLOB , +b0 BLOB(0) , +b1 BLOB(1) , +b300 BLOB(300) , +bm BLOB(65535) , +b70k BLOB(70000) , +b17m BLOB(17000000) , +t TINYBLOB , +m MEDIUMBLOB , +l 
LONGBLOB +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob YES NULL +b0 blob YES NULL +b1 tinyblob YES NULL +b300 blob YES NULL +bm blob YES NULL +b70k mediumblob YES NULL +b17m longblob YES NULL +t tinyblob YES NULL +m mediumblob YES NULL +l longblob YES NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 
0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result new file mode 100644 index 00000000000..26726e0f6d1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result @@ -0,0 +1,188 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A 1000 32 NULL LSMTREE +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +# # # # # PRIMARY # # # # +SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +f + + +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +# # # # # NULL # # # # +SELECT 
SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +f + + +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +pk INT AUTO_INCREMENT PRIMARY KEY, +UNIQUE INDEX l_t (l(256),t(64)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk # # NULL NULL # # +t1 0 l_t 1 l # # 256 NULL # # +t1 0 l_t 2 t # # 64 NULL # # +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range l_t l_t 259 NULL # Using where; Using filesort +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SUBSTRING(t,64) SUBSTRING(l,256) + + +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + + +fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + + + +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range l_t l_t 259 NULL # Using where; Using filesort +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SUBSTRING(t,64) 
SUBSTRING(l,256) + + +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + + +fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + + + +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +pk INT AUTO_INCREMENT PRIMARY KEY, +INDEX (m(128)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 m 1 m A 500 128 NULL YES LSMTREE +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +f +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +f +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b 
+00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b TINYBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b MEDIUMBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result new file mode 100644 index 00000000000..dd9dc6d1f9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result @@ -0,0 +1,73 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL , +b2 BOOLEAN +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) YES NULL +b2 tinyint(1) YES NULL +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +INSERT INTO t1 (b1,b2) VALUES (2,3); 
+SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED ; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL ; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL' at line 1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result new file mode 100644 index 00000000000..1e5ac0d44f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result @@ -0,0 +1,76 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR , +c0 CHAR(0) , +c1 CHAR(1) , +c20 CHAR(20) , +c255 CHAR(255) , +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) YES NULL +c0 char(0) YES NULL +c1 char(1) YES NULL +c20 char(20) YES NULL +c255 char(255) NO PRI +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES 
('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) ; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +CREATE TABLE t1(c1 CHAR(0) NOT NULL); +INSERT INTO t1 VALUES('a'); +Warnings: +Warning 1265 Data truncated for column 'c1' at row 1 +SELECT * FROM t1; +c1 + +DROP TABLE t1; +CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); +INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five'); +SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1; +a +five +four +one +three +two +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result new file mode 100644 index 00000000000..e8b913288c5 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result @@ -0,0 +1,73 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR, +c20 CHAR(20) PRIMARY KEY, +v16 VARCHAR(16), +v128 VARCHAR(128) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c20 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); +EXPLAIN SELECT c20 FROM t1 ORDER BY c20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT c20 FROM t1 ORDER BY c20; +c20 +char1 +char2 +char3 +char4 +EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +c20 +char1 +char2 +char3 +char4 +DROP TABLE t1; +CREATE TABLE t1 ( +c CHAR, +c20 CHAR(20), +v16 VARCHAR(16), +v128 VARCHAR(128), +pk VARCHAR(64) PRIMARY KEY, +INDEX (v16) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 v16 1 v16 A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4'); +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t1 index v16 v16 19 NULL # Using where; Using index +SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%'; +SUBSTRING(v16,7,3) +r1a +r1a +r2a +r3a +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +SUBSTRING(v16,7,3) +r1a +r1a +r2a +r3a +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL v16 NULL NULL NULL # Using where; Using filesort +SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +c c20 v16 v128 +a char1 varchar1a varchar1b +b char3 varchar1a varchar1b +c char4 varchar3a varchar3b +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result new file mode 100644 index 00000000000..9ab00243b6b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result @@ -0,0 +1,109 @@ +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t (id int not null auto_increment primary key, +c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +key sk (c)); +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +insert into t (c) values ('A'), ('b'), ('C'); +explain select c from t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk 4 NULL # Using index +select c from t; +c +A +b +C +select c from t where c = 'a'; +c +A +drop table t; +set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment primary key, +c varchar(8) 
CHARACTER SET utf8 COLLATE utf8_general_ci, +key sk (c)); +insert into t (c) values ('☀'), ('ß'); +explain select c from t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk 27 NULL # Using index +select c from t; +c +ß +☀ +drop table t; +set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment, +c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci, +primary key (id), +key sk1 (c1), +key sk2 (c2)); +explain select hex(c1) from t order by c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk1 4 NULL # Using index +explain select hex(c1) from t IGNORE INDEX (sk1) order by c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # Using filesort +explain select hex(c2) from t order by c2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk2 2 NULL # Using index +explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk2 2 NULL # Using index +truncate t; +insert into t (c1, c2) values ('Asdf ', 'Asdf '); +Warnings: +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c2' at row 1 +select char_length(c1), char_length(c2), c1, c2 from t; +char_length(c1) char_length(c2) c1 c2 +1 1 A A +drop table t; +create table t (id int not null auto_increment, +c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci, +primary key (id), +unique key sk2 (c2)); +insert into t (c2) values ('Asdf'); +insert into t (c2) values ('asdf '); +ERROR 23000: Duplicate entry 'asdf' for key 'sk2' +drop table t; +create table t (id int not null auto_increment, +c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +primary key (id), 
+unique key sk1 (c1)); +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('asdf '); +ERROR 23000: Duplicate entry 'asdf ' for key 'sk1' +insert into t (c1) values ('asdf'); +ERROR 23000: Duplicate entry 'asdf' for key 'sk1' +drop table t; +create table t (id int not null auto_increment, +c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +primary key (id), +unique key sk1 (c1(1))); +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('bbbb '); +insert into t (c1) values ('a '); +ERROR 23000: Duplicate entry 'a' for key 'sk1' +explain select c1 from t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # NULL +select c1 from t; +c1 +Asdf +bbbb +drop table t; +set session rocksdb_verify_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; +insert into t values (1, ' a'); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL email_i 33 NULL # Using index +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1; +insert into t values (1, REPEAT('a', 700)); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result new file mode 100644 index 00000000000..1a5ec573be7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DATE , +dt DATETIME , +ts TIMESTAMP , +t TIME , +y YEAR , +y4 YEAR(4) , +y2 YEAR(2) , +pk 
DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date YES NULL +dt datetime YES NULL +ts timestamp YES NULL +t time YES NULL +y year(4) YES NULL +y4 year(4) YES NULL +y2 year(4) YES NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1264 Out of range value for column 'dt' at row 1 +Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 +Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 +9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 
2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result new file mode 100644 index 00000000000..187330836df --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result @@ -0,0 +1,119 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME PRIMARY KEY, +ts TIMESTAMP, +t TIME, +y YEAR +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 dt A 1000 NULL NULL LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm)); +EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index +SELECT dt FROM t1 ORDER BY dt LIMIT 3; +dt +2010-11-22 11:43:14 +2010-11-22 12:33:54 +2011-08-27 21:33:56 +EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 
NULL # Using index +SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +dt +2010-11-22 11:43:14 +2010-11-22 12:33:54 +2011-08-27 21:33:56 +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'); +ERROR 23000: Duplicate entry '2010-11-22 12:33:54' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME, +ts TIMESTAMP, +t TIME, +y YEAR, +pk TIME PRIMARY KEY, +INDEX (ts) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 ts 1 ts A 500 NULL NULL YES LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00'); +EXPLAIN SELECT ts FROM t1 WHERE ts > NOW(); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index ts ts 5 NULL # Using where; Using index +SELECT ts FROM t1 WHERE ts > NOW(); +ts +EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +ts +DROP TABLE t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME, +ts TIMESTAMP, +t TIME, +y YEAR, +pk TIME PRIMARY KEY, +INDEX (y,t) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name 
Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 y 1 y A 250 NULL NULL YES LSMTREE +t1 1 y 2 t A 500 NULL NULL YES LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18'); +EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index y y 6 NULL # Using index +SELECT y, COUNT(*) FROM t1 GROUP BY y; +y COUNT(*) +1994 1 +1998 1 +1999 1 +2000 1 +2001 1 +2012 1 +EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index y y 6 NULL # Using index; Using temporary; Using filesort +SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +y COUNT(*) +1994 1 +1998 1 +1999 1 +2000 1 +2001 1 +2012 1 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result new file mode 100644 index 00000000000..7a7a5c7638c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result @@ -0,0 +1,179 @@ +drop table if exists t1, t2; +# +# Check that DECIMAL PK +# +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +# First, make the server to create a dataset in the old format: +set 
session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t1 ( +pk1 decimal(32,16), +pk2 decimal(32,16), +pk3 decimal(32,16), +a smallint not null, +primary key(pk1, pk2, pk3) +); +insert into t1 +select +A.a, B.a, C.a, 1234 +from t0 A, t0 B, t0 C; +# +# Looking at the table size, one can tell that the data is stored using +# old format: +# +set global rocksdb_force_flush_memtable_now=1; +# Check the format version: +select table_name,index_name,kv_format_version +from information_schema.ROCKSDB_DDL +where TABLE_SCHEMA=database() AND table_name='t1'; +table_name index_name kv_format_version +t1 PRIMARY 10 +flush tables; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +# Check that the new server reads the data in the old format: +select * from t1 order by pk1,pk2,pk3 limit 5; +pk1 pk2 pk3 a +0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 3.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 4.0000000000000000 1234 +# +# Ok, now, enable the new data format: +# +create table t2 ( +pk1 decimal(32,16), +pk2 decimal(32,16), +pk3 decimal(32,16), +a smallint not null, +primary key(pk1, pk2, pk3) +); +insert into t2 +select +A.a, B.a, C.a, 1234 +from t0 A, t0 B, t0 C; +set global rocksdb_force_flush_memtable_now=1; +larger +1 +# This should show the new PK data fromat +select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL +where TABLE_SCHEMA=database() AND table_name='t2'; +table_name index_name kv_format_version +t2 PRIMARY 11 +# +# Check that the server is able to read BOTH the old and the new formats: +# +select * from t2 limit 3; +pk1 pk2 pk3 a +0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 +select 
* from t1 limit 3; +pk1 pk2 pk3 a +0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 +0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 +drop table t1,t2; +drop table t0; +# +# Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. +# (Decoding happens from the mem-comparable image in the index, regardless +# of whether the value part has original value or not) +# +create table t1 ( +pk int not null primary key, +col1 decimal (2,1) signed, +col2 decimal (2,1) unsigned, +filler varchar(100), +key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-9.1, 0.7, 'filler'), +(2,-8.2, 1.6, 'filler'), +(3, 0.3, 2.5, 'filler'), +(4, 1.4, 3.4, 'filler'), +(5, 2.5, 4.3, 'filler'), +(6, 3.3, 5.3, 'filler'); +insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +# The following can't use index-only: +explain select * from t1 where col1 between -8 and 8; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using index condition +# This will use index-only: +explain +select col1, col2 from t1 where col1 between -8 and 8; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index +select col1, col2 from t1 where col1 between -8 and 8; +col1 col2 +0.3 2.5 +1.4 3.4 +2.5 4.3 +3.3 5.3 +insert into t1 values (11, NULL, 0.9, 'row1-with-null'); +insert into t1 values (10, -8.4, NULL, 'row2-with-null'); +explain +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +id select_type table 
type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +col1 col2 +NULL 0.9 +-9.1 0.7 +-8.4 NULL +-8.2 1.6 +# Try an UPDATE +select * from t1 where pk in (3,4); +pk col1 col2 filler +3 0.3 2.5 filler +4 1.4 3.4 filler +update t1 set col2= col2+0.2 where pk in (3,4); +select * from t1 where pk in (3,4); +pk col1 col2 filler +3 0.3 2.7 filler +4 1.4 3.6 filler +drop table t1; +# +# Try another DECIMAL-based type that takes more space +# +create table t1 ( +pk int not null primary key, +col1 decimal (12,6) signed, +col2 decimal (12,6) unsigned, +filler varchar(100), +key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-900.001, 000.007, 'filler'), +(2,-700.002, 100.006, 'filler'), +(3, 000.003, 200.005, 'filler'), +(4, 100.004, 300.004, 'filler'), +(5, 200.005, 400.003, 'filler'), +(6, 300.003, 500.003, 'filler'); +insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +explain +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 7 NULL # Using where; Using index +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +col1 col2 +-700.002000 100.006000 +0.003000 200.005000 +100.004000 300.004000 +200.005000 400.003000 +300.003000 500.003000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result new 
file mode 100644 index 00000000000..009b342182d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') , +b ENUM('test1','test2','test3','test4','test5') , +c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') , +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI test1 +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') ; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI test1 +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +e enum('a','A') YES NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + NULL + test2 4 NULL + test3 75 a + test5 2 NULL +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result new file mode 100644 index 00000000000..37d005485d6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result @@ -0,0 +1,69 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a 
ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), +b ENUM('test1','test2','test3','test4','test5'), +c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY +) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c) VALUES +('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'), +('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d'); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE +EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 1 NULL # Using where; Using index +SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +c +1d +1j +4z +5a +5b +EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +c +1d +1j +4z +5a +5b +DROP TABLE t1; +CREATE TABLE t1 ( +a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), +b ENUM('test1','test2','test3','test4','test5'), +c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), +pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, +INDEX(b) +) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 b 1 b A 500 NULL NULL YES LSMTREE +EXPLAIN SELECT DISTINCT b FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE 
t1 index NULL b 2 NULL # NULL +SELECT DISTINCT b FROM t1; +b +test1 +test2 +test3 +test4 +EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary +SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +b +test1 +test2 +test3 +test4 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result new file mode 100644 index 00000000000..45fd402f9b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result @@ -0,0 +1,131 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL , +d0 DECIMAL(0) , +d1_1 DECIMAL(1,1) , +d10_2 DECIMAL(10,2) , +d60_10 DECIMAL(60,10) , +n NUMERIC , +n0_0 NUMERIC(0,0) , +n1 NUMERIC(1) , +n20_4 NUMERIC(20,4) , +n65_4 NUMERIC(65,4) , +pk NUMERIC PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) YES NULL +d0 decimal(10,0) YES NULL +d1_1 decimal(1,1) YES NULL +d10_2 decimal(10,2) YES NULL +d60_10 decimal(60,10) YES NULL +n decimal(10,0) YES NULL +n0_0 decimal(10,0) YES NULL +n1 decimal(1,0) YES NULL +n20_4 decimal(20,4) YES NULL +n65_4 decimal(65,4) YES NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 
d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 
-0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 
-99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 
-7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ; +ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ; +ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ; +ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. 
+DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result new file mode 100644 index 00000000000..8aa80244908 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result @@ -0,0 +1,129 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2) PRIMARY KEY, +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 d1 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (d1,d2,n1,n2) VALUES +(10.22,60.12345,123456,14.3456), +(10.0,60.12345,123456,14), +(11.14,15,123456,13), +(100,100,1,2), +(0,0,0,0), +(4540424564.23,3343303441.0,12,13), +(15,17,23,100000); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index +SELECT d1 FROM t1 ORDER BY d1 DESC; +d1 +99999999.99 +100.00 +15.00 +11.14 +10.22 +10.00 +0.00 +EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index; Using filesort +SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +d1 +99999999.99 +100.00 +15.00 +11.14 +10.22 +10.00 +0.00 +DROP TABLE t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2), +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4), +pk NUMERIC PRIMARY KEY, +UNIQUE INDEX n1_n2 (n1,n2) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 
1000 NULL NULL LSMTREE +t1 0 n1_n2 1 n1 A 500 NULL NULL YES LSMTREE +t1 0 n1_n2 2 n2 A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT DISTINCT n1+n2 FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index n1_n2 n1_n2 37 NULL # Using index; Using temporary +SELECT DISTINCT n1+n2 FROM t1; +n1+n2 +0.0000 +100023.0000 +123469.0000 +123470.0000 +123470.3456 +25.0000 +3.0000 +DROP TABLE t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2), +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4), +pk DECIMAL(20,10) PRIMARY KEY, +INDEX (d2) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 d2 1 d2 A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index d2 d2 29 NULL # Using index +SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +d2 COUNT(*) +0.0000000000 1 +100.0000000000 1 +15.0000000000 1 +17.0000000000 1 +3343303441.0000000000 1 +60.1234500000 2 +EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index d2 d2 29 NULL # Using index; Using temporary; Using filesort +SELECT d2, COUNT(*) FROM t1 IGNORE INDEX 
FOR GROUP BY (d2) GROUP BY d2; +d2 COUNT(*) +0.0000000000 1 +100.0000000000 1 +15.0000000000 1 +17.0000000000 1 +3343303441.0000000000 1 +60.1234500000 2 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result new file mode 100644 index 00000000000..0f78926c89a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result @@ -0,0 +1,306 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT , +f0 FLOAT(0) , +r1_1 REAL(1,1) , +f23_0 FLOAT(23) , +f20_3 FLOAT(20,3) , +d DOUBLE , +d1_0 DOUBLE(1,0) , +d10_10 DOUBLE PRECISION (10,10) , +d53 DOUBLE(53,0) , +d53_10 DOUBLE(53,10) , +pk DOUBLE PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float YES NULL +f0 float YES NULL +r1_1 double(1,1) YES NULL +f23_0 float YES NULL +f20_3 float(20,3) YES NULL +d double YES NULL +d1_0 double(1,0) YES NULL +d10_10 double(10,10) YES NULL +d53 double(53,0) YES NULL +d53_10 double(53,10) YES NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, 
+999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 
99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 
1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for 
column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ; +ERROR 42000: Display width out of range for column 'd0_0' (max = 255) +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ; +ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ; +ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. 
+DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result new file mode 100644 index 00000000000..9a50f66870c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result @@ -0,0 +1,189 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT PRIMARY KEY, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 f A 1000 NULL NULL LSMTREE +INSERT INTO t1 (f,r,d,dp) VALUES +(1.2345,1422.22,1.2345,1234567.89), +(0,0,0,0), +(-1,-1,-1,-1), +(17.5843,4953453454.44,29229114.0,1111111.23), +(4644,1422.22,466664.999,0.5); +EXPLAIN SELECT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index +SELECT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk DOUBLE PRIMARY KEY, +UNIQUE KEY r_dp (r,dp) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 r_dp 1 r A 500 NULL NULL YES LSMTREE +t1 0 r_dp 2 dp A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), 
+(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index r_dp r_dp 18 NULL # Using where; Using index +SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +r dp +1422.220 0.5000000000 +1422.220 1234567.8900000000 +4953453454.440 1111111.2300000000 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +UNIQUE KEY(d) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 d 1 d A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index d d 9 NULL # Using index +SELECT DISTINCT d FROM t1 ORDER BY d; +d +-1 +0 +1.2345 +466664.999 +29229114 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +KEY(d) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 d 1 d A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index d d 9 NULL # Using index +SELECT DISTINCT d FROM t1 ORDER BY d; 
+d +-1 +0 +1.2345 +466664.999 +29229114 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +UNIQUE KEY(f) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 f 1 f A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); +ERROR 23000: Duplicate entry '1.2345' for key 'f' +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index f f 5 NULL # Using index +SELECT DISTINCT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +KEY(f) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 f 1 f A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index f f 5 NULL # Using index +SELECT DISTINCT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int.result 
b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result new file mode 100644 index 00000000000..06866e9b5bb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result @@ -0,0 +1,212 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT , +i0 INT(0) , +i1 INT(1) , +i20 INT(20) , +t TINYINT , +t0 TINYINT(0) , +t1 TINYINT(1) , +t20 TINYINT(20) , +s SMALLINT , +s0 SMALLINT(0) , +s1 SMALLINT(1) , +s20 SMALLINT(20) , +m MEDIUMINT , +m0 MEDIUMINT(0) , +m1 MEDIUMINT(1) , +m20 MEDIUMINT(20) , +b BIGINT , +b0 BIGINT(0) , +b1 BIGINT(1) , +b20 BIGINT(20) , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) YES NULL +i0 int(11) YES NULL +i1 int(1) YES NULL +i20 int(20) YES NULL +t tinyint(4) YES NULL +t0 tinyint(4) YES NULL +t1 tinyint(1) YES NULL +t20 tinyint(20) YES NULL +s smallint(6) YES NULL +s0 smallint(6) YES NULL +s1 smallint(1) YES NULL +s20 smallint(20) YES NULL +m mediumint(9) YES NULL +m0 mediumint(9) YES NULL +m1 mediumint(1) YES NULL +m20 mediumint(20) YES NULL +b bigint(20) YES NULL +b0 bigint(20) YES NULL +b1 bigint(1) YES NULL +b20 bigint(20) YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 
2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +SELECT 
i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for 
column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: 
+Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range 
value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 Out of range value for column 'm' at row 11 +Warning 1264 Out of range value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 
-8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 
9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) ; +ERROR 42000: Display width out of range for column 'i257' (max = 255) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result new file mode 100644 index 00000000000..e4c47f04c24 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result @@ -0,0 +1,99 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT PRIMARY KEY, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +EXPLAIN SELECT i FROM t1 ORDER BY i; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index +SELECT i FROM t1 ORDER BY i; +i +1 +2 +3 +5 +10 +11 +12 +101 +1000 +10001 +DROP TABLE t1; +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk SMALLINT AUTO_INCREMENT PRIMARY KEY, +INDEX s_m (s,m) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index s_m s_m 7 NULL # Using where; Using index +SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +s m +10000 1000000 +10000 1000000 +10002 10003 +103 104 +12 13 +13 14 +14 15 +3 4 +4 5 +5 6 +DROP TABLE t1; +# RocksDB: unique indexes 
allowed +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, +UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result new file mode 100644 index 00000000000..f401af46536 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('') , +b SET('test1','test2','test3','test4','test5') , +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') , +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c 
set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') ; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +e set('a','A') YES NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') ; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 NULL + test2,test4 NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result new file mode 100644 index 00000000000..62a3004e584 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result @@ -0,0 +1,80 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), +b SET('test1','test2','test3','test4','test5'), +c SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 7 +EXPLAIN SELECT c FROM t1 ORDER BY c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index +SELECT c 
FROM t1 ORDER BY c; +c + +01 +22 +39 +34,44 +01,23,34,44 +50 +EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index; Using filesort +SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +c + +01 +22 +39 +34,44 +01,23,34,44 +50 +INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02'); +ERROR 23000: Duplicate entry '22' for key 'PRIMARY' +INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44'); +ERROR 23000: Duplicate entry '34,44' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 ( +a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), +b SET('test1','test2','test3','test4','test5'), +c SET('01','22','23','33','34','39','40','44','50','63','64'), +pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, +INDEX(a) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 a 1 a A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (a,b,c,pk) VALUES +('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); +Warnings: +Warning 1265 Data truncated for column 'b' at row 7 +EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +a +Africa,Europe,Asia +S.America,Europe +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result new file mode 100644 index 
00000000000..bd10e3888c9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT , +t0 TEXT(0) , +t1 TEXT(1) , +t300 TEXT(300) , +tm TEXT(65535) , +t70k TEXT(70000) , +t17m TEXT(17000000) , +tt TINYTEXT , +m MEDIUMTEXT , +l LONGTEXT +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text YES NULL +t0 text YES NULL +t1 tinytext YES NULL +t300 text YES NULL +tm text YES NULL +t70k mediumtext YES NULL +t17m longtext YES NULL +tt tinytext YES NULL +m mediumtext YES NULL +l longtext YES NULL +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 't' at row 1 +Warning 1265 Data truncated for column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data truncated 
for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) ; +ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result new file mode 100644 index 00000000000..22318316596 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result @@ -0,0 +1,165 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +PRIMARY KEY t (t(32)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 t A 1000 32 NULL LSMTREE +INSERT INTO t1 (t,tt,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 
range PRIMARY PRIMARY 34 NULL # Using where; Using filesort +SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +f + + +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where; Using filesort +SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +f + + +DROP TABLE t1; +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +pk TINYTEXT PRIMARY KEY, +UNIQUE INDEX l_tt (l(256),tt(64)) +) ENGINE=rocksdb; +ERROR 42000: BLOB/TEXT column 'pk' used in key specification without a key length +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +pk MEDIUMTEXT, +PRIMARY KEY mt (pk(1)), +INDEX (m(128)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 1 NULL LSMTREE +t1 1 m 1 m A 500 128 NULL YES LSMTREE +INSERT INTO t1 (t,tt,m,l,pk) VALUES +('','','','','0'), +('a','b','c','d','1'), +('b','d','c','b','2'), +('test1','test2','test3','test4','3'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'), +('abc','def','ghi','jkl','5'), +('test2','test3','test4','test5','6'), +('test3','test4','test5','test6','7'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9'); +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +f +DROP TABLE t1; +CREATE TABLE t1 ( +b TEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES 
+('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b TINYTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b MEDIUMTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "binary" COLLATE "binary", +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); +SELECT hex(b) FROM t1; +hex(b) + +00 +20 +3030303030303030303030303030303030303030303030303030303030303030 +3030303030303030303030303030303030303030303030303030303030303031 +3030303030303030303030303030303030303030303030303030303030303032 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +PRIMARY KEY b 
(b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); +ERROR 23000: Duplicate entry ' ' for key 'PRIMARY' +INSERT INTO t1 (b) VALUES (''), (_binary 0x0); +SELECT hex(b) FROM t1; +hex(b) +00 + +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result new file mode 100644 index 00000000000..06afbb92802 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result @@ -0,0 +1,93 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) , +v1 VARBINARY(1) , +v64 VARBINARY(64) , +v65000 VARBINARY(65000) , +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI +v65000 varbinary(65000) YES NULL +CREATE TABLE t2 (v VARBINARY(65532) , PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. 
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. 
It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E64204
16E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676
820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E302
06F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) ; +Warnings: +Note 1246 Converting column 'v65536' from 
VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI +v65000 varbinary(65000) YES NULL +v65536 mediumblob YES NULL +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result new file mode 100644 index 00000000000..5c449da2b8f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -0,0 +1,743 @@ +drop table if exists t1,t2; +# +# A basic test whether endspace-aware variable length encoding +# works when in PK +# +create table t1 ( pk varchar(10) primary key, col1 int not null) ; +insert into t1 values ('a', 1); +select * from t1; +pk col1 +a 1 +drop table t1; +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARCHAR(0) , +v1 VARCHAR(1) , +v64 VARCHAR(64) , +v65000 VARCHAR(65000) , +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI +v65000 varchar(65000) YES NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. 
A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! 
+ o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. 
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) ; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI +v65000 varchar(65000) YES NULL +v65536 mediumtext YES NULL +DROP TABLE t1, t2; +# +# Endspace-comparison tests: +# +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET latin1 COLLATE latin1_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 
23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET latin1 COLLATE latin1_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL col1 67 NULL # Using index +select col1, hex(col1) from t1; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 67 NULL # Using where; Using index +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); 
+insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL col1 67 NULL # Using index +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +14 a 2020202020202020202020202020202061 17 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf8 COLLATE utf8_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values 
(concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL col1 195 NULL # Using index +select col1, hex(col1) from t1; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL col1 195 NULL # Using index +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 
2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +14 a 2020202020202020202020202020202061 17 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 0061 a +b 006200200020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 00610009 a-tab +a 006100200009 a-space-tab +a 0061 a +b 006200200020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 
varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +select col1, hex(col1) from t1; +col1 hex(col1) +ab 00610062 +a 00610020 +a 0061 +a 0061002000200009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 131 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 0061002000200009 +a 00610020 +a 0061 +ab 00610062 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # NULL +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 00200020002000200020002000200020 16 +12 0020002000200020002000200020002000200020002000200020002000200020 32 +13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48 +14 a 00200020002000200020002000200020002000200020002000200020002000200061 34 +21 002000200020002000200020002000200020 18 +22 
00200020002000200020002000200020002000200020002000200020002000200020 34 +23 002000200020002000200020002000200020002000200020002000200020002000200020 36 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 
values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +select col1, hex(col1) from t1; +col1 hex(col1) +ab 6162 +a 6120 +a 61 +a 61202009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 259 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # NULL +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +14 a 2020202020202020202020202020202061 17 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) 
where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf16 COLLATE utf16_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 0061 a +b 006200200020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 00610009 a-tab +a 006100200009 a-space-tab +a 0061 a +b 006200200020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf16 COLLATE utf16_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +select col1, hex(col1) from t1; +col1 hex(col1) +ab 00610062 +a 
00610020 +a 0061 +a 0061002000200009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 259 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 0061002000200009 +a 00610020 +a 0061 +ab 00610062 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # NULL +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 00200020002000200020002000200020 16 +12 0020002000200020002000200020002000200020002000200020002000200020 32 +13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48 +14 a 00200020002000200020002000200020002000200020002000200020002000200061 34 +21 002000200020002000200020002000200020 18 +22 00200020002000200020002000200020002000200020002000200020002000200020 34 +23 002000200020002000200020002000200020002000200020002000200020002000200020 36 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) 
rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +create table t1 ( +pk int primary key, +col1 varchar(10) collate utf8mb4_bin not null, +col2 varchar(20), +key(col1) +) engine=rocksdb; +insert into t1 values (1, 'ab','ab'); +insert into t1 values (2, 'ab\0','ab0'); +select pk, hex(col1), col2 from t1 force index(col1) order by col1; +pk hex(col1) col2 +2 616200 ab0 +1 6162 ab +select pk, hex(col1), col2 from t1 ignore index(col1) order by col1; +pk hex(col1) col2 +2 616200 ab0 +1 6162 ab +drop table t1; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc '); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL email_i 33 NULL # Using index +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; +set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; +set global rocksdb_checksums_pct = 100; +set session rocksdb_verify_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'a'); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL email_i 33 NULL # Using index +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; +set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result 
b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result new file mode 100644 index 00000000000..de7608ebb1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result @@ -0,0 +1,254 @@ +drop table if exists t1,t2; +set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL col1 195 NULL # Using index +select col1, hex(col1) from t1; 
+col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL col1 195 NULL # Using index +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +14 a 2020202020202020202020202020202061 17 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +set session debug= 
"-d,myrocks_enable_unknown_collation_index_only_scans"; +# +# Check backwards compatibility: +# +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +# Create the tables in the old format +create table t1 ( +pk varchar(64) collate latin1_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +# The following will not produce an error: +insert into t1 values ('a ', 'a-space'); +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +a 6120 a-space +create table t2 ( +pk int not null primary key, +col1 varchar(64) collate latin1_bin, +col2 varchar(64), +unique key (col1) +); +insert into t2 values (0, 'ab', 'a-b'); +# The following will not produce an error: +insert into t2 values (1, 'a ', 'a-space'); +insert into t2 values (2, 'a', 'a'); +select pk, col1, hex(col1), col2 from t2; +pk col1 hex(col1) col2 +0 ab 6162 a-b +1 a 6120 a-space +2 a 61 a +# Check the format version: +select table_name,index_name,kv_format_version +from information_schema.ROCKSDB_DDL +where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); +table_name index_name kv_format_version +t1 PRIMARY 10 +t2 PRIMARY 10 +t2 col1 10 +flush tables; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +a 6120 a-space +select pk, col1, hex(col1), col2 from t2; +pk col1 hex(col1) col2 +0 ab 6162 a-b +1 a 6120 a-space +2 a 61 a +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +a 6120 a-space +select pk, col1, hex(col1), col2 from t2; +pk col1 hex(col1) col2 +0 ab 6162 a-b +1 a 6120 a-space +2 a 61 a +drop table t1,t2; +# +# General upgrade tests to see that they work. 
+# +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( +id int primary key, +col1 varchar(64) collate latin1_swedish_ci, +unique key (col1) +) engine=rocksdb; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +insert into t2 values (1, 'a'); +insert into t2 values (2, 'b'); +insert into t2 values (3, 'c'); +insert into t2 values (4, 'c '); +select col1 from t2; +col1 +a +b +c +c +delete from t2 where id = 4; +alter table t2 engine=rocksdb; +select col1 from t2; +col1 +a +b +c +insert into t2 values (4, 'c '); +ERROR 23000: Duplicate entry 'c ' for key 'col1' +drop table t2; +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( +id int primary key, +col1 varchar(64) collate latin1_bin, +unique key (col1) +) engine=rocksdb; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +insert into t2 values (1, 'a'); +insert into t2 values (2, 'b'); +insert into t2 values (3, 'c'); +insert into t2 values (4, 'c '); +select col1 from t2; +col1 +a +b +c +c +delete from t2 where id = 4; +alter table t2 engine=rocksdb; +select col1 from t2; +col1 +a +b +c +insert into t2 values (4, 'c '); +ERROR 23000: Duplicate entry 'c ' for key 'col1' +drop table t2; +# +# Check what happens when one tries to 'upgrade' to the new data format +# and causes a unique key violation: +# +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( +pk int not null primary key, +col1 varchar(64) collate latin1_bin, +col2 varchar(64), +unique key (col1) +); +insert into t2 values (1, 'a ', 'a-space'); +insert into t2 values (2, 'a', 'a'); +select * from t2; +pk col1 col2 +1 a a-space +2 a a +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +alter table t2 engine=rocksdb; +ERROR 23000: Duplicate entry 'a' for key 'col1' +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result new file mode 100644 index 00000000000..0f3e7200d8a --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result @@ -0,0 +1,72 @@ +set debug_sync='RESET'; +drop table if exists t1; +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +begin; +insert into t1 values (1,1); +set session rocksdb_lock_wait_timeout=50; +begin; +insert into t1 values (1,2); +commit; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +commit; +select * from t1; +id value +1 1 +truncate table t1; +begin; +insert into t2 values (1,1,1); +begin; +insert into t2 values (2,1,2); +commit; +ERROR 23000: Duplicate entry '1' for key 'id2' +commit; +select * from t2; +id id2 value +1 1 1 +truncate table t2; +begin; +insert into t1 values (1,1); +begin; +insert into t1 values (1,2); +rollback; +commit; +select * from t1; +id value +1 2 +truncate table t1; +begin; +insert into t2 values (1,1,1); +begin; +insert into t2 values (2,1,2); +rollback; +commit; +select * from t2; +id id2 value +2 1 2 +truncate table t2; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1'; +insert into t1 values (1,1); +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2'; +insert into t2 values (1,1,1); +set debug_sync='now WAIT_FOR parked1'; +set debug_sync='now WAIT_FOR parked2'; +set session rocksdb_lock_wait_timeout=1; +insert into t1 values (1,2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +insert into t2 values (2,1,2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.id2 +set debug_sync='now SIGNAL go1'; +set debug_sync='now SIGNAL go2'; +insert into t1 values (1,2); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +insert into t2 values (2,1,2); +ERROR 23000: Duplicate entry '1' for key 'id2' +select * from t1; +id value +1 1 +select * from t2; +id id2 value 
+1 1 1 +set debug_sync='RESET'; +drop table t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result new file mode 100644 index 00000000000..59ad709a595 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -0,0 +1,185 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), +id4 INT, id5 VARCHAR(32), +value1 INT, value2 INT, value3 VARCHAR(32), +PRIMARY KEY (id1, id2) , +UNIQUE INDEX (id2, id1) , +UNIQUE INDEX (id2, id3, id4) , +INDEX (id1) , +INDEX (id3, id1) , +UNIQUE INDEX(id5) , +INDEX (id2, id5)) ENGINE=ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test inserting a key that returns duplicate error +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY' +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY' +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2' +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +ERROR 23000: Duplicate entry '1' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +ERROR 23000: Duplicate entry '5' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); +ERROR 23000: Duplicate entry '10' for key 'id5' +# Test updating a key that returns duplicate error +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +UPDATE t1 SET id2=1, id3=1, id4=1; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +SELECT COUNT(*) 
FROM t1; +COUNT(*) +10 +# Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set value3=value1; +# Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), +(24, 24, 24, 24, 24, 24, 24, 24), +(25, 10, 10, 10, 25, 25, 25, 25), +(26, 26, 26, 26, 26, 26, 26, 26); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Primary key should prevent duplicate on insert +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +# Primary key should prevent duplicate on update +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +# Unique secondary key should prevent duplicate on insert +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# Unique secondary key should prevent duplicate on update +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +ERROR HY000: Lock 
wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +UPDATE t1 SET id5=34 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 31, 32, 33, 37, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 37, 37, 37, 34, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), +(38, 31, 32, NULL, 38, 37, 37, 37), +(39, 31, 32, NULL, 39, 37, 37, 37); +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +# Fail on duplicate key update for row added in our transaction +UPDATE t1 SET id5=37 WHERE id1=38; +ERROR 23000: Duplicate entry '37' for key 'id5' +# Fail on lock timeout for row modified in another transaction +UPDATE t1 SET id5=34 WHERE id1=38; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# NULL values are unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; +COMMIT; +COMMIT; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +# When transaction is pending, fail on lock acquisition +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on 
index: test.t1.id2_2 +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +COMMIT; +# When transaction is committed, fail on duplicate key +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +Got one of the listed errors +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2' +ROLLBACK; +SELECT * FROM t1; +id1 id2 id3 id4 id5 value1 value2 value3 +1 1 1 1 1 2 0 0 +2 2 2 2 2 3 1 1 +3 3 3 3 3 4 1 1 +4 4 4 4 4 5 2 2 +5 5 5 5 5 6 2 2 +6 6 6 6 6 7 3 3 +7 7 7 7 7 8 3 3 +8 8 8 8 8 9 4 4 +9 9 9 9 9 10 4 4 +10 10 10 10 10 11 5 5 +20 20 20 NULL NULL 20 20 20 +21 20 20 NULL NULL 20 20 20 +22 20 20 NULL NULL 20 20 20 +30 31 32 33 34 30 30 30 +37 31 32 NULL 37 37 37 37 +38 31 32 NULL 38 37 37 37 +39 31 32 NULL 39 37 37 37 +40 40 40 40 40 40 40 40 +DROP TABLE t1; +# +# Issue #88: Creating unique index over column with duplicate values succeeds +# +create table t1 (pk int primary key, a int) engine=rocksdb; +insert into t1 values +(1, 1), +(2, 2), +(3, 3), +(4, 1), +(5, 5); +alter table t1 add unique(a); +ERROR 23000: Duplicate entry '1' for key 'a' +drop table t1; +# +# Issue #111 +# +CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16; +INSERT INTO t2 VALUES (1,1); +INSERT INTO t2 VALUES (1,1); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t2 VALUES (2,1); +ERROR 23000: Duplicate entry '1' for key 'a' +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result new file mode 100644 index 00000000000..0ff55ac8d10 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result @@ -0,0 +1,162 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), +id4 INT, id5 VARCHAR(32), +value1 INT, value2 INT, value3 VARCHAR(32), +PRIMARY KEY (id1, id2) COMMENT 'rev:cf', +UNIQUE INDEX (id2, id1) COMMENT 
'rev:cf', +UNIQUE INDEX (id2, id3, id4) COMMENT 'rev:cf', +INDEX (id1) COMMENT 'rev:cf', +INDEX (id3, id1) COMMENT 'rev:cf', +UNIQUE INDEX(id5) COMMENT 'rev:cf', +INDEX (id2, id5)) ENGINE=ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test inserting a key that returns duplicate error +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY' +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY' +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2' +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +ERROR 23000: Duplicate entry '1' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +ERROR 23000: Duplicate entry '5' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); +ERROR 23000: Duplicate entry '10' for key 'id5' +# Test updating a key that returns duplicate error +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +UPDATE t1 SET id2=1, id3=1, id4=1; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set value3=value1; +# Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (21, 
20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), +(24, 24, 24, 24, 24, 24, 24, 24), +(25, 10, 10, 10, 25, 25, 25, 25), +(26, 26, 26, 26, 26, 26, 26, 26); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Primary key should prevent duplicate on insert +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +# Primary key should prevent duplicate on update +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +# Unique secondary key should prevent duplicate on insert +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# Unique secondary key should prevent duplicate on update +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +UPDATE t1 SET id5=34 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 31, 32, 33, 37, 37, 37, 37), +(38, 38, 38, 38, 38, 
38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 37, 37, 37, 34, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), +(38, 31, 32, NULL, 38, 37, 37, 37), +(39, 31, 32, NULL, 39, 37, 37, 37); +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +# Fail on duplicate key update for row added in our transaction +UPDATE t1 SET id5=37 WHERE id1=38; +ERROR 23000: Duplicate entry '37' for key 'id5' +# Fail on lock timeout for row modified in another transaction +UPDATE t1 SET id5=34 WHERE id1=38; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +# NULL values are unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; +COMMIT; +COMMIT; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +# When transaction is pending, fail on lock acquisition +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +COMMIT; +# When transaction is committed, fail on duplicate key +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +Got one of the listed errors +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2' +ROLLBACK; +SELECT * FROM t1; +id1 id2 id3 id4 id5 value1 value2 value3 +40 40 40 40 40 40 40 40 +39 31 32 NULL 39 37 37 37 +38 31 32 NULL 38 37 37 
37 +37 31 32 NULL 37 37 37 37 +30 31 32 33 34 30 30 30 +22 20 20 NULL NULL 20 20 20 +21 20 20 NULL NULL 20 20 20 +20 20 20 NULL NULL 20 20 20 +10 10 10 10 10 11 5 5 +9 9 9 9 9 10 4 4 +8 8 8 8 8 9 4 4 +7 7 7 7 7 8 3 3 +6 6 6 6 6 7 3 3 +5 5 5 5 5 6 2 2 +4 4 4 4 4 5 2 2 +3 3 3 3 3 4 1 1 +2 2 2 2 2 3 1 1 +1 1 1 1 1 2 0 0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result new file mode 100644 index 00000000000..32776e19767 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1); +SET session transaction isolation level read uncommitted; +begin; +insert into t1 values (2,1); +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED +select * from t1 where id=1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED +rollback; +SET session transaction isolation level serializable; +begin; +insert into t1 values (2,1); +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level SERIALIZABLE +select * from t1 where id=1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. 
Please change from current isolation level SERIALIZABLE +rollback; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update.result b/storage/rocksdb/mysql-test/rocksdb/r/update.result new file mode 100644 index 00000000000..33bf8d09405 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update.result @@ -0,0 +1,113 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +UPDATE t1 SET a=a+100; +SELECT a,b FROM t1; +a b +101 a +101 a +10100 foobar +10100 foobar +102 b +102 b +103 c +103 c +104 d +104 d +105 e +105 e +UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100; +SELECT a,b FROM t1; +a b +1 NULL +1 NULL +10000 NULL +10000 NULL +2 NULL +2 NULL +3 NULL +3 NULL +4 NULL +4 NULL +5 NULL +5 NULL +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1; +SELECT a,b FROM t1; +a b +1 NULL +1 update +10000 NULL +10000 NULL +2 NULL +2 NULL +3 NULL +3 NULL +4 NULL +4 NULL +5 NULL +5 NULL +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +BEGIN; +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100; +COMMIT; +SELECT * FROM t1 ORDER BY pk; +a b pk +10050 NULL 12 +10050 NULL 6 +51 NULL 1 +51 NULL 7 +52 NULL 2 +52 NULL 8 +53 NULL 3 +53 NULL 9 +54 NULL 10 +54 NULL 4 +55 NULL 11 +55 NULL 5 +BEGIN; +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3; +UPDATE t1 SET b = ''; +ROLLBACK; +SELECT * FROM t1 ORDER BY pk; +a b pk +51 NULL 1 +52 NULL 2 +53 NULL 3 +54 NULL 4 +55 NULL 5 +10050 NULL 6 +51 NULL 7 +52 NULL 8 +53 NULL 9 +54 NULL 10 +55 NULL 11 +10050 NULL 12 +BEGIN; +UPDATE t1 SET b = 'update2' WHERE a <= 100; +SAVEPOINT spt1; +UPDATE t1 SET b = 
''; +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +UPDATE t1 SET b = 'upd' WHERE a = 10050; +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT * FROM t1 ORDER BY pk; +a b pk +51 NULL 1 +52 NULL 2 +53 NULL 3 +54 NULL 4 +55 NULL 5 +10050 NULL 6 +51 NULL 7 +52 NULL 8 +53 NULL 9 +54 NULL 10 +55 NULL 11 +10050 NULL 12 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result new file mode 100644 index 00000000000..9dda807edea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1 ORDER BY pk; +a b +1 upd1 +2 b +3 c +4 d +5 e +10000 foobar +1 a +2 b +3 c +4 d +5 e +10000 foobar +UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b' +WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +ERROR 21000: Subquery returns more than 1 row +UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b' +WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +Warnings: +Error 1242 Subquery returns more than 1 row +SELECT a,b FROM t1 ORDER BY pk; +a b +1 upd2a +2 upd2a +3 upd2a +4 upd2a +5 upd2a +10000 upd2a +1 a +2 upd2a +3 upd2a +4 upd2a +5 upd2a +10000 upd2a +SELECT c,d FROM t2 ORDER BY pk; +c d +upd2b 1 +upd2b 2 +upd2b 3 +upd2b 4 +upd2b 5 +upd2b 10000 +upd2b 1 +upd2b 2 +upd2b 3 +upd2b 4 +upd2b 5 
+upd2b 10000 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result new file mode 100644 index 00000000000..294c07b2a79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result @@ -0,0 +1,691 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 
+foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a 
+101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 
STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c 
CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES 
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result new file mode 100644 index 00000000000..576fe6f6754 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +UPDATE t1 SET a=100, b='f' WHERE b IN ('b','c'); +UPDATE t1 SET b='m' WHERE b = 'f'; +UPDATE t1 SET b='z' WHERE a < 2; +UPDATE t1 SET b=''; +SELECT a,b FROM t1; +a b +1 +100 +100 +4 +5 +6 +7 +8 +DROP TABLE t1; +# RocksDB: skip the test for secondary UNIQUE keys. 
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +UPDATE t1 SET a=205 WHERE a=200; +ERROR 23000: Duplicate entry '205' for key 'PRIMARY' +UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1; +SELECT a,b FROM t1; +a b +0 a +12345 e +200 f +201 a +202 b +203 c +204 d +UPDATE t1 SET a=80 WHERE a IN (202,203); +ERROR 23000: Duplicate entry '80' for key 'PRIMARY' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result new file mode 100644 index 00000000000..92c5207046a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4; +Expect errors that we are missing two .frm files + RocksDB: Schema mismatch - Table test.t1 is registered in RocksDB but does not have a .frm file + RocksDB: Schema mismatch - Table test.t2 is registered in RocksDB but does not have a .frm file +Expect an error that we have an extra .frm file + RocksDB: Schema mismatch - A .frm file exists for table test.t1_dummy, but that table is not registered in RocksDB +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result new file mode 100644 index 00000000000..433c00c9fc8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -0,0 +1,39 @@ +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=true; +create table aaa (id int primary key, i int) engine rocksdb; +SET LOCAL rocksdb_write_sync=off; +select variable_value into @a from information_schema.global_status where 
variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(1,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +insert aaa(id, i) values(2,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +insert aaa(id, i) values(3,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +SET LOCAL rocksdb_write_sync=1; +insert aaa(id, i) values(4,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +1 +insert aaa(id, i) values(5,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +2 +insert aaa(id, i) values(6,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +3 +SET GLOBAL rocksdb_background_sync=on; +SET LOCAL rocksdb_write_sync=off; +insert aaa(id, i) values(7,1); +truncate table aaa; +drop table aaa; +SET GLOBAL rocksdb_write_sync=off; +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=false; +SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk new file mode 100644 index 00000000000..4617b6d9fc3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk @@ -0,0 +1,27 @@ +#!/bin/awk + +/Query_time:/ { + results["Rows_examined:"] = "uninit"; + results["RocksDB_key_skipped:"] = "uninit"; + results["RocksDB_del_skipped:"] = "uninit"; + + for (i = 2; i <= NF; i = i+2) { + results[$i] = $(i+1); + } + + # If the output format has changed and we don't find these keys, + # error out. 
+ if (results["Rows_examined:"] == "uninit" || + results["RocksDB_key_skipped:"] == "uninit" || + results["RocksDB_del_skipped:"] == "uninit") { + exit(-2); + } + + if (results["Rows_examined:"] == 0) { + next + } + if (results["RocksDB_key_skipped:"] == 0 || + results["RocksDB_del_skipped:"] == 0) { + exit(-1); + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/1st.test b/storage/rocksdb/mysql-test/rocksdb/t/1st.test new file mode 100644 index 00000000000..cecef8b7537 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/1st.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# which will be used in almost every other test, +# and will not be checked through support* variables. +# If this test does not pass, there is no point +# at executing other ones. +# +# Minimal requirements: +# - supported column types: INT, CHAR (default CHAR(8), INT(11)); +# - column attributes as declared in define_engine.inc ($default_col_opts) +# (by default empty, which means no additional attributes apart from the type); +# - table attributes as declared in define_engine.inc ($default_tbl_opts) +# (by default empty, which means no additional attributes apart from ENGINE); +# - CREATE TABLE .. (column1 , column2 ) ENGINE=; +# - INSERT INTO TABLE .. VALUES (val1,val2); +# - DROP TABLE .. +# - SELECT a,b FROM .. +# - SHOW CREATE TABLE .. +# - SHOW COLUMNS IN ... 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES (1, 1,'a'); +INSERT INTO t1 (a,b) VALUES (2,'b'); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf new file mode 100644 index 00000000000..45ec29033c6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf @@ -0,0 +1,5 @@ +[mysql] +no-defaults + +[mysqld.1] +rocksdb_strict_collation_check=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test new file mode 100644 index 00000000000..e0d7a4465c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -0,0 +1,290 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test adding index inplace +## + +# test basic add +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +# test add + drop (simultaneous) +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result 
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +## test multi-drop + multi-add +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +# test multi add + drop (simultaneous) +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +DROP TABLE t1; + +# test dropping and adding a key simultaneously w/ same name but different col +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +## +## test adding index inplace w/ various column types +## + +# test basic add +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE 
t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +## test add + drop (simultaneous) +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +### test multi-drop + multi-add +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +## +## test adding via CREATE/DROP index syntax +## +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +CREATE INDEX kb on t1 (b); +CREATE INDEX kba on t1 (b,a); +DROP INDEX ka on t1; +DROP INDEX kab on t1; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +DROP TABLE t1; + +# +# Create tables with partitions and try to update/select from them. 
+# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +DROP INDEX kij ON t1; +SHOW CREATE TABLE t1; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# +# test crash recovery +# + +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +--echo # crash_during_online_index_creation +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,crash_during_online_index_creation"; +--error 2013 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,crash_during_online_index_creation"; + +SHOW CREATE TABLE t1; +CHECK TABLE t1; + +DROP TABLE t1; + +# +# Test crash recovery with partitioned tables +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,crash_during_index_creation_partition"; +--error 2013 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,crash_during_index_creation_partition"; + +SHOW CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# 
were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# +# Test rollback on partitioned tables for inplace alter +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; + +--echo # expected assertion failure from sql layer here for alter rollback +call mtr.add_suppression("Assertion `0' failed."); +call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); + +--error 2013 + +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; + +SHOW CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# test failure in prepare phase (due to collation) +CREATE TABLE t1 (a INT, b TEXT); + +--error 1105 +ALTER TABLE t1 ADD KEY kb(b(10)); +ALTER TABLE t1 ADD PRIMARY KEY(a); +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test new file mode 100644 index 00000000000..2ad2c390d59 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -0,0 +1,102 @@ +--source 
include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# Create a table with a primary key and one secondary key as well as one +# more column +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin'; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` + +# Create a text file with data to import into the table. +# The primary key is in sorted order and the secondary keys are randomly generated +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>>', $fn) || die "perl open($fn): $!"; +my $max = 3000000; +my @chars = ("A".."Z", "a".."z", "0".."9"); +my @lowerchars = ("a".."z"); +my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); +for (my $ii = 0; $ii < $max; $ii++) +{ + my $pk; + my $tmp = $ii; + foreach (@powers_of_26) + { + $pk .= $lowerchars[$tmp / $_]; + $tmp = $tmp % $_; + } + + my $num = int(rand(25)) + 6; + my $a; + $a .= $chars[rand(@chars)] for 1..$num; + + $num = int(rand(25)) + 6; + my $b; + $b .= $chars[rand(@chars)] for 1..$num; + print $fh "$pk\t$a\t$b\n"; +} +close($fh); +EOF + +--file_exists $file + +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--enable_query_log +set rocksdb_bulk_load=0; + +# Make sure all the data is there. 
+select count(pk) from t1; +select count(a) from t1; +select count(b) from t1; + +# now do fast secondary index creation +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +# disable duplicate index warning +--disable_warnings +# now do same index using copy algorithm +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +--enable_warnings + +# checksum testing +SELECT COUNT(*) as c FROM +(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`) +UNION DISTINCT +SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', +`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE +INDEX(`kb_copy`)) as temp; + +select count(*) from t1 FORCE INDEX(kb); +select count(*) from t1 FORCE INDEX(kb_copy); +select count(*) from t1 FORCE INDEX(PRIMARY); + +# drop the index +ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE; + +# add two indexes simultaneously +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(kab); +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# Reverse CF testing, needs to be added to SSTFileWriter in reverse order +CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test new file mode 100644 index 00000000000..033b1325151 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test @@ 
-0,0 +1,22 @@ + +# +# Tests concurrent inserts for tables with no primary key. +# + +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +--echo # Binary must be compiled with debug for this test +--source include/have_debug.inc + +# create the actual table +CREATE TABLE t1 (a INT) ENGINE=rocksdb; + +let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py root 127.0.0.1 $MASTER_MYPORT test t1 100 4; +exec $exec; + +SELECT COUNT(*) from t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test new file mode 100644 index 00000000000..d1fe15b98fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test @@ -0,0 +1,91 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# for tables without primary keys. A hidden pk will be generated under the hood +# in myrocks. Everything should work here as normal. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# test CREATE +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## test ALTER +CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,'a'),(5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; + +--sorted_result +SELECT * FROM t1; +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; + +## test creating a table with primary and then dropping that key +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CHECK TABLE +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. 
Or, the output might say that +# the storage engine does not support CHECK. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb; + +CHECK TABLE t1; +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; + +DROP TABLE t1, t2; + +# test disabling unique keys +--error 1105 +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; + +## test restarting a table that has no data +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +--source include/restart_mysqld.inc + +## single delete statement should remove MULTIPLE rows (aka duplicate rows) +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (36,'foo'); +DELETE FROM t1 WHERE a = 35 AND b = 'foo'; +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test new file mode 100644 index 00000000000..1f3ef49e534 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test @@ -0,0 +1,137 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# for tables without primary keys. A hidden pk will be generated under the hood +# in myrocks. Everything should work here as normal. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +## test CREATE with SK +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## test adding/dropping sk w/no pk +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +ALTER TABLE t1 ADD INDEX (b); +--source no_primary_key_basic_ops.inc + +ALTER TABLE t1 DROP INDEX b; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test dropping pk w/ sk +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +--echo # +--echo # MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB; +ALTER TABLE t1 DROP COLUMN `pk`; +DROP TABLE t1; + +# create table with multiple sk, make sure it still works +# test CREATE with SK +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CREATE table with multi-part sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CREATE table with more than one sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test check table with sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +INSERT INTO t1 (a) VALUES 
(13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +DROP TABLE t1; + +## tables with multi-part secondary indexes + columns that dont belong to any +## secondary indexes +CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4); +INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8); +INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13); +INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17); + +--sorted_result +SELECT * FROM t1; +--sorted_result +SELECT * FROM t1 WHERE a = 1 OR a = 10; +--sorted_result +SELECT * FROM t1 WHERE c = 3 OR d = 17; +--sorted_result +SELECT * FROM t1 WHERE a > 5 OR d > 5; + +# force some of these selects to use different indexes and/or have the columns +# being selected also not contain column d +--sorted_result +SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11; +--sorted_result +SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12; + +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-100, b=99 WHERE a>100; +--sorted_result +SELECT * FROM t1; + +DELETE FROM t1 WHERE a>5; +DELETE FROM t1 WHERE b=99 AND d>4; +--sorted_result +SELECT * FROM t1; + +TRUNCATE TABLE t1; +DROP TABLE t1; + +## secondary indexes live in reverse column families +CREATE TABLE t1 (a INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b) +comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## https://github.com/facebook/mysql-5.6/issues/209 +## Accidental single delete caused data inconsistency +CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB; +INSERT INTO t1 (col1, col2) values (2,2); +ALTER TABLE t1 ADD COLUMN extra INT; +UPDATE t1 SET col2 = 1; +select * from t1; +DELETE FROM t1 WHERE col1 = 2; + +# flush memtable to cause compaction to occur. 
+# During compaction, if a SingleDelete occurs then the delete marker and the +# key it is deleting are both removed. This will cause data inconsistency if +# SingleDelete is called on PK, since we do multiple Put() operations to update +# primary keys. +set global rocksdb_force_flush_memtable_now = true; + +select * from t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test new file mode 100644 index 00000000000..e3ac4307c54 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test @@ -0,0 +1,30 @@ +--source include/have_rocksdb.inc + +# Issue221 +# Turning on --rocksdb-allow-mmap-reads while having --rocksdb-allow-os-buffer +# off caused an assertion in RocksDB. Now it should not be allowed and the +# server will not start with that configuration + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart +# the serve runtil it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Clear the log +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +# Attempt to restart the server with invalid options +--exec echo "restart:--rocksdb_allow_os_buffer=0 --rocksdb_allow_mmap_reads=1" >$_expect_file_name +--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart +--exec echo "restart:" >$_expect_file_name + +# Cleanup +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message +--exec grep "disable allow_os_buffer" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test new file mode 100644 index 00000000000..2603311da55 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test @@ -0,0 +1,94 @@ +--source include/have_rocksdb.inc + +# +# Basic ALTER TABLE statements. +# +# USAGE of table options in ALTER statements +# is covered in tbl_standard_opts and tbl_opt*.tests. +# +# Index operations are covered in index* tests. +# +# ALTER OFFLINE is not covered as it is not supported, as of 5.5.23 +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z'); + +# Column operations + +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0'; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ALTER a DROP DEFAULT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE b1 b INT AFTER c; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE b b CHAR(8); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b INT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b INT AFTER a; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; + + +# Rename table + +ALTER TABLE t1 RENAME TO t2; +--error ER_NO_SUCH_TABLE +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; +DROP TABLE t2; + + +# ORDER BY +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC; +SHOW CREATE TABLE t1; +SELECT * FROM t1; +DROP TABLE t1; + + +# Character set, collate + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs; +INSERT INTO t1 VALUES (1,5,'z','t'); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 
COLLATE latin1_general_ci; +SHOW CREATE TABLE t1; + + +# A 'null' ALTER operation + +ALTER TABLE t1 FORCE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test new file mode 100644 index 00000000000..10722194121 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test @@ -0,0 +1,31 @@ +--source include/have_rocksdb.inc + +# +# ANALYZE TABLE statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b'); +CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (3,3,'c'); +ANALYZE TABLE t1; +INSERT INTO t2 VALUES (1,4,'d'); +ANALYZE NO_WRITE_TO_BINLOG TABLE t2; +INSERT INTO t1 VALUES (4,5,'e'); +INSERT INTO t2 VALUES (2,6,'f'); +ANALYZE LOCAL TABLE t1, t2; + +DROP TABLE t1, t2; + + --let $create_definition = a $int_indexed_col, $default_index(a) +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7); +ANALYZE TABLE t1; +INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12); +ANALYZE TABLE t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test new file mode 100644 index 00000000000..4f759a8ec60 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + +# Tests the Apply_changes_iter class for walking forward and backwards +# with data in both the transaction class and in the rocksdb storage layer + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY, + key1 INT NOT NULL, + KEY (key1) +) ENGINE=ROCKSDB; + +INSERT INTO t1 VALUES 
(12,12); +INSERT INTO t1 VALUES (6,6); +BEGIN; +INSERT INTO t1 VALUES (8,8), (10,10); +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +ROLLBACK; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY, + key1 INT NOT NULL, + KEY (key1) COMMENT 'rev:cf' +) ENGINE=ROCKSDB; + +INSERT INTO t2 VALUES (12,12); +INSERT INTO t2 VALUES (6,6); +BEGIN; +INSERT INTO t2 VALUES (8,8), (10,10); +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +ROLLBACK; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test new file mode 100644 index 00000000000..68ad21bea1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (pk) VALUES (3), (2), (1); +SELECT * FROM t1; + +--source include/restart_mysqld.inc + +INSERT INTO t1 (pk) VALUES (4); +SELECT * FROM t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test new file mode 100644 index 00000000000..c3f3550e303 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test @@ -0,0 +1,65 @@ +--source include/have_rocksdb.inc + +# +# auto-increment-offset and auto-increment-increment +# + 
+############################################ +# TODO: +# This test currently produces wrong result +# on the line 36 of the result file and further +# due to bug MySQL:47118. +# When/if the bug is fixed, +# the result will need to be updated +############################################ + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #--------------------------- +--echo # auto_increment_offset +--echo #--------------------------- +SET auto_increment_offset = 200; + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; + +# If auto_increment_offset is greater than auto_increment_increment, +# the offset is ignored + +INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; + +--echo #--------------------------- +--echo # auto_increment_increment +--echo #--------------------------- + +SET auto_increment_increment = 300; +# offset should not be ignored anymore + +INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; + +SET auto_increment_increment = 50; +INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; +DROP TABLE t1; + + +--echo #--------------------------- +--echo # offset is greater than the max value +--echo #--------------------------- + +SET auto_increment_increment = 500; +SET auto_increment_offset = 300; + +CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (NULL); +SELECT LAST_INSERT_ID(); +SELECT a FROM t1 ORDER BY a; +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test new file mode 100644 index 00000000000..375571f705d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test @@ -0,0 +1,3 @@ +--source include/have_rocksdb.inc + +--echo # The test 
checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt new file mode 100644 index 00000000000..8600e9e415c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt @@ -0,0 +1,2 @@ +--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc new file mode 100644 index 00000000000..acc1a9f2365 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -0,0 +1,63 @@ +--source include/have_rocksdb.inc + +let tmpl_ddl= suite/rocksdb/t/bloomfilter_table_def.tmpl; +let ddl= $MYSQL_TMP_DIR/bloomfilter_create.sql; + +DELIMITER //; +CREATE PROCEDURE bloom_start() +BEGIN + select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +DELIMITER ;// + + +#BF is sometimes invoked and useful +--exec sed s/##CF##//g $tmpl_ddl > $ddl +--source $ddl +--source suite/rocksdb/t/bloomfilter_load_select.inc + +#BF is always invoked but not useful at all +--exec sed s/##CF##/" COMMENT 'cf_short_prefix'"/g $tmpl_ddl > $ddl +--source $ddl +--source suite/rocksdb/t/bloomfilter_load_select.inc + + +#BF is most of the time invoked and useful +--exec 
sed s/##CF##/" COMMENT 'cf_long_prefix'"/g $tmpl_ddl > $ddl +--source $ddl +--source suite/rocksdb/t/bloomfilter_load_select.inc + +# BUG: Prev() with prefix lookup should not use prefix bloom filter +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +--disable_query_log +let $max = 100000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO r1 VALUES ($i,$i,$i,$i,$i); + inc $i; + eval $insert; +} +--enable_query_log +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 asc; +call bloom_end(); +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +call bloom_end(); + +# cleanup +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; +--remove_file $ddl diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test new file mode 100644 index 00000000000..efcf9ee1f73 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test @@ -0,0 +1 @@ +--source bloomfilter.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt new file mode 100644 index 00000000000..f3824106b25 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:24 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test new file mode 100644 index 00000000000..c4f1570ec41 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test @@ -0,0 +1,103 
@@ +--source include/have_rocksdb.inc + +## Test 0: Eq cond len includs VARCHAR, and real cond len < prefix bloom len < VARCHAR definition len +CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t0 VALUES('X', $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# BF not used +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t0; + + +## Test 1: Eq cond len is shorter than prefix bloom len +CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t1 VALUES(1, 1, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +# BF not used (4+8+4=16) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +# BF not used (4+8=12) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t1; + + +## Test 2: Long IN and short IN (varchar) -- can_use_bloom_filter changes within the same query 
+CREATE TABLE t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t2 VALUES($i, $i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# BF used for large cond, not used for short cond +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200'); +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +# BF not used because cond length is too small in all cases +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200'); +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t2; + + +## Test 3: Eq cond len is longer than prefix bloom len +CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + if ($i != 5000) { + let $insert = INSERT INTO t3 VALUES(1, $i, $i, $i); + eval $insert; + } + inc $i; +} +--enable_query_log + +# Full BF works with Get(), Block based does not. 
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; + +# BF used (4+8+8+8) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt new file mode 100644 index 00000000000..ef6d0fd554a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test new file mode 100644 index 00000000000..a15e2a89693 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test @@ -0,0 +1,118 @@ +--source include/have_rocksdb.inc + +--source include/restart_mysqld.inc +CREATE TABLE 
`linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type', + KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2', + KEY `id1_type3` (`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; + +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO linktable VALUES($i, $i, $i, $i, 1, 1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +## HA_READ_PREFIX_LAST_OR_PREV +# BF len 21 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc; +select case 
when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 13 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +## HA_READ_PREFIX_LAST_OR_PREV (no end range) +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 19 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo ## HA_READ_PREFIX_LAST +--echo # BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 
and visibility = 1 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo # BF len 19 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo # BF len 12 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + + +DROP TABLE linktable; +--source include/restart_mysqld.inc + +--echo # +--echo # bloom filter prefix is 20 byte +--echo # Create a key which is longer than that, so that we see that +--echo # eq_cond_len= slice.size() - 1; +--echo # doesnt work. 
+--echo # +--echo # indexnr 4 +--echo # kp0 + 4 = 8 +--echo # kp1 + 8 = 16 +--echo # kp2 + 8 = 24 24>20 byte length prefix +--echo # kp3 + 8 = 28 + +create table t1 ( + pk int primary key, + kp0 int not null, + kp1 bigint not null, + kp2 bigint not null, + kp3 bigint not null, + key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1' +) engine=rocksdb; + +insert into t1 values (1, 1,1, 1,1); +insert into t1 values (10,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (11,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (20,2,2,0x12FFFFFFFFFF,1); +insert into t1 values (21,2,2,0x12FFFFFFFFFF,1); + +--source include/restart_mysqld.inc + +--replace_column 9 # +explain +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +show status like '%rocksdb_bloom_filter_prefix%'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +show status like '%rocksdb_bloom_filter_prefix%'; +--echo # The following MUST show TRUE: +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +drop table t1; +# Key length is 4 + 8 + 8 = 20 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt new file mode 100644 index 00000000000..0a325757962 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=16k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:12 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test new file mode 100644 index 00000000000..76ec6ca101f --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test @@ -0,0 +1,52 @@ +--source include/have_rocksdb.inc + +# Fixing issue#230 -- Prefix bloom filter + reverse column family misses some rows +# This test inserts 20,000 rows into t1, then selecting one by one from stored procedure. +# If the select does not return any row, it is wrong. + +CREATE TABLE t1 ( + `id1` int unsigned NOT NULL DEFAULT '0', + `id2` int unsigned NOT NULL DEFAULT '0', + `link_type` int unsigned NOT NULL DEFAULT '0', + `visibility` tinyint NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` int unsigned NOT NULL DEFAULT '0', + `version` int unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; + +DELIMITER //; +CREATE PROCEDURE select_test() +BEGIN + DECLARE id1_cond INT; + SET id1_cond = 1; + WHILE id1_cond <= 20000 DO + SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt; + IF @cnt < 1 THEN + SELECT id1_cond, @cnt; + END IF; + SET id1_cond = id1_cond + 1; + END WHILE; +END// +DELIMITER ;// + +--disable_query_log +let $i = 1; +while ($i <= 20000) { + let $insert = INSERT INTO t1 VALUES($i, $i, 1, 1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +--echo "Skipping bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=1; +CALL select_test(); + +--echo "Using bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=0; +CALL select_test(); + +DROP PROCEDURE select_test; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc new file mode 100644 index 00000000000..1f1a4b9810f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc @@ -0,0 +1,189 @@ +# loading some data (larger than write buf size) to cause 
compaction +--exec perl suite/rocksdb/t/gen_insert.pl t1 > $MYSQL_TMP_DIR/insert_t1.sql +--exec perl suite/rocksdb/t/gen_insert.pl t2 > $MYSQL_TMP_DIR/insert_t2.sql +--disable_query_log +--source $MYSQL_TMP_DIR/insert_t1.sql +--source $MYSQL_TMP_DIR/insert_t2.sql +--enable_query_log + +# BF conditions (prefix short(4B)|medium(20B)|long(240B)) +#0 no eq condition (o, x, x) +## cond length 4, key length > 4 +call bloom_start(); +select count(*) from t1; +call bloom_end(); +call bloom_start(); +select count(*) from t2; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +call bloom_end(); + +#1 cond length == prefix length (o, o, x) +## cond length 4+8+8=20, key length > 20 +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +call bloom_end(); +## (cond_length == extended_key_length(4+8+4+4=20) == prefix_length) +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force 
index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +call bloom_end(); + +#2 cond length < actual key length and cond_length < prefix length (o, x, x) +## for long prefix key, most cases falling into this category, unless all key colums are used. +## cond length 4+8=12, key length > 12 +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +call bloom_end(); + +#3 both actual key length and cond length >= prefix length (o, o, o/x) +## cond length 4+8+9+8+4=33 +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 
and id3='1' and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +call bloom_end(); +## 4+8+9=25 +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +call bloom_end(); + +#4 actual key length > prefix length and cond length < prefix length (o, x, x) +## cond length 4+8=12 +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +call bloom_end(); + +#5 cond length == extended key length < prefix length (o, o, o) +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +call bloom_end(); +## 4+9+4=17 +call bloom_start(); 
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +call bloom_end(); + +#6 cond length == non-extended key length < prefix length, actual key length > prefix length (o, x, x) +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +call bloom_end(); +## 4+9+4=17 +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +call bloom_end(); + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt new file mode 100644 index 00000000000..5c62c7cf986 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240} +--rocksdb_skip_bloom_filter_on_read=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test new file mode 100644 index 00000000000..efcf9ee1f73 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test @@ -0,0 +1 @@ +--source bloomfilter.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl 
b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl new file mode 100644 index 00000000000..5cf033d4726 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl @@ -0,0 +1,36 @@ +drop table if exists t1; +drop table if exists t2; +create table t1 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id1, id2, id3, id4)##CF##, + index id2 (id2)##CF##, + index id2_id1 (id2, id1)##CF##, + index id2_id3 (id2, id3)##CF##, + index id2_id4 (id2, id4)##CF##, + index id2_id3_id1_id4 (id2, id3, id1, id4)##CF##, + index id3_id2 (id3, id2)##CF## +) engine=ROCKSDB; + +create table t2 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id4)##CF##, + index id2 (id2)##CF##, + index id2_id3 (id2, id3)##CF##, + index id2_id4 (id2, id4)##CF##, + index id2_id4_id5 (id2, id4, id5)##CF##, + index id3_id4 (id3, id4)##CF##, + index id3_id5 (id3, id5)##CF## +) engine=ROCKSDB; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test new file mode 100644 index 00000000000..cfa2f6ff747 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -0,0 +1,110 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +# Create a table with a primary key and one secondary key as well as one +# more column +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; + +# Create a second identical table to validate that bulk loading different +# tables in the same session works +CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; + +# Create a third table using partitions to validate that bulk loading works +# across a 
partitioned table +CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' + PARTITION BY KEY() PARTITIONS 4; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` + +# Create a text file with data to import into the table. +# The primary key is in sorted order and the secondary keys are randomly generated +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>>', $fn) || die "perl open($fn): $!"; +my $max = 10000000; +my @chars = ("A".."Z", "a".."z", "0".."9"); +my @lowerchars = ("a".."z"); +my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); +for (my $ii = 0; $ii < $max; $ii++) +{ + my $pk; + my $tmp = $ii; + foreach (@powers_of_26) + { + $pk .= $lowerchars[$tmp / $_]; + $tmp = $tmp % $_; + } + + my $num = int(rand(25)) + 6; + my $a; + $a .= $chars[rand(@chars)] for 1..$num; + + $num = int(rand(25)) + 6; + my $b; + $b .= $chars[rand(@chars)] for 1..$num; + print $fh "$pk\t$a\t$b\n"; +} +close($fh); +EOF + +--file_exists $file + +# Make sure a snapshot held by another user doesn't block the bulk load +connect (other,localhost,root,,); +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--echo LOAD DATA INFILE INTO TABLE t2; +eval LOAD DATA INFILE '$file' INTO TABLE t2; +--echo LOAD DATA INFILE INTO TABLE t3; +eval LOAD DATA INFILE '$file' INTO TABLE t3; +--enable_query_log +set rocksdb_bulk_load=0; + +# Make sure all the data is there. 
+select count(pk) from t1; +select count(a) from t1; +select count(b) from t1; +select count(pk) from t2; +select count(a) from t2; +select count(b) from t2; +select count(pk) from t3; +select count(a) from t3; +select count(b) from t3; + +# Create a dummy file with a bulk load extesion. It should be removed when +# the server starts +--let $tmpext = .bulk_load.tmp +--let $MYSQLD_DATADIR= `SELECT @@datadir` +--let $datadir = $MYSQLD_DATADIR/.rocksdb +--write_file $datadir/test$tmpext +dummy data +EOF +--write_file $datadir/longfilenamethatvalidatesthatthiswillgetdeleted$tmpext +dummy data +EOF + +# Show the files exists +--list_files $datadir *$tmpext + +# Now restart the server and make sure it automatically removes this test file +--source include/restart_mysqld.inc + +# Show the files do not exist +--list_files $datadir *$tmpext + +# Cleanup +disconnect other; +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt new file mode 100644 index 00000000000..ed6029f9a27 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt @@ -0,0 +1,4 @@ +--skip-rocksdb_debug_optimizer_no_zero_cardinality +--rocksdb_compaction_sequential_deletes=0 +--force-restart +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test new file mode 100644 index 00000000000..df2b0673315 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1( + id bigint not null primary key, + i1 bigint, #unique + i2 bigint, #repeating + c1 varchar(20), #unique + c2 varchar(20), #repeating + index t1_1(id, i1), + index t1_2(i1, i2), + index t1_3(i2, i1), + index t1_4(c1, c2), + index t1_5(c2, c1) +) engine=rocksdb; 
+--disable_query_log +let $i=0; +while ($i<100000) +{ + inc $i; + eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10); +} +--enable_query_log + +# Flush memtable out to SST and display index cardinalities +optimize table t1; +show index in t1; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); + +--echo restarting... +--source include/restart_mysqld.inc + +# display index cardinalities after the restart +show index in t1; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py new file mode 100644 index 00000000000..a3d50f305a4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py @@ -0,0 +1,31 @@ +import sys +import re + +""" +Example usage: + python check_log_for_xa.py path/to/log/mysqld.2.err rollback,commit,prepare +""" + +log_path = sys.argv[1] +desired_filters = sys.argv[2] + +all_filters = [ + ('rollback', re.compile('(\[Note\] rollback xid .+)')), + ('commit', re.compile('(\[Note\] commit xid .+)')), + ('prepare', + re.compile('(\[Note\] Found \d+ prepared transaction\(s\) in \w+)')), +] + +active_filters = filter(lambda f: f[0] in desired_filters, all_filters) + +results = set() +with open(log_path) as log: + for line in log: + line = line.strip() + for f in active_filters: + match = f[1].search(line) + if match: + results.add("**found '%s' log entry**" % f[0]) + +for res in results: + print res diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc new file mode 100644 index 00000000000..c108a97362d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc @@ -0,0 +1,54 @@ +# +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff 
will be needed. Or, the output might say that +# the storage engine does not support CHECK. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +CHECK TABLE t1; +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; + +DROP TABLE t1, t2; + + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.test b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test new file mode 100644 index 00000000000..4d349f7a167 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test @@ -0,0 +1,12 @@ +--source include/have_rocksdb.inc + +# +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. 
Or, the output might say that +# the storage engine does not support CHECK. +# + +--source check_table.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test new file mode 100644 index 00000000000..e5de6246f60 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +#--source include/restart_mysqld.inc + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +# Create checkpoint without trailing '/' +let $checkpoint = $MYSQL_TMP_DIR/checkpoint; +let $succeeds = 1; +--source set_checkpoint.inc + +# Create checkpoint with a trailing '/' +let $checkpoint = $MYSQL_TMP_DIR/checkpoint/; +let $succeeds = 1; 
+--source set_checkpoint.inc + +# Set checkpoint dir as empty string, which fails +let $checkpoint = ; +let $succeeds = 0; +--source set_checkpoint.inc + +# Set checkpoint as a directory that does not exist, which fails +let $checkpoint = /does/not/exist; +let $succeeds = 0; +--source set_checkpoint.inc + +# Set checkpoint as a directory that already exists, which fails +let $checkpoint = $MYSQL_TMP_DIR/already-existing-directory; +--mkdir $checkpoint +let $succeeds = 0; +--source set_checkpoint.inc +--exec rm -rf $checkpoint + +--disable_result_log +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +truncate table t3; +optimize table t3; +truncate table t4; +optimize table t4; +truncate table t5; +optimize table t5; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test new file mode 100644 index 00000000000..fbe8028f6d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test @@ -0,0 +1,76 @@ +--source include/have_rocksdb.inc + +# +# CHECKSUM TABLE statements for standard CHECKSUM properties. 
+# Live checksums are covered in checksum_table_live.test +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; + +CHECKSUM TABLE t1; +CHECKSUM TABLE t2, t1; +CHECKSUM TABLE t1, t2 QUICK; +CHECKSUM TABLE t1, t2 EXTENDED; + +DROP TABLE t1, t2; + +--echo # +--echo # Issue #110: SQL command checksum returns inconsistent result +--echo # +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +checksum table t1; +select * from t1 where pk=2; +checksum table t1; +checksum table t1; +flush tables; +checksum table t1; +checksum table t1; + +drop table t1; + +--echo # +--echo # The following test is about making sure MyRocks CHECKSUM TABLE +--echo # values are the same as with InnoDB. +--echo # If you see checksum values changed, make sure their counterparts +--echo # in suite/innodb/r/checksum-matches-myrocks.result match. 
+--echo # + +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +drop table t1; + +create table t1 ( + pk bigint unsigned primary key, + col1 varchar(10), + col2 tinyint, + col3 double +) engine=rocksdb; + +checksum table t1; + +insert into t1 values (1, NULL, NULL, NULL); +insert into t1 values (2, 'foo', NULL, NULL); +checksum table t1; + +insert into t1 values (3, NULL, 123, NULL); +insert into t1 values (4, NULL, NULL, 2.78); +checksum table t1; + +insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78); +insert into t1 values (6, '', NULL, 2.78); +checksum table t1; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test new file mode 100644 index 00000000000..da278ed7f9b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# CHECKSUM TABLE statements for live CHECKSUM. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +# For most engines CHECKSUM=1 option will be ignored, +# and the results will be different + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; + +CHECKSUM TABLE t1; +CHECKSUM TABLE t2, t1; +CHECKSUM TABLE t1, t2 QUICK; +CHECKSUM TABLE t1, t2 EXTENDED; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc new file mode 100644 index 00000000000..2d3c9292441 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc @@ -0,0 +1,55 @@ +# +# NOT NULL attribute in columns +# +# Usage: +# let $col_type = ; +# let $col_default = ; +# --source col_not_null.inc +# +# We will add NOT NULL to the column options; +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns without a default +--echo #---------------------------------- + +eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); +eval INSERT INTO t1 (c) VALUES ($col_default); +SELECT HEX(c) FROM t1; + +DROP TABLE t1; + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns with a default +--echo #---------------------------------- + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + c $col_type NOT NULL DEFAULT $col_default +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +--error ER_INVALID_DEFAULT +eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL; + +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); + +eval INSERT INTO t1 (c) VALUES ($col_default); +eval INSERT INTO t1 () VALUES (); + +# HEX should be universal for 
all column types +SELECT pk, HEX(c) FROM t1 ORDER BY pk; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc new file mode 100644 index 00000000000..4c5b89c9d00 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc @@ -0,0 +1,61 @@ +# +# NOT NULL attribute in TIMESTAMP columns +# +# This is a copy of col_not_null.inc, except that +# instead of getting an error on inserting NULL into a non-NULL column, +# we are getting the current timestamp (see MySQL:68472). +# If the bug is ever fixed, this include file won't be needed anymore. + + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #---------------------------------- +--echo # $col_type NOT NULL column without a default +--echo #---------------------------------- + +eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +# Here where the non-standard behavior strikes: +# instead of an error we are getting the current timestamp + +# As of mysql-5.6.11, this no longer works, and we get an error: +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); +eval INSERT INTO t1 (c) VALUES ($col_default); +SELECT HEX(c) FROM t1; + +DROP TABLE t1; + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns with a default +--echo #---------------------------------- + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + c $col_type NOT NULL DEFAULT $col_default +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +--error ER_INVALID_DEFAULT +eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL; + +# Here where the non-standard behavior strikes: +# instead of an error we are getting the current timestamp + +# As of mysql-5.6.11, this no longer works, and we get an error: +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); + +eval INSERT INTO t1 (c) 
VALUES ($col_default); +eval INSERT INTO t1 () VALUES (); + +# HEX should be universal for all column types +SELECT pk, HEX(c) FROM t1 ORDER BY pk; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc new file mode 100644 index 00000000000..7ebfee0b114 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc @@ -0,0 +1,34 @@ +# +# NULL attribute and DEFAULT NULL in columns +# +# Usage: +# let $col_type = ; +# let $col_default = ; +# --source col_null.inc +# +# We will add NULL attribute to the column options. +# + + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + c $col_type NULL, + c1 $col_type NULL DEFAULT NULL, + c2 $col_type NULL DEFAULT $col_default, + pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +eval INSERT INTO t1 (c,c1,c2) VALUES ($col_default,$col_default,$col_default); +INSERT INTO t1 () VALUES (); + +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test new file mode 100644 index 00000000000..6f91ee7ca9a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +# +# Check whether DEFAULT column attribute +# is supported in CREATE and ALTER TABLE. +# If the attribute is supported at all, it will be covered +# in more details in col_option_null and col_option_not_null tests. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a) VALUES (1); +SELECT a FROM t1; + +ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT ''; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (b) VALUES ('a'); +SELECT a,b FROM t1 ORDER BY a,b; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test new file mode 100644 index 00000000000..fbb5a932fc0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test @@ -0,0 +1,224 @@ +--source include/have_rocksdb.inc + +# +# NOT NULL column attribute +# + +######################################### +# TODO: +# Currently the test produces incorrect (?) result +# due to bug MySQL:68472. If the bug is ever fixed, +# the test and result files will need to be updated. +######################################### + + +let $extra_col_opts = NOT NULL; + +--echo ######################## +--echo # BINARY columns +--echo ######################## + +--source type_binary.inc +--let $col_type = BINARY +--let $col_default = 0 +--source col_not_null.inc + +--echo ######################## +--echo # VARBINARY columns +--echo ######################## + +--source type_varbinary.inc +--let $col_type = VARBINARY(64) +--let $col_default = 'test' +--source col_not_null.inc + +--echo ######################## +--echo # BIT columns +--echo ######################## + +--source type_bit.inc +--let $col_type = BIT +--let $col_default = 1 +--source col_not_null.inc + +--echo ######################## +--echo # BLOB columns +--echo ######################## + +--source type_blob.inc + +--let $col_default = '' + +--let $col_type = BLOB +--source col_not_null.inc + +--let $col_type = TINYBLOB +--source col_not_null.inc + +--let $col_type = MEDIUMBLOB +--source col_not_null.inc + +--let $col_type = LONGBLOB +--source col_not_null.inc + 
+--echo ######################## +--echo # BOOL columns +--echo ######################## + +--source type_bool.inc +--let $col_type = BOOL +--let $col_default = '0' +--source col_not_null.inc + +--echo ######################## +--echo # CHAR columns +--echo ######################## + +--source type_char.inc +--let $col_type = CHAR +--let $col_default = '_' +--source col_not_null.inc + +--echo ######################## +--echo # VARCHAR columns +--echo ######################## + +--source type_varchar.inc +--let $col_type = VARCHAR(64) +--let $col_default = 'test default' +--source col_not_null.inc + +--echo ######################## +--echo # date and time columns +--echo ######################## + +--source type_date_time.inc + +SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); + +--let $col_type = DATE +--let $col_default = '2012-12-21' +--source col_not_null.inc + +--let $col_type = DATETIME +--let $col_default = '2012-12-21 12:21:12' +--source col_not_null.inc + +# Even with explicit-defaults-for-timestamps, we still can't use +# the standard include file, due to bug MySQL:68472 + +--let $col_type = TIMESTAMP +--let $col_default = '2012-12-21 12:21:12' +--source col_not_null_timestamp.inc + +--let $col_type = TIME +--let $col_default = '12:21:12' +--source col_not_null.inc + +--let $col_type = YEAR +--let $col_default = '2012' +--source col_not_null.inc + +--let $col_type = YEAR(2) +--let $col_default = '12' +--source col_not_null.inc + +--echo ######################## +--echo # ENUM columns +--echo ######################## + +--source type_enum.inc + +--let $col_type = ENUM('test1','test2','test3') +--let $col_default = 'test2' +--source col_not_null.inc + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +--let $col_type = DECIMAL +--let $col_default = 1.1 +--source col_not_null.inc + +--let $col_type = NUMERIC +--let $col_default = 0 +--source col_not_null.inc + 
+--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +--let $col_type = FLOAT +--let $col_default = 1.1 +--source col_not_null.inc + +--let $col_type = DOUBLE +--let $col_default = 0 +--source col_not_null.inc + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +--let $col_type = INT +--let $col_default = 2147483647 +--source col_not_null.inc + +--let $col_type = TINYINT +--let $col_default = 127 +--source col_not_null.inc + +--let $col_type = SMALLINT +--let $col_default = 0 +--source col_not_null.inc + +--let $col_type = MEDIUMINT +--let $col_default = 1 +--source col_not_null.inc + +--let $col_type = BIGINT +--let $col_default = 9223372036854775807 +--source col_not_null.inc + +--echo ######################## +--echo # SET columns +--echo ######################## + +--source type_set.inc +--let $col_type = SET('test1','test2','test3') +--let $col_default = 'test2,test3' +--source col_not_null.inc + +--echo ######################## +--echo # TEXT columns +--echo ######################## + +--source type_text.inc + +--let $col_default = '' + +--let $col_type = TEXT +--source col_not_null.inc + +--let $col_type = TINYTEXT +--source col_not_null.inc + +--let $col_type = MEDIUMTEXT +--source col_not_null.inc + +--let $col_type = LONGTEXT +--source col_not_null.inc + + +--let $col_type = +--let $col_default = +--let $extra_col_opts = + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test new file mode 100644 index 00000000000..18f2601eb16 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test @@ -0,0 +1,216 @@ +--source include/have_rocksdb.inc + +# +# NULL column attribute +# + +let $extra_col_opts = NULL; + + +--echo ######################## +--echo # BINARY columns +--echo ######################## + +--source 
type_binary.inc +--let $col_type = BINARY +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # VARBINARY columns +--echo ######################## + +--source type_varbinary.inc +--let $col_type = VARBINARY(64) +--let $col_default = 'test' +--source col_null.inc + +--echo ######################## +--echo # BIT columns +--echo ######################## + +--source type_bit.inc +--let $col_type = BIT +--let $col_default = 1 +--source col_null.inc + +--echo ######################## +--echo # BLOB columns +--echo ######################## + +--source type_blob.inc + +--let $col_default = '' + +--let $col_type = BLOB +--source col_null.inc + +--let $col_type = TINYBLOB +--source col_null.inc + +--let $col_type = MEDIUMBLOB +--source col_null.inc + +--let $col_type = LONGBLOB +--source col_null.inc + +--echo ######################## +--echo # BOOL columns +--echo ######################## + +--source type_bool.inc +--let $col_type = BOOL +--let $col_default = '0' +--source col_null.inc + + +--echo ######################## +--echo # CHAR columns +--echo ######################## + +--source type_char.inc +--let $col_type = CHAR +--let $col_default = '_' +--source col_null.inc + +--echo ######################## +--echo # VARCHAR columns +--echo ######################## + + +--source type_varchar.inc +--let $col_type = VARCHAR(64) +--let $col_default = 'test default' +--source col_null.inc + + +--echo ######################## +--echo # date and time columns +--echo ######################## + +--source type_date_time.inc + +--let $col_type = DATE +--let $col_default = '2012-12-21' +--source col_null.inc + +--let $col_type = DATETIME +--let $col_default = '2012-12-21 12:21:12' +--source col_null.inc + +--let $col_type = TIMESTAMP +--let $col_default = '2012-12-21 12:21:12' +--source col_null.inc + +--let $col_type = TIME +--let $col_default = '12:21:12' +--source col_null.inc + +--let $col_type = YEAR +--let $col_default = '2012' +--source 
col_null.inc + +--let $col_type = YEAR(2) +--let $col_default = '12' +--source col_null.inc + + +--echo ######################## +--echo # ENUM columns +--echo ######################## + +--source type_enum.inc +--let $col_type = ENUM('test1','test2','test3') +--let $col_default = 'test2' +--source col_null.inc + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +--let $col_type = DECIMAL +--let $col_default = 1.1 +--source col_null.inc + +--let $col_type = NUMERIC +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +--let $col_type = FLOAT +--let $col_default = 1.1 +--source col_null.inc + +--let $col_type = DOUBLE +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +--let $col_type = INT +--let $col_default = 2147483647 +--source col_null.inc + +--let $col_type = TINYINT +--let $col_default = 127 +--source col_null.inc + +--let $col_type = SMALLINT +--let $col_default = 0 +--source col_null.inc + +--let $col_type = MEDIUMINT +--let $col_default = 1 +--source col_null.inc + +--let $col_type = BIGINT +--let $col_default = 9223372036854775807 +--source col_null.inc + +--echo ######################## +--echo # SET columns +--echo ######################## + +--source type_set.inc +--let $col_type = SET('test1','test2','test3') +--let $col_default = 'test2,test3' +--source col_null.inc + + +--echo ######################## +--echo # TEXT columns +--echo ######################## + +--source type_text.inc + +--let $col_default = '' + +--let $col_type = TEXT +--source col_null.inc + +--let $col_type = TINYTEXT +--source col_null.inc + +--let $col_type = MEDIUMTEXT +--source col_null.inc + +--let $col_type = LONGTEXT +--source 
col_null.inc + + +--let $col_type = +--let $col_default = +--let $extra_col_opts = + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test new file mode 100644 index 00000000000..25cda84ce2e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test @@ -0,0 +1,74 @@ +--source include/have_rocksdb.inc + +# +# UNSIGNED column attribute +# + +--let $extra_col_opts = UNSIGNED + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +CREATE TABLE t1 ( + a DECIMAL UNSIGNED, + b NUMERIC UNSIGNED, + PRIMARY KEY (a) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +INSERT INTO t1 (a,b) VALUES (-100,100); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +CREATE TABLE t1 ( + a DOUBLE UNSIGNED, + b FLOAT UNSIGNED, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +INSERT INTO t1 (a,b) VALUES (-100,100); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +CREATE TABLE t1 ( + t TINYINT UNSIGNED, + s SMALLINT UNSIGNED, + m MEDIUMINT UNSIGNED, + i INT UNSIGNED, + b BIGINT UNSIGNED, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615); +INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1); +--sorted_result +SELECT t,s,m,i,b FROM t1; + +DROP TABLE t1; + +--let $extra_col_opts = + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test new file mode 100644 index 
00000000000..37982ae0964 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test @@ -0,0 +1,67 @@ +--source include/have_rocksdb.inc + +# +# ZEROFILL column attribute +# + +let $extra_col_opts = ZEROFILL; + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +CREATE TABLE t1 ( + a DECIMAL ZEROFILL, + b NUMERIC ZEROFILL, + PRIMARY KEY (a) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.1,1234); +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +CREATE TABLE t1 ( + a DOUBLE ZEROFILL, + b FLOAT ZEROFILL, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1,1234.5); +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +CREATE TABLE t1 ( + t TINYINT ZEROFILL, + s SMALLINT ZEROFILL, + m MEDIUMINT ZEROFILL, + i INT ZEROFILL, + b BIGINT ZEROFILL, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0); +SELECT t,s,m,i,b FROM t1; +DROP TABLE t1; + +--let $extra_col_opts = + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt new file mode 100644 index 00000000000..79e591636fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test new file mode 100644 index 00000000000..c3fcaad776c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -0,0 +1,181 @@ +--source include/have_rocksdb.inc +--source include/have_fullregex.inc 
+ +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# ci non-indexed column is allowed +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8; +DROP TABLE t1; + +# ci indexed column is not allowed +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +# ci indexed column with rocksdb_strict_collation_check=OFF is allowed. +SET GLOBAL rocksdb_strict_collation_check=0; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_check=1; + +# cs indexed column is allowed +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8; +DROP TABLE t1; + +# cs latin1_bin is allowed +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin; +DROP TABLE t1; + +# cs utf8_bin is allowed +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin; +DROP TABLE t1; + +# cs mixed latin1_bin and utf8_bin is allowed +CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb; +DROP TABLE t1; + +# ci indexed column is not allowed unless table name is in exception list +SET GLOBAL 
rocksdb_strict_collation_exceptions=t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test regex for exception list +SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t123; +--error ER_UNKNOWN_ERROR +CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; +CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE xt123; +--error ER_UNKNOWN_ERROR +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list with commas +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list with vertical bar +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma at the front +SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*"; 
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar at the front +SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma in the middle +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar in the middle +SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma at the end +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,"; +CREATE TABLE s1 (id INT primary key, 
value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar at the end +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and tons of commas and vertical bars just for the fun of it +SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test allowing alters to create temporary tables +SET GLOBAL rocksdb_strict_collation_exceptions='t1'; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ALTER TABLE t1 AUTO_INCREMENT=1; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; +--error ER_UNKNOWN_ERROR +ALTER TABLE t2 ADD INDEX(value); +DROP TABLE t2; + + +# test invalid regex (missing end bracket) +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +SET GLOBAL 
rocksdb_strict_collation_exceptions="[a-b"; +--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--error ER_UNKNOWN_ERROR +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR +CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE a, b; + +# test invalid regex (trailing escape) +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; +--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--error ER_UNKNOWN_ERROR +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +SET GLOBAL rocksdb_strict_collation_exceptions="abc"; +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR +CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE abc; + +# cleanup +SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt new file mode 100644 index 00000000000..13563edb439 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt @@ -0,0 +1,2 @@ +--rocksdb_strict_collation_check=ON +--rocksdb_strict_collation_exceptions='r1.lol' diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test new file mode 100644 index 00000000000..7f741e286b1 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test @@ -0,0 +1,27 @@ +CREATE TABLE `r1.lol` ( + `c1` int(10) NOT NULL DEFAULT '0', + `c2` int(11) NOT NULL DEFAULT '0', + `c3` int(1) NOT NULL DEFAULT '0', + `c4` int(11) NOT NULL DEFAULT '0', + `c5` int(11) NOT NULL DEFAULT '0', + `c6` varchar(100) NOT NULL DEFAULT '', + `c7` varchar(100) NOT NULL DEFAULT '', + `c8` varchar(255) NOT NULL DEFAULT '', + `c9` int(10) NOT NULL DEFAULT '125', + `c10` int(10) NOT NULL DEFAULT '125', + `c11` text NOT NULL, + `c12` int(11) NOT NULL DEFAULT '0', + `c13` int(10) NOT NULL DEFAULT '0', + `c14` text NOT NULL, + `c15` blob NOT NULL, + `c16` int(11) NOT NULL DEFAULT '0', + `c17` int(11) NOT NULL DEFAULT '0', + `c18` int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (`c1`), + KEY i1 (`c4`), + KEY i2 (`c7`), + KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1; + +DROP INDEX i1 ON `r1.lol`; +DROP TABLE `r1.lol`; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt new file mode 100644 index 00000000000..3b4871f864a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt @@ -0,0 +1,3 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_records_in_range=50 +--rocksdb_compaction_sequential_deletes_count_sd=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test new file mode 100644 index 00000000000..9cb32e8d615 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test @@ -0,0 +1,87 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS r1; +--enable_warnings + +create table r1 ( + id1 int, + id2 int, + type int, + value varchar(100), + value2 int, + value3 int, + primary key (type, id1, id2), + index id1_type (id1, type, value2, value, id2) +) engine=rocksdb collate latin1_bin; + +select 'loading data'; + +--disable_query_log +let $i=0; 
+while ($i<1000) +{ + inc $i; + eval insert r1(id1, id2, type, value, value2, value3) + values($i,$i,$i, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',$i,$i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now=1; +optimize table r1; + +--exec echo Test 1: Do a bunch of updates without setting the compaction sysvar +--exec echo Expect: no compaction +let $window = 0; +let $deletes = 0; +let $file_size = 0; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 0; +--source compact_deletes_test.inc + +--exec echo Test 2: Do a bunch of updates and set the compaction sysvar +--exec echo Expect: compaction +let $window = 1000; +let $deletes = 990; +let $file_size = 0; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 1; +--source compact_deletes_test.inc + +--exec echo Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large +--exec echo Expect: no compaction +let $window = 1000; +let $deletes = 1000; +let $file_size = 1000000; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 0; +--source compact_deletes_test.inc + +--exec echo Test 4: Do a bunch of secondary key updates and set the compaction sysvar +--exec echo Expect: compaction +let $window = 1000; +let $deletes = 50; +let $file_size = 0; +let $secondary_only = 1; +let $primary = 0; +let $no_more_deletes = 1; +--source compact_deletes_test.inc + +--exec echo Test 5: Do a bunch of secondary key updates and set the compaction sysvar, +--exec echo and rocksdb_compaction_sequential_deletes_count_sd turned on +--exec echo Expect: compaction +let $window = 1000; +let $deletes = 50; +let $file_size = 0; +let $secondary_only = 1; +let $primary = 0; +let $no_more_deletes = 1; +SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON; +--source compact_deletes_test.inc +SET GLOBAL 
rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd; + +drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc new file mode 100644 index 00000000000..15a611c8dbb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -0,0 +1,43 @@ +# Usage: +# let $window = ; +# let $deletes = ; +# let $file_size = ; +# --source compact_deletes_test.inc +# + +let $save_rocksdb_compaction_sequential_deletes_window = `SELECT @@rocksdb_compaction_sequential_deletes_window`; +eval set global rocksdb_compaction_sequential_deletes_window=$window; +let $save_rocksdb_compaction_sequential_deletes = `SELECT @@rocksdb_compaction_sequential_deletes`; +eval set global rocksdb_compaction_sequential_deletes= $deletes; +let $save_rocksdb_compaction_sequential_deletes_file_size = `SELECT @@rocksdb_compaction_sequential_deletes_file_size`; +eval set global rocksdb_compaction_sequential_deletes_file_size=$file_size; +--disable_query_log +let $i=0; +while ($i<1000) +{ + inc $i; + if ($secondary_only) + { + eval update r1 set value2=value2+1 where id1=$i; + } + if ($primary) + { + eval update r1 set id2=id2+10000 where id1=500; + } +} +--enable_query_log +set global rocksdb_force_flush_memtable_now=1; +select sleep(1); + +--disable_query_log +let $wait_timeout= 300; # Override default 30 seconds with 300. 
+let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc +--enable_query_log + +--exec bash suite/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes +eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes; +eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size; +eval SET GLOBAL rocksdb_compaction_sequential_deletes_window= $save_rocksdb_compaction_sequential_deletes_window; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt new file mode 100644 index 00000000000..81b5acc4e56 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test new file mode 100644 index 00000000000..c146d43474c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test @@ -0,0 +1,4 @@ +--source include/have_rocksdb.inc + +create table t (id int primary key) engine=rocksdb; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test new file mode 100644 index 00000000000..2a2896691b7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test @@ -0,0 +1,34 @@ +--source include/have_rocksdb.inc + +# +# Generate concurrent requests to alter a table using mysqlslap +# + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + +CREATE DATABASE mysqlslap; + +use mysqlslap; + +CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB; +INSERT INTO a1 VALUES 
(1, 1); + +--write_file $MYSQL_TMP_DIR/concurrent_alter.sh +$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index bx(b); alter table a1 drop index bx" --concurrency=1 --iterations=25 & +$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index ax(a); alter table a1 drop index ax" --concurrency=1 --iterations=25 & +sleep 2 +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 & +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000 +sleep 2 +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 & +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000 +wait +EOF + +--exec bash $MYSQL_TMP_DIR/concurrent_alter.sh + +SHOW CREATE TABLE a1; + +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test new file mode 100644 index 00000000000..4dfa5abbbbb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = READ COMMITTED; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test new file mode 100644 index 00000000000..c9f28dbcbe4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = REPEATABLE READ; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test new file mode 100644 index 00000000000..57b45050fea --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = SERIALIZABLE; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc new file mode 100644 index 00000000000..be01338cb85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc @@ -0,0 +1,136 @@ +# +# TRANSACTION WITH CONSISTENT SNAPSHOT +# + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; + +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +# While a consistent snapshot transaction is executed, +# no external inserts should be visible to the transaction. 
+# But it should only work this way for REPEATABLE-READ and SERIALIZABLE + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection con1; +COMMIT; + +# verifying snapshot is released after finishing transaction +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection con1; +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO t1 (a) VALUES (1); + +connection con1; +--echo # If consistent read works on this isolation level ($trx_isolation), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); + +BEGIN; + +connection con2; +INSERT INTO r1 values (5,5,5); + +connection con1; +SELECT * FROM r1; # 5 + +connection con2; +INSERT INTO r1 values (6,6,6); + +connection con1; +SELECT * FROM r1; # 5 +COMMIT; +SELECT * FROM r1; # 6 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (7,7,7); + +connection con1; +SELECT * FROM r1; # 6 + +connection con2; +INSERT INTO r1 values (8,8,8); + +connection con1; +SELECT * FROM r1; # 6 +COMMIT; +SELECT * FROM r1; # 8 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (9,9,9); + +connection con1; +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT 
SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (10,10,10); + +connection con1; +SELECT * FROM r1; # 9 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno +# Succeeds with Read Committed, Fails with Repeatable Read +--error 0,ER_UNKNOWN_ERROR +INSERT INTO r1 values (11,11,11); +--echo ERROR: $mysql_errno +SELECT * FROM r1; # self changes should be visible + + +drop table r1; + +connection default; +disconnect con1; +disconnect con2; + + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test new file mode 100644 index 00000000000..a9ee98dfda5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc + +--echo # +--echo # Test how MyRocks handles reading corrupted data from disk. +--echo # Data corruption is simulated at source-code level. 
+--echo # + +--source include/have_debug.inc + + +--echo # +--echo # A test for case when data in the table *record* is longer +--echo # than table DDL expects it to be +--echo # +create table t1 ( + pk int not null primary key, + col1 varchar(10) +) engine=rocksdb; + +insert into t1 values (1,1),(2,2),(3,3); + +select * from t1; + +set @tmp1=@@rocksdb_verify_checksums; +set rocksdb_verify_checksums=1; +set session debug= "+d,myrocks_simulate_bad_row_read1"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug= "-d,myrocks_simulate_bad_row_read1"; +set rocksdb_verify_checksums=@tmp1; + +select * from t1 where pk=1; + +set session debug= "+d,myrocks_simulate_bad_row_read2"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug= "-d,myrocks_simulate_bad_row_read2"; + +set session debug= "+d,myrocks_simulate_bad_row_read3"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug= "-d,myrocks_simulate_bad_row_read3"; + +insert into t1 values(4,'0123456789'); +select * from t1; +drop table t1; + +--echo # +--echo # A test for case when index data is longer than table DDL +--echo # expects it to be +--echo # + +create table t2 ( + pk varchar(4) not null primary key, + col1 int not null +) engine=rocksdb collate latin1_bin; + +insert into t2 values ('ABCD',1); +select * from t2; +set session debug= "+d,myrocks_simulate_bad_pk_read1"; +--error ER_GET_ERRNO +select * from t2; +set session debug= "-d,myrocks_simulate_bad_pk_read1"; + +drop table t2; + +create table t2 ( + pk varchar(4) not null primary key, + col1 int not null +) engine=rocksdb; + +insert into t2 values ('ABCD',1); + +select * from t2; +set session debug= "+d,myrocks_simulate_bad_pk_read1"; +--error ER_GET_ERRNO +select * from t2; +set session debug= "-d,myrocks_simulate_bad_pk_read1"; + +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/create_table.test b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test new file mode 100644 index 
00000000000..4fffe7497c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test @@ -0,0 +1,192 @@ +--source include/have_rocksdb.inc + +# +# Basic CREATE TABLE statements +# + +############################################# +# TODO: +# A part of the test is currently disabled +# because temporary tables are not supported +############################################# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +# Simple create table with minimal table options +# which are defined in have_engine.inc +# (default empty) plus ENGINE= + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +SHOW CREATE TABLE t1; + +# IF NOT EXISTS +CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb; + +# CREATE .. LIKE + +CREATE TABLE t2 LIKE t1; +SHOW CREATE TABLE t2; + +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; + +--disable_parsing + +DROP TABLE t2; + +CREATE TEMPORARY TABLE t2 LIKE t1; + +SHOW CREATE TABLE t2; +DROP TEMPORARY TABLE t2; + +--enable_parsing + +DROP TABLE t2; + +DROP TABLE IF EXISTS t1; + +# CREATE .. 
AS SELECT + +# Use the engine as default + +SET default_storage_engine = rocksdb; + +CREATE TABLE t1 (a INT PRIMARY KEY); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a; +SHOW CREATE TABLE t1; +--sorted_result +SELECT * FROM t1; + +# Just to add FLUSH LOGS into the mix while we are in the most common test +FLUSH LOGS; + +DROP TABLE IF EXISTS t1; + +# CREATE TABLE with MAX_INDEXES (64) keys and no primary key +# MyRocks adds a hidden primary key, so make sure we don't break anything +CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT, + c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT, + c18 INT,c19 INT,c20 INT,c21 INT,c22 INT,c23 INT,c24 INT,c25 INT, + c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT, + c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT, + c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 INT, + c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT, + c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT, + c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT, + c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT, + c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT, + c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT, + c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT, + c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT, + c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT, + c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT, + c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT, + c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT, + c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT, + c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT, + c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT, + c161 INT,c162 INT,c163 INT,c164 
INT,c165 INT,c166 INT,c167 INT, + c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT, + c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT, + c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT, + c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT, + c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT, + c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT, + c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT, + c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT, + c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT, + c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT, + c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT, + c245 INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT, + c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT, + c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT, + c266 INT,c267 INT,c268 INT,c269 INT,c270 INT,c271 INT,c272 INT, + c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT, + c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT, + c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT, + c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT, + c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT, + c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT, + c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT, + c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT, + c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT, + c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT, + c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT, + c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT, + c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT, + c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT, + c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 
INT,c377 INT, + c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT, + c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT, + c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT, + c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT, + c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT, + c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT, + c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT, + c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT, + c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT, + c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT, + c448 INT, + KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14), + KEY (c15,c16,c17,c18,c19,c20,c21),KEY (c22,c23,c24,c25,c26,c27,c28), + KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42), + KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56), + KEY (c57,c58,c59,c60,c61,c62,c63),KEY (c64,c65,c66,c67,c68,c69,c70), + KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84), + KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98), + KEY (c99,c100,c101,c102,c103,c104,c105), + KEY (c106,c107,c108,c109,c110,c111,c112), + KEY (c113,c114,c115,c116,c117,c118,c119), + KEY (c120,c121,c122,c123,c124,c125,c126), + KEY (c127,c128,c129,c130,c131,c132,c133), + KEY (c134,c135,c136,c137,c138,c139,c140), + KEY (c141,c142,c143,c144,c145,c146,c147), + KEY (c148,c149,c150,c151,c152,c153,c154), + KEY (c155,c156,c157,c158,c159,c160,c161), + KEY (c162,c163,c164,c165,c166,c167,c168), + KEY (c169,c170,c171,c172,c173,c174,c175), + KEY (c176,c177,c178,c179,c180,c181,c182), + KEY (c183,c184,c185,c186,c187,c188,c189), + KEY (c190,c191,c192,c193,c194,c195,c196), + KEY (c197,c198,c199,c200,c201,c202,c203), + KEY (c204,c205,c206,c207,c208,c209,c210), + KEY (c211,c212,c213,c214,c215,c216,c217), + KEY (c218,c219,c220,c221,c222,c223,c224), + KEY 
(c225,c226,c227,c228,c229,c230,c231), + KEY (c232,c233,c234,c235,c236,c237,c238), + KEY (c239,c240,c241,c242,c243,c244,c245), + KEY (c246,c247,c248,c249,c250,c251,c252), + KEY (c253,c254,c255,c256,c257,c258,c259), + KEY (c260,c261,c262,c263,c264,c265,c266), + KEY (c267,c268,c269,c270,c271,c272,c273), + KEY (c274,c275,c276,c277,c278,c279,c280), + KEY (c281,c282,c283,c284,c285,c286,c287), + KEY (c288,c289,c290,c291,c292,c293,c294), + KEY (c295,c296,c297,c298,c299,c300,c301), + KEY (c302,c303,c304,c305,c306,c307,c308), + KEY (c309,c310,c311,c312,c313,c314,c315), + KEY (c316,c317,c318,c319,c320,c321,c322), + KEY (c323,c324,c325,c326,c327,c328,c329), + KEY (c330,c331,c332,c333,c334,c335,c336), + KEY (c337,c338,c339,c340,c341,c342,c343), + KEY (c344,c345,c346,c347,c348,c349,c350), + KEY (c351,c352,c353,c354,c355,c356,c357), + KEY (c358,c359,c360,c361,c362,c363,c364), + KEY (c365,c366,c367,c368,c369,c370,c371), + KEY (c372,c373,c374,c375,c376,c377,c378), + KEY (c379,c380,c381,c382,c383,c384,c385), + KEY (c386,c387,c388,c389,c390,c391,c392), + KEY (c393,c394,c395,c396,c397,c398,c399), + KEY (c400,c401,c402,c403,c404,c405,c406), + KEY (c407,c408,c409,c410,c411,c412,c413), + KEY (c414,c415,c416,c417,c418,c419,c420), + KEY (c421,c422,c423,c424,c425,c426,c427), + KEY (c428,c429,c430,c431,c432,c433,c434), + KEY (c435,c436,c437,c438,c439,c440,c441), + KEY (c442,c443,c444,c445,c446,c447,c448)); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test new file mode 100644 index 00000000000..3be7fda9952 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test @@ -0,0 +1,43 @@ +--echo # +--echo # Validate that deadlock errors don't occur with a high level of concurrency +--echo # + +--source include/have_rocksdb.inc + +--echo # Disable for valgrind because this takes too long +--source include/not_valgrind.inc + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + 
+CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb; +CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb; + +SET @save = @@global.rocksdb_lock_wait_timeout; +SET GLOBAL rocksdb_lock_wait_timeout = 60; + +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1 VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id1=1 AND id2=1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id2=1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1" +SELECT count from t1; + +# Same tests on a table with reverse orderings +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1rev VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id1=1 AND id2=1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id2=1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1" +SELECT count from t1rev; + +SET GLOBAL rocksdb_lock_wait_timeout = @save; + +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete.test b/storage/rocksdb/mysql-test/rocksdb/t/delete.test new file mode 100644 index 00000000000..b1654e606a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete.test @@ -0,0 +1,101 @@ 
+--source include/have_rocksdb.inc + +# +# Basic DELETE statements. +# DELETE LOW_PRIORITY is covered in delete_low_prio test +# DELETE QUICK is covered in delete_quick test (syntax only) +# DELETE IGNORE is covered in delete_ignore test +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +# Single-table DELETE + +DELETE FROM t1 WHERE b IN ('c'); +--sorted_result +SELECT a,b FROM t1; + +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +--sorted_result +SELECT a,b FROM t1; + +# ORDER BY and LIMIT +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +# Multi-table DELETE + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE FROM t1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1, t2; + + +# +# Transactional DELETE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +BEGIN; +DELETE FROM t1 WHERE b IN ('c'); +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +# Savepoints + +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; + +DELETE FROM t1; +RELEASE SAVEPOINT spt1; +ROLLBACK; +--sorted_result +SELECT a,b 
FROM t1; + +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; +INSERT INTO t1 (a,b) VALUES (1,'a'); +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +--error ER_UNKNOWN_ERROR +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test new file mode 100644 index 00000000000..93a9d1adaf9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# This is a test case to reproduce https://github.com/facebook/mysql-5.6/issues/162 +# Expected output of the last select for update was (1,2,100) and (1,3,100), but +# currently it returns (1,2,1) and (1,3,1), which must be fixed. + +connect (con, localhost, root,,); +connection default; + +--disable_warnings +set debug_sync='RESET'; +drop table if exists t1; +--enable_warnings + +create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb; +insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2); + +connection con; +set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go'; +send update t1 set value=100 where id1=1; + +connection default; +set debug_sync='now WAIT_FOR parked'; +delete from t1 where id1=1 and id2=1; +set debug_sync='now SIGNAL go'; + +connection con; +reap; +select * from t1 where id1=1 for update; + +# Cleanup +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test new file mode 100644 index 00000000000..d087d80f4ac --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +# +# DELETE IGNORE +# + +--disable_warnings +DROP TABLE IF EXISTS 
t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb; + +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +# Cleanup +DROP TABLE t1, t2; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test new file mode 100644 index 00000000000..127ef47dfaf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# +# DELETE QUICK syntax. +# For now we only check that the keyword is accepted, +# without actually checking whether the feature works. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); + +DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo'; +--sorted_result +SELECT a,b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test new file mode 100644 index 00000000000..f4d890eb1b4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test @@ -0,0 +1,39 @@ +--source include/have_rocksdb.inc + +# +# DELETE statements for tables with keys +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; + +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; + +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9); +DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1; +--sorted_result +SELECT a,b,c FROM t1; +DROP TABLE t1; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/describe.test b/storage/rocksdb/mysql-test/rocksdb/t/describe.test new file mode 100644 index 00000000000..9bc5d299a31 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/describe.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# DESCRIBE statement +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b'); + +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8; +INSERT INTO t2 (a,b) VALUES (1, 'bar'); + +CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; + +DESCRIBE t1; +DESC t2 a; +DESCRIBE t3 '%'; + +DROP TABLE t1, t2, t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def new file mode 100644 index 00000000000..5e459b0d471 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -0,0 +1,4 @@ +cons_snapshot_serializable : Consistent read does not work on serializable +level_read_uncommitted : Not supported +level_serializable: Not supported + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test new file mode 100644 index 00000000000..174a4bbf286 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test @@ -0,0 +1,11 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP DATABASE IF EXISTS test_drop_database; +--enable_warnings + +CREATE DATABASE test_drop_database; +CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a); +DROP TABLE t1; +DROP DATABASE test_drop_database; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test new file 
mode 100644 index 00000000000..57d7cdf57c2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test @@ -0,0 +1,116 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test dropping index inplace +## + +CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (a) VALUES (1); +INSERT INTO t1 (a) VALUES (3); +INSERT INTO t1 (a) VALUES (5); + +ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +# Key ka does not exist in table t1 +--error 1176 +SELECT * FROM t1 FORCE INDEX(ka) where a > 1; + +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) where a > 1; +--sorted_result +SELECT * FROM t1 where b > 1; + +DROP TABLE t1; + +## +## test dropping multiple indexes at once and multi-part indexes +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + + +# test restarting to make sure everything is still ok and persisted properly +--source include/restart_mysqld.inc + +SHOW CREATE TABLE t1; + +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); + +--sorted_result +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +--sorted_result +SELECT * FROM t1 where b > 3; + +DROP TABLE t1; + +# test dropping pk to see if thats still ok +CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW INDEX IN t1; +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; + +ALTER TABLE t1 DROP PRIMARY KEY; +SHOW INDEX IN t1; +# test dropping index on tables with no pk +ALTER TABLE t1 
DROP INDEX kc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; + +DROP TABLE t1; + +# test dropping unique keys +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb; +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c); +ALTER TABLE t1 ADD UNIQUE INDEX kc(c); +SHOW INDEX IN t1; + +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc; +SHOW INDEX IN t1; + +# test restarting to make sure everything is still ok and persisted properly +--source include/restart_mysqld.inc + +--sorted_result +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; + +# test dropping index on tables with no pk +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# case where dropping column, where column is the key, we dont want to use +# inplace in this scenario +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; + +# case drop and add at same time, should not use inplace algorithm yet +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc new file mode 100644 index 00000000000..b40004402c9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc @@ -0,0 +1,3 @@ +drop procedure save_read_stats; +drop procedure get_read_stats; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt 
b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt new file mode 100644 index 00000000000..f53a6050e89 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt @@ -0,0 +1,3 @@ +--rocksdb_max_background_compactions=8 +--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=512k;target_file_size_base=512k;level0_file_num_compaction_trigger=2;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;max_bytes_for_level_base=1m diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test new file mode 100644 index 00000000000..7b28474d9f2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -0,0 +1,115 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source include/restart_mysqld.inc +--exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc 
+let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t2; + +# Restart the server before t2's indices are deleted +--source include/restart_mysqld.inc + +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t3; + +# Insert enough data to trigger compactions that eliminate t2 and t3 +let $max = 50000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t4; + +# Restart the server before t4's indices are deleted +--source include/restart_mysqld.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +drop table t5; + +# Manually compact column families, cleaning up all lingering data +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; + +# Signal thread to check for dropped indices +set global rocksdb_signal_drop_index_thread = 1; + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. 
+let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +# Get list of all indices needing to be dropped +# Check total compacted-away rows for all indices +# Check that all indices have been successfully dropped +--exec perl suite/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err + +# Cleanup +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test new file mode 100644 index 00000000000..3742ab0e444 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -0,0 +1,110 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source include/restart_mysqld.inc +--exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc +let $table = t4; +--source 
drop_table_repopulate_table.inc + + +# Restart the server before t2's indices are deleted +--source include/restart_mysqld.inc + +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + + +# Insert enough data to trigger compactions that eliminate t2 and t3 +let $max = 50000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + + +# Restart the server before t4's indices are deleted +--source include/restart_mysqld.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +let $output= $MYSQLTEST_VARDIR/tmp/size_output; + +--exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ before/' > $output +drop table t1; +drop table t2; +drop table t3; +drop table t4; +drop table t5; + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. 
+let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +# Check that space is reclaimed +--exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ after/' >> $output +--exec perl suite/rocksdb/t/drop_table2_check.pl $output + +# Cleanup diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl new file mode 100644 index 00000000000..8f43f4725b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl @@ -0,0 +1,19 @@ +#!/usr/bin/perl + +my $a = 0; +my $b=0; +die unless($ARGV[0]); +open(my $f, "<", $ARGV[0]) or die $!; +while(readline($f)) { + if (/(\d+) before/) { + $a = $1; + } + + if (/(\d+) after/ ) { + $b = $1; + } +} + +if ($a > $b * 2) { + printf("Compacted\n"); +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt new file mode 100644 index 00000000000..a9ebc4ec20b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt @@ -0,0 +1,2 @@ +--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc new file mode 100644 index 00000000000..4d23f7a1c5f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc @@ -0,0 +1,47 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Start from clean slate +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source include/restart_mysqld.inc +--exec 
truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err + +CREATE TABLE t1 ( + a int not null, + b int not null, + c varchar(500) not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 50000; +let $table = t1; +--source drop_table3_repopulate_table.inc + +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +if ($truncate_table) +{ + truncate table t1; +} +if ($drop_table) +{ + drop table t1; +} + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; + +# Cleanup +DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test new file mode 100644 index 00000000000..b3a6bf9958e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +-- let $truncate_table = 0 +-- let $drop_table = 1 +-- source drop_table3.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc new file mode 100644 index 00000000000..c34af07204f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc @@ -0,0 +1,15 @@ +# Usage: +# let $max = ; +# let $table = ; +# --source drop_table_repopulate_table.inc +# +eval DELETE FROM $table; + +--disable_query_log +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO $table VALUES ($i, $i, rpad('a', 499, 'b')); + inc $i; + eval 
$insert; +} +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl new file mode 100755 index 00000000000..b123ac5492f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl @@ -0,0 +1,37 @@ +sub print_array { + $str = shift; + @arr = @_; + $prev= 0; + foreach (@arr) { + if ($prev) { + $dummy_idx = $_ - $prev; + }else { + $dummy_idx = 0; + } + $prev= $_; + print "$str $dummy_idx\n"; + } +} + +while (<>) { + if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { + $a{$1} += $2; + } + if (/Begin filtering dropped index \(\d+,(\d+)\)/) { + push @b, $1; + } + if (/Finished filtering dropped index \(\d+,(\d+)\)/) { + push @c, $1; + } +} +$prev= 0; +foreach (sort {$a <=> $b} keys %a){ + if ($prev) { + $dummy_idx= $_ - $prev; + }else { + $dummy_idx= 0; + } + $prev= $_; +} +print_array("Begin filtering dropped index+", sort {$a <=> $b} @b); +print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc new file mode 100644 index 00000000000..6faf41ef7b1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc @@ -0,0 +1,15 @@ +# Usage: +# let $max = ; +# let $table =
; +# --source drop_table_repopulate_table.inc +# +eval DELETE FROM $table; + +--disable_query_log +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO $table VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc new file mode 100644 index 00000000000..c6a3ccde7a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc @@ -0,0 +1,6 @@ +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test new file mode 100644 index 00000000000..ebe18ab2e95 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +# Test insert ... 
on duplicate key update statements + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +CREATE TABLE t1 (id1 INT, id2 INT, id3 INT, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; + +CREATE TABLE t2 (id1 INT, id2 INT, id3 INT, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; + + +--source suite/rocksdb/include/dup_key_update.inc + +# Cleanup +DROP TABLE t1; +DROP TABLE t2; + +CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, + id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, + id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; + +CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, + id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, + id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; + +--source suite/rocksdb/include/dup_key_update.inc + +# Cleanup +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test new file mode 100644 index 00000000000..781163f34fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc +--disable_warnings +DROP TABLE IF EXISTS t; +--enable_warnings +CREATE TABLE t(id int primary key) engine=rocksdb; +INSERT INTO t values (1), (2), (3); +--error ER_TABLE_EXISTS_ERROR +CREATE TABLE t(id int primary key) engine=rocksdb; +FLUSH TABLES; +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp +--error ER_UNKNOWN_ERROR +CREATE TABLE t(id int primary key) engine=rocksdb; +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm +FLUSH 
TABLES; +SELECT * FROM t; +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test new file mode 100644 index 00000000000..255819704a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +# +# Any create table using the system column family should fail + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--error ER_WRONG_ARGUMENTS +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB; + +#cleanup +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test new file mode 100644 index 00000000000..bd8071b1b5e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test @@ -0,0 +1,45 @@ +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (b INT PRIMARY KEY); + +# Try simple foreign key - should fail +--error ER_NOT_SUPPORTED_YET +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); + +# Try simple valid syntax with 'foreign' as part - should succeed +CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); +DROP TABLE t2; + +# Try simple valid syntax with 'foreign' and 'key' as part (with no space) - should succeed +CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); +DROP TABLE t2; + +# Try with valid id containing 'foreign' and then a foreign key - should fail +--error ER_NOT_SUPPORTED_YET +CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); + +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); +# Alter with foreign key - should fail +--error ER_NOT_SUPPORTED_YET +ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' - should succeed 
+CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL; +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' and 'key' (no space) - should succeed +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD foreignkey INT NOT NULL; +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' and then foreign key - should fail +CREATE TABLE t2 (a INT NOT NULL); +--error ER_NOT_SUPPORTED_YET +ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); +DROP TABLE t2; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt new file mode 100644 index 00000000000..f0b7f4b5ce5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt @@ -0,0 +1 @@ +--binlog-format=row --binlog-row-image=full --gap-lock-raise-error=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test new file mode 100644 index 00000000000..af7c9b1ab4f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test @@ -0,0 +1,14 @@ +-- source include/have_binlog_format_row.inc +-- source include/have_rocksdb.inc + +# For issue#254 +create table t (id int primary key, value int); +begin; +update t set value=100 where id in (1, 2); +commit; +begin; +--error ER_UNKNOWN_ERROR +select * from t for update; +commit; +drop table t; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test new file mode 100644 index 00000000000..59fe7e6f80a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +let $engine=rocksdb; +--source include/gap_lock_raise_error_all.inc + +SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions; + +SET GLOBAL 
gap_lock_exceptions="t.*"; +--source include/gap_lock_raise_error_init.inc + +set session autocommit=0; +--error ER_UNKNOWN_ERROR +select * from gap1 limit 1 for update; +--error ER_UNKNOWN_ERROR +select * from gap1 where value != 100 limit 1 for update; + +--source include/gap_lock_raise_error_cleanup.inc + +SET GLOBAL gap_lock_exceptions="gap.*"; +--source include/gap_lock_raise_error_init.inc + +set session autocommit=0; +select * from gap1 limit 1 for update; +select * from gap1 where value != 100 limit 1 for update; + +--source include/gap_lock_raise_error_cleanup.inc + +# This test has been temporarily removed because it fails when the server +# is compiled using GCC 4.8 as full regular expression handling was added +# in GCC 4.9. We need to add the ability to detect if full regex is +# available before re-enabling this test. +## Make sure we handle invalid regex expressions and generate a warning +#--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +#SET GLOBAL gap_lock_exceptions="[a-b,abc\\"; +#--exec grep -A 2 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl b/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl new file mode 100644 index 00000000000..c723ec3ca17 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl @@ -0,0 +1,32 @@ +#!/usr/bin/perl + +my $table_name= $ARGV[0]; +my $id1= 1; +my $id2= 1; +my $id3= 1; +my $id4= 1; +my $id5= 1; +my $value= 1000; +my $value2= 'aaabbbccc'; +my $max_rows = 1 * 10000; + +for(my $row_id= 1; $row_id <= $max_rows; $row_id++) { + my $value_clause = "($id1, $id2, $id3, $id4, $id5, $value, \"$value2\")"; + + if ($row_id % 100 == 1) { + print "INSERT INTO $table_name VALUES"; + } + + if ($row_id % 100 == 0) { + print "$value_clause;\n"; + }else { + print "$value_clause,"; + } + + $id4++; + $id5++; + $id3++ if($row_id % 5 == 0); + $id2++ if($row_id % 5 == 0); + 
$id1++ if($row_id % 10 == 0); +} + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test new file mode 100644 index 00000000000..4f07bf206f2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test @@ -0,0 +1,25 @@ +--source include/have_rocksdb.inc + +# +# Create tables with partitions and try to generate an error while creating +# partitions. +# +--disable_warnings + +DROP TABLE IF EXISTS t1; + +--enable_warnings + +CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3; + +SHOW TABLES; + +--disable_query_log +call mtr.add_suppression("Failed to execute action for entry.*"); +--enable_query_log + +--error ER_WRONG_TABLE_NAME +ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index'); + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test new file mode 100644 index 00000000000..91a492ef26e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test @@ -0,0 +1,53 @@ +--source include/have_rocksdb.inc + +# +# Basic HANDLER counts +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +FLUSH STATUS; +CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT, +INDEX b(b)) ENGINE=rocksdb; +INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50); +SHOW SESSION STATUS LIKE 'Handler_write%'; + +UPDATE t1 SET b=1000 WHERE id=1; +SHOW SESSION STATUS LIKE 'Handler_update%'; + +DELETE FROM t1 WHERE id=2; +SHOW SESSION STATUS LIKE 'Handler_delete%'; + +INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +SHOW SESSION STATUS LIKE 'Handler_write%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id=8; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE b=6; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; 
+--sorted_result +SELECT * FROM t1; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE b <=5 ORDER BY b; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id >=8 ORDER BY id; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id < 8 ORDER BY id; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +# Cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc new file mode 100644 index 00000000000..17baf5b6c57 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc @@ -0,0 +1,257 @@ +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +### See full test cases here: +### https://github.com/ept/hermitage/blob/master/mysql.md + +--disable_warnings +DROP TABLE IF EXISTS test; +--enable_warnings + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con3,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; + +create table test (id int primary key, value int) engine=rocksdb; + +### Prevents Aborted Reads (G1a) +--source hermitage_init.inc + +connection con1; +select * from test; +update test set value = 101 where id = 1; +connection con2; +select * from test; # Still shows 1 => 10 +connection con1; +rollback; +connection con2; # Still shows 1 => 10 +select * from test; +commit; + + +### Prevents Intermediate Reads (G1b) +--source hermitage_init.inc + +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; # Still shows 1 => 10 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; # Now shows 1 => 11 +commit; + + +### Prevents Circular 
Information Flow (G1c) +--source hermitage_init.inc + +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; # Still shows 2 => 20 +connection con2; +select * from test where id = 1; # Still shows 1 => 10 +connection con1; +commit; +connection con2; +commit; + + +### prevents Observed Transaction Vanishes (OTV) +--source hermitage_init.inc + +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +send update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +reap; +connection con3; +select * from test; # Shows 1 => 11, 2 => 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; # Shows 1 => 11, 2 => 19 +connection con2; +commit; +connection con3; +select * from test; # Shows 1 => 12, 2 => 18 +commit; + + +### Predicate-Many-Preceders (PMP) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where value = 30; +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +# RC: Returns the newly inserted row +# RR: Still returns nothing +select * from test where value % 3 = 0; +commit; + +--source hermitage_init.inc +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +send delete from test where value = 20; +connection con1; +commit; +connection con2; +if ($trx_isolation == "READ COMMITTED") +{ + reap; + # RC: Returns 2 => 30 + select * from test; +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + reap; + select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; + +} +commit; + + +### 
Lost Update (P4) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test where id = 1; +connection con1; +update test set value = 11 where id = 1; +connection con2; +send update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +if ($trx_isolation == "READ COMMITTED") +{ + reap; + # RC: Returns 1 => 12 + select * from test; +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + reap; +} +commit; + + +### Read Skew (G-single) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test where id = 1; +select * from test where id = 2; +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; # RC shows 18, RR shows 20 +commit; + +# test using predicate dependencies +--source hermitage_init.inc + +connection con1; +select * from test where value % 5 = 0; +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +# RC: returns 1 => 12, RR: returns nothing +select * from test where value % 3 = 0; +commit; + +# on a write predicate +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test; +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +if ($trx_isolation == "READ COMMITTED") +{ + delete from test where value = 20; # doesn't delete anything + select * from test where id = 2; # shows 2 => 18 +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + delete from test where value = 20; +} +commit; + + +### Write Skew (G2-item) -- Neither RC/RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id in (1,2); +connection con2; +select * from 
test where id in (1,2); +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; + +### Anti-Dependency Cycles (G2) -- Neither RC/RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where value % 3 = 0; +connection con2; +select * from test where value % 3 = 0; +connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; +select * from test where value % 3 = 0; # Either. Returns 3 => 30, 4 => 42 +connection con1; +select * from test where value % 3 = 0; + + +connection default; +drop table test; + +disconnect con1; +disconnect con2; +disconnect con3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test new file mode 100644 index 00000000000..e4138e8d89f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test @@ -0,0 +1,10 @@ +--source include/have_rocksdb.inc + +# Hermitage is an attempt to test transaction isolation levels. 
+# https://github.com/ept/hermitage + +let $trx_isolation = READ COMMITTED; +--source hermitage.inc + +let $trx_isolation = REPEATABLE READ; +--source hermitage.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc new file mode 100644 index 00000000000..4f3f03efab0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc @@ -0,0 +1,8 @@ +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test new file mode 100644 index 00000000000..9ee23a88bbe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# Information Schema DDL +# + +--disable_warnings +DROP TABLE IF EXISTS is_ddl_t1; +DROP TABLE IF EXISTS is_ddl_t2; +--enable_warnings + +CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, + PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf') + ENGINE = ROCKSDB; + +CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, + PRIMARY KEY (z, y) COMMENT 'zy_cf', + KEY (x)) ENGINE = ROCKSDB; + +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; + +# cleanup +DROP TABLE is_ddl_t1; +DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.inc b/storage/rocksdb/mysql-test/rocksdb/t/index.inc new file mode 100644 index 00000000000..6b4e4ff233b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index.inc @@ -0,0 +1,121 @@ +# +# Basic syntax related to indexes: +# unique and non-unique keys, +# single- and multi-column keys, +# index option COMMENT. 
+# +# See other index* tests for operations +# which are less likely to be supported +# +# PRIMARY KEY syntax is covered in index_primary test. +# Index types BTREE|HASH -- in index_type_btree|hash tests. +# SPATIAL -- in type_spatial_indexes test. +# FULLTEXT -- in fulltext_search test. +# KEY_BLOCK_SIZE -- in index_key_block_size test. +# +# Usage to call the test from another test: +# +# A calling test may define $index_type, in which case +# USING clause will be added to the syntax. +# + +################################################ +# TODO: +# A part of the test is disabled because unique indexes +# are not currently supported +################################################ + + +let $using_index_type = ; +if ($index_type) +{ + let $using_index_type = USING $index_type; +} + + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY a_b $using_index_type (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY $using_index_type (a), + KEY $using_index_type (b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +--disable_parsing + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + UNIQUE INDEX $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY,ER_DUP_KEY +INSERT INTO t1 (a,b) VALUES (1,'c'); + +DROP TABLE t1; + +--source drop_table_sync.inc + +--enable_parsing + +# +# ALTER TABLE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); + +eval ALTER TABLE t1 ADD KEY (a) $using_index_type COMMENT 'simple index on a'; +--replace_column 7 # 
+SHOW INDEX FROM t1; +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; + +--disable_parsing + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT AUTO_INCREMENT PRIMARY KEY, + UNIQUE INDEX $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY,ER_DUP_KEY +INSERT INTO t1 (a,b) VALUES (1,'c'); + +ALTER TABLE t1 DROP INDEX a; +INSERT INTO t1 (a,b) VALUES (1,'c'); +--error ER_DUP_ENTRY +eval ALTER TABLE t1 ADD UNIQUE INDEX a(a) $using_index_type; +DROP TABLE t1; + +--enable_parsing + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.test b/storage/rocksdb/mysql-test/rocksdb/t/index.test new file mode 100644 index 00000000000..f12e7714bc3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Basic syntax related to indexes: +# unique and non-unique keys, +# single- and multi-column keys, +# index option COMMENT. +# +# See other index* tests for operations +# which are less likely to be supported +# +# PRIMARY KEY syntax is covered in index_primary test. +# Index types BTREE|HASH -- in index_type_btree|hash tests. +# SPATIAL -- in type_spatial_indexes test. +# FULLTEXT -- in fulltext_search test. +# KEY_BLOCK_SIZE -- in index_key_block_size test. 
+# + +# (Default index type) + +--source index.inc + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt new file mode 100644 index 00000000000..436edf2b40c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test new file mode 100644 index 00000000000..1021846c508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test @@ -0,0 +1,51 @@ +--source include/have_rocksdb.inc + +# +# Information Schema index file map +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + + +CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4); + +COMMIT; + +# Flush memtable out to SST +SET GLOBAL rocksdb_force_flush_memtable_now = 1; + +############################################################################### +# Test that expected index_file_map data exists +############################################################################### + +# Return the data for the primary key of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); + +# Return the data for the secondary index of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = 
"j"); + +# Return the data for the primary index of t2 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY"); + +# cleanup +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test new file mode 100644 index 00000000000..f156aec0021 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test @@ -0,0 +1,70 @@ +--source include/have_rocksdb.inc + +# +# KEY_BLOCK_SIZE index option. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY (a) KEY_BLOCK_SIZE=8 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY ind1(b ASC) KEY_BLOCK_SIZE=0 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value' +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT AUTO_INCREMENT PRIMARY KEY, + KEY a_b(a,b) KEY_BLOCK_SIZE=8192 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +DROP TABLE t1; + +--source drop_table_sync.inc + +# +# ALTER TABLE +# + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY (b) +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (100,'z'); + +ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192; +--replace_column 7 # +SHOW INDEX FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test new file mode 100644 index 00000000000..3abd2dd05fe --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test @@ -0,0 +1,64 @@ +--source include/have_rocksdb.inc + +# +# Basic syntax related to primary keys +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, + b CHAR(8) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'c'); + +DROP TABLE t1; + +--error ER_MULTIPLE_PRI_KEY +CREATE TABLE t1 (a INT PRIMARY KEY, + b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY (a,b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'b'); + DROP TABLE t1; + +# KEY in a column definition means PK! + +CREATE TABLE t1 (a INT KEY, + b CHAR(8), + KEY (b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +--error ER_MULTIPLE_PRI_KEY +ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a); +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test new file mode 100644 index 00000000000..4adc5b55329 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test @@ -0,0 +1,12 @@ +--source include/have_rocksdb.inc + +# +# Index type BTREE +# + +let $index_type = BTREE; + +--source index.inc + +let $index_type =; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test new file mode 100644 index 00000000000..f3dc9cf5f10 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test @@ -0,0 +1,12 @@ +--source 
include/have_rocksdb.inc + +# +# Index type HASH +# + +let $index_type = HASH; + +--source index.inc + +let $index_type =; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt new file mode 100644 index 00000000000..a12f583ef82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt @@ -0,0 +1 @@ +--force-restart --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test new file mode 100644 index 00000000000..39bae56bea6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -0,0 +1,72 @@ +--source include/have_rocksdb.inc +--source include/have_log_bin.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +--enable_warnings + +--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) +--replace_result $max_index_id max_index_id +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; + +CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); + +--let $MASTER_UUID = query_get_value(SELECT @@SERVER_UUID, @@SERVER_UUID, 1) +--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) +--replace_result $MASTER_UUID uuid $max_index_id max_index_id +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; + +CREATE INDEX tindex1 on t1 (i1); +--let $start_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + +CREATE INDEX 
tindex2 on t1 (i2); +--let $end_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + +if ($end_max_index_id <= $start_max_index_id) { + echo Max index ID did not increase; +} + +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; + +CREATE TABLE t2 ( + a int, + b int, + c int, + d int, + e int, + PRIMARY KEY (a) COMMENT "cf_a", + KEY (b) COMMENT "cf_b", + KEY (c) COMMENT "cf_c", + KEY (d) COMMENT "$per_index_cf", + KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; + +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; + +CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB; +insert into t3 (a) values (1), (2), (3); +SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK; +--let $t3_index_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', INDEX_NUMBER, 1) +--let $t3_cf_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', COLUMN_FAMILY, 1) +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +DROP TABLE t3; +--let $result = query_get_value("SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'DDL_DROP_INDEX_ONGOING' AND NAME LIKE 'cf_id:$t3_cf_id,index_id:$t3_index_id'", NAME, 1) +--echo $result +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +--echo next line shouldn't cause assertion to fail +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK; + +DROP TABLE t1; +DROP TABLE t2; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc new file mode 100644 index 00000000000..c798bb91cfa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc @@ -0,0 +1,35 @@ +# This inc script creates two procedures -- save_read_stats() and +# get_read_stats(). get_read_stats() prints differential rocksdb_rows_read, +# rocksdb_rows_updated, and rocksdb_rows_deleted values since calling +# save_read_stats(). + +delimiter //; +create procedure save_read_stats() +begin + select rows_requested into @rq from information_schema.table_statistics + where table_schema=database() and table_name='t1'; + select variable_value into @rr from information_schema.global_status + where variable_name='rocksdb_rows_read'; + select variable_value into @ru from information_schema.global_status + where variable_name='rocksdb_rows_updated'; + select variable_value into @rd from information_schema.global_status + where variable_name='rocksdb_rows_deleted'; +end// + +create procedure get_read_stats() +begin + select rows_requested - @rq as rows_requested from + information_schema.table_statistics + where table_schema=database() and table_name='t1'; + select variable_value - @rr as rows_read from + information_schema.global_status + where variable_name='rocksdb_rows_read'; + select variable_value - @ru as rows_updated from + information_schema.global_status + where variable_name='rocksdb_rows_updated'; + select variable_value - @rd as rows_deleted from + information_schema.global_status + where variable_name='rocksdb_rows_deleted'; +end// +delimiter ;// + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test new file mode 100644 index 00000000000..99ad24f09fc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test @@ -0,0 +1,35 @@ +--source 
include/have_rocksdb.inc + +# Make sure that the InnoDb information schema tables are disabled when InnoDB +# is turned off and attempting to access them doesn't crash. + +SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert.test b/storage/rocksdb/mysql-test/rocksdb/t/insert.test new file mode 100644 index 00000000000..14cfe1cadb8 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/insert.test @@ -0,0 +1,99 @@ +--source include/have_rocksdb.inc + +# +# Basic INSERT statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# INSERT [INTO] .. VALUES|VALUE .. + +INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106); +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 VALUE (10,'foo',107),(11,'abc',108); +--sorted_result +SELECT a,b FROM t1; + +INSERT INTO t1 (b,a) VALUES ('test',0); +--sorted_result +SELECT a,b FROM t1; + +INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL); +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 (a) VALUE (10),(20); +--sorted_result +SELECT a,b FROM t1; + +# INSERT [INTO] .. SET + +INSERT INTO t1 SET a = 11, b = 'f'; +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 SET b = DEFAULT; +--sorted_result +SELECT a,b FROM t1; + + +# INSERT .. SELECT + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t2 SELECT a,b,pk FROM t1; +INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo'; +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 (a,b) SELECT a,b FROM t1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1, t2; + +# +# Transactional INSERT +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +BEGIN; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo'); +INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc'); +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +BEGIN; +INSERT INTO t1 (b,a) VALUES ('test',0); +SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +RELEASE SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +ROLLBACK; +--sorted_result +SELECT a,b FROM t1; + +BEGIN; +INSERT t1 (a) VALUE (10),(20); +SAVEPOINT spt1; +INSERT INTO t1 SET a = 11, b = 'f'; +INSERT t1 SET b = DEFAULT; +--error 
ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +INSERT INTO t1 (b,a) VALUES ('test1',10); +--error ER_UNKNOWN_ERROR +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt new file mode 100644 index 00000000000..2d5e33057f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt @@ -0,0 +1,6 @@ +--rocksdb_write_disable_wal=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=256;level0_stop_writes_trigger=256;max_write_buffer_number=16;compression_per_level=kNoCompression;memtable=vector:1024 +--rocksdb_override_cf_options=__system__={memtable=skip_list:16} +--rocksdb_compaction_sequential_deletes=0 +--rocksdb_compaction_sequential_deletes_window=0 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test new file mode 100644 index 00000000000..3e1cf7375e8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1( + id bigint not null primary key, + i1 bigint, #unique + i2 bigint, #repeating + c1 varchar(20), #unique + c2 varchar(20), #repeating + index t1_2(i1) +) engine=rocksdb; + +--disable_query_log +set rocksdb_bulk_load=1; +let $i=0; +while ($i<50000) +{ + inc $i; + eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10); +} +set rocksdb_bulk_load=0; +--enable_query_log + +select count(*), sum(id), sum(i1), sum(i2) from t1; + +# reload without load optimized config +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source 
include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_write_disable_wal=0 --rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select count(*), sum(id), sum(i1), sum(i2) from t1; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test new file mode 100644 index 00000000000..b2f37a07999 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test @@ -0,0 +1,93 @@ +--source include/have_rocksdb.inc + +# +# INSERT statements for tables with keys +# + +################################################## +# TODO: +# A part of the test is disabled because currently +# unique indexes are not supported +################################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo #---------------------------------------- +--echo # UNIQUE KEYS are not supported currently +--echo #----------------------------------------- + +--disable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--error 
ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +INSERT INTO t1 (a,b) VALUES (0,''); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(4,'d') ON DUPLICATE KEY UPDATE a = a+10; + +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (100,'b'), (2,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE a = a+VALUES(a); +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (101,'x'),(101,'x'); +DROP TABLE t1; + +--enable_parsing + + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +INSERT INTO t1 (a,b) VALUES (0,''); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test new file mode 100644 index 00000000000..b0b3eb7a8fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test @@ -0,0 +1,23 @@ +--source 
include/have_rocksdb.inc + +create table t1 ( + id int, + value int, + primary key (id) +) engine=rocksdb; + +insert into t1 values(1,1),(2,2); +set autocommit=0; + +begin; +insert into t1 values (50,50); +select * from t1; + +update t1 set id=id+100; + +select * from t1; + +rollback; +set autocommit=1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt new file mode 100644 index 00000000000..436edf2b40c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test new file mode 100644 index 00000000000..743bf7dd7a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; + +create table t100(pk int primary key, a int, b int, key(a)); +insert into t100 select a,a,a from test.one_k; + +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; + +update t100 set a=a+1; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; + +drop table ten, t100, one_k; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/issue111.test b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test new file mode 100644 index 00000000000..671ea4708d6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test @@ -0,0 +1,38 @@ +--source include/have_rocksdb.inc + +connect (con2,localhost,root,,); +connection default; + +create table t1 ( + pk int not null primary key, + col1 int not null, + col2 int not null, + key(col1) +) engine=rocksdb; + +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; + +insert into t1 select a,a,a from one_k; + +--echo # Start the transaction, get the snapshot +begin; +select * from t1 where col1<10; + +--echo # Connect with another connection and make a conflicting change +connection con2; + +begin; +update t1 set col2=123456 where pk=0; +commit; + +connection default; + +--error ER_LOCK_DEADLOCK +update t1 set col2=col2+1 where col1 < 10 limit 5; + +disconnect con2; +drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue290.test b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test new file mode 100644 index 00000000000..99c0479ec21 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test @@ -0,0 +1,40 @@ +--source include/have_rocksdb.inc + +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` 
(`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO linktable (id1, link_type, id2) values (1, 1, $i); + inc $i; + eval $insert; +} +--enable_query_log +set global rocksdb_force_flush_memtable_now=1; + +insert into linktable (id1, link_type, id2) values (2, 1, 1); +insert into linktable (id1, link_type, id2) values (2, 1, 2); +insert into linktable (id1, link_type, id2) values (2, 1, 3); +insert into linktable (id1, link_type, id2) values (2, 1, 4); +insert into linktable (id1, link_type, id2) values (2, 1, 5); +insert into linktable (id1, link_type, id2) values (2, 1, 6); +insert into linktable (id1, link_type, id2) values (2, 1, 7); +insert into linktable (id1, link_type, id2) values (2, 1, 8); +insert into linktable (id1, link_type, id2) values (2, 1, 9); +insert into linktable (id1, link_type, id2) values (2, 1, 10); + +--replace_column 9 # +explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); + +drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue314.test b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test new file mode 100644 index 00000000000..2059eef2195 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a int); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t1 VALUES(1); +--error ER_UNKNOWN_ERROR +select * from t1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +select * from t1; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test new file mode 
100644 index 00000000000..bedf87a8efa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = READ COMMITTED; + +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test new file mode 100644 index 00000000000..8ee3af60bf3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = READ UNCOMMITTED; +--source transaction_isolation.inc +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test new file mode 100644 index 00000000000..cf29073f69e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = REPEATABLE READ; +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test new file mode 100644 index 00000000000..9b5db0e8998 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = SERIALIZABLE; +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc new file mode 100644 index 00000000000..5d3678f5f27 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc @@ -0,0 +1,117 @@ +--source include/have_rocksdb.inc + +# +# Basic LOAD DATA statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +let 
$datadir = `SELECT @@datadir`; + +--write_file $datadir/se_loaddata.dat +1,foo, +2,bar, +3,, +4,abc, +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ',' (a,b); +--sorted_result +SELECT a,b FROM t1; + +--replace_result $datadir +eval +LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +5;YYY; +102;'zzz'; +0;'test'; +EOF + +--replace_result $datadir +eval +LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ';' + (a) SET b='loaded'; + +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +prefix:5;'foo'; +prefix:6;''; +prefix:100;foo; +prefix:7;'test';suffix +101;abc; +102;'z'; +prefix:0;; +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' + IGNORE 2 LINES (a,b); + +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +1 foo +2 bar +3 +4 abc +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1; +--sorted_result +SELECT a,b FROM t1; +--remove_file $datadir/se_loaddata.dat + + +--write_file $datadir/se_replacedata.dat +1 aaa +2 bbb +3 ccc +4 ddd +EOF + +--replace_result $datadir +if ($skip_unique_check == 1) +{ + --error ER_UNKNOWN_ERROR + eval + LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1; +} +if ($skip_unique_check == 0) +{ + eval + LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1; +} +--sorted_result +SELECT a,b FROM t1; +--remove_file $datadir/se_replacedata.dat + + +# Cleanup +DROP TABLE t1; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test new file mode 100644 index 00000000000..837fa746ed7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +let $skip_unique_check = 0; +--source loaddata.inc +let $skip_unique_check = 1; +set session rocksdb_skip_unique_check=1; +--source loaddata.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock.test b/storage/rocksdb/mysql-test/rocksdb/t/lock.test new file mode 100644 index 00000000000..015a08bef57 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock.test @@ -0,0 +1,202 @@ +--source include/have_rocksdb.inc + +# Standard table locking: +# LOCK TABLE .. READ +# LOCK TABLE .. [LOW_PRIORITY] WRITE +# UNLOCK TABLES +# +# and global locking: +# FLUSH TABLES [..] WITH READ LOCK +# +############################################ +# TODO: +# A part of the test is currently disabled +# because temporary tables are not supported +############################################ + +--enable_connect_log + +# Save the initial number of concurrent sessions. 
+--source include/count_sessions.inc + +connect (con1,localhost,root,,); +SET lock_wait_timeout=1; + +connection default; + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3); + +# LOW_PRIORITY has no effect, but is still syntactically correct +LOCK TABLE t1 LOW_PRIORITY WRITE; +SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2; + +UPDATE t1 SET id=-1 WHERE id=1; + +connection con1; +# With WRITE lock held by connection 'default', +# nobody else can access the table +--error ER_LOCK_WAIT_TIMEOUT +SELECT id,id2 FROM t1; +--error ER_LOCK_WAIT_TIMEOUT +LOCK TABLE t1 READ; + +connection default; +LOCK TABLE t1 READ; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +UPDATE t1 SET id=1 WHERE id=1; + +connection con1; +# With READ lock held by connection 'default', +# it should be possible to read from the table +# or acquire another READ lock, +# but not update it or acquire WRITE lock +SELECT COUNT(DISTINCT id) FROM t1; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id=2 WHERE id=2; +--error ER_LOCK_WAIT_TIMEOUT +LOCK TABLE t1 WRITE; +LOCK TABLE t1 READ; +UNLOCK TABLES; + + +--connection default + +--error ER_TABLE_NOT_LOCKED +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; + +--disable_parsing + +CREATE TEMPORARY TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +DROP TABLE IF EXISTS t2; + +--enable_parsing + +UNLOCK TABLES; + +CREATE TABLE t2 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1; +UPDATE t1 SET id=1 WHERE id=-1; +DROP TABLE t1,t2; + +# +# INSERT ... 
SELECT with lock tables +# + +CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (nr,nm) VALUES (1,3); +INSERT INTO t2 (nr,nm) VALUES (2,4); + +LOCK TABLES t1 WRITE, t2 READ; +INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3; +INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4; +UNLOCK TABLES; + +LOCK TABLES t1 WRITE; +--error ER_TABLE_NOT_LOCKED +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1; +UNLOCK TABLES; +LOCK TABLES t1 WRITE, t1 AS t1_alias READ; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias; +--error ER_TABLE_NOT_LOCKED +DROP TABLE t1,t2; +UNLOCK TABLES; +DROP TABLE t1,t2; + +# +# Check that a dropped table is removed from a lock + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE; +# This removes one table after the other from the lock. +DROP TABLE t2, t3, t1; +# +# Check that a lock merge works + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ; + +ALTER TABLE t2 ADD COLUMN c2 INT; + +DROP TABLE t1, t2, t3; + +# FLUSH TABLES is not permitted when there is an active LOCK TABLES .. READ, +# FLUSH TABLES .. 
WITH READ LOCK should be used instead +# (and for other connections the table is locked) + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; + +LOCK TABLE t1 READ, t2 READ; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +FLUSH TABLE t1; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +FLUSH TABLES; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +FLUSH TABLES t1, t2 WITH READ LOCK; +UNLOCK TABLES; + +FLUSH TABLES t1, t2 WITH READ LOCK; + +--connection con1 +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +--connection default +UNLOCK TABLES; + +# Global lock + +FLUSH TABLES WITH READ LOCK; + +--connection con1 +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +--connection default +UNLOCK TABLES; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +FLUSH TABLES WITH READ LOCK; +--error ER_CANT_UPDATE_WITH_READLOCK +DROP TABLE t1, t2; +UNLOCK TABLES; +DROP TABLE t1, t2; + +--disconnect con1 + +# +# Ensure that FLUSH TABLES doesn't substitute a base locked table +# with a temporary one. +# + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; + +LOCK TABLE t1 WRITE, t2 WRITE; + +--disable_parsing + +CREATE TEMPORARY TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +FLUSH TABLE t1; +DROP TEMPORARY TABLE t1; + +--enable_parsing + +SELECT a,b FROM t1; +UNLOCK TABLES; +DROP TABLE t1, t2; + +# Check that all connections opened by test cases in this file are really +# gone so execution of other tests won't be affected by their presence. 
+--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test new file mode 100644 index 00000000000..d8a6bde45c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test @@ -0,0 +1,110 @@ +--source include/have_rocksdb.inc + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +#1. Using all primary key columns, with equal conditions +connection con1; +CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB; + +#1.1 SELECT FOR UPDATE +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; + +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; + +#1.2 UPDATE +connection con1; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; + +#1.3 DELETE +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; + +--disable_parsing +# +# The following is commented out because RocksDB's Transaction API doesn't +# "support" READ COMMITTED, in particular, it doesn't release row locks +# after each statement. 
(MyRocks is able to request a new snapshot for +# every statement, but this won't free the locks. TODO: Is the behavior +# that is tested below really needed?) +# +connection con1; +ROLLBACK; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; + +connection con2; +ROLLBACK; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; + +connection con1; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; + +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; + +connection con1; +ROLLBACK; +connection con2; +ROLLBACK; + +--enable_parsing +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test new file mode 100644 index 00000000000..035046ae368 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test @@ -0,0 +1,67 @@ +--source include/have_rocksdb.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case1_1.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case1_1.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case1_2.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case1_2.inc + +let $lock_scanned_rows=0; +let $isolation_level = REPEATABLE READ; +--source 
suite/rocksdb/include/locking_issues_case2.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case2.inc + +# Rerun the case2 tests with rocksdb_lock_scanned_rows on +let $lock_scanned_rows=1; +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case2.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case2.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case3.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case3.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case4.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case4.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case5.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case5.inc + +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case6.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case6.inc + +let $lock_scanned_rows=0; +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case7.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case7.inc + +# Rerun the case7 tests with rocksdb_lock_scanned_rows on +let $lock_scanned_rows=1; +let $isolation_level = REPEATABLE READ; +--source suite/rocksdb/include/locking_issues_case7.inc + +let $isolation_level = READ COMMITTED; +--source suite/rocksdb/include/locking_issues_case7.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/misc.test b/storage/rocksdb/mysql-test/rocksdb/t/misc.test new file mode 100644 index 00000000000..b8efd0389d2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/misc.test @@ -0,0 +1,45 @@ +--source 
include/have_rocksdb.inc + +# +# Different statements not related to an engine, +# but added to provide function coverage for handler.cc and handler.h. +# The test can be disabled or removed later. +# + +# hits get_error_message(int, String*) + +INSERT INTO mysql.event ( + db, + name, + body, + definer, + interval_value, + interval_field, + originator, + character_set_client, + collation_connection, + db_collation, + body_utf8) +values ( + database(), + "ev1", + "select 1", + user(), + 100, + "SECOND_MICROSECOND", + 1, + 'utf8', + 'utf8_general_ci', + 'utf8_general_ci', + 'select 1'); + +--error ER_NOT_SUPPORTED_YET +SHOW EVENTS; +DROP EVENT ev1; + +# hits get_foreign_key_list(THD*, List*) +--sorted_result +SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt new file mode 100644 index 00000000000..53303252212 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt @@ -0,0 +1 @@ +--binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test new file mode 100644 index 00000000000..0c04b60427e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +################################################################################ +# Test the --skip-empty-trans option for mysqlbinlog with rocksdb +################################################################################ +--disable_warnings +--source 
include/have_gtid.inc +--enable_warnings + +reset master; +set timestamp=1000000000; +let $storage_engine=rocksdb; + +# Test row based replication +set SESSION binlog_format = 'ROW'; +--source include/mysqlbinlog_gtid_skip_empty_trans_input.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt new file mode 100644 index 00000000000..e41620e94f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt @@ -0,0 +1 @@ +--force-restart --binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test new file mode 100644 index 00000000000..107790f0c9a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -0,0 +1,65 @@ +--source include/have_rocksdb.inc + +--source include/have_log_bin.inc + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +drop table if exists r1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb; +insert into r1 values (1,1,1,1,1,1,1,1); +insert into r1 values (1,1,1,2,2,2,2,2); +insert into r1 values (1,1,2,1,3,3,3,3); +insert into r1 values (1,1,2,2,4,4,4,4); +insert into r1 values (1,2,1,1,5,5,5,5); +insert into r1 values (1,2,1,2,6,6,6,6); +insert into r1 values (1,2,2,1,7,7,7,7); +insert into r1 values (1,2,2,2,8,8,8,8); +insert into r1 values (2,1,1,1,9,9,9,9); +insert into r1 values (2,1,1,2,10,10,10,10); +insert into r1 values (2,1,2,1,11,11,11,11); +insert into r1 values (2,1,2,2,12,12,12,12); +insert into r1 values (2,2,1,1,13,13,13,13); +insert into r1 values (2,2,1,2,14,14,14,14); +insert into r1 values (2,2,2,1,15,15,15,15); +insert into r1 values 
(2,2,2,2,16,16,16,16); + +connection con2; +BEGIN; +insert into r1 values (5,5,5,5,5,5,5,5); +update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; + +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test + +rollback; + +connection con1; + +--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l + +set @save_default_storage_engine=@@global.default_storage_engine; +SET GLOBAL default_storage_engine=rocksdb; +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test +--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l + + +# wiping general log so that this test case doesn't fail with --repeat +--exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log + +# testing mysqldump work with statement based binary logging +SET GLOBAL binlog_format=statement; +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null +SET GLOBAL binlog_format=row; + +drop table r1; +reset master; +set @@global.default_storage_engine=@save_default_storage_engine; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt new file mode 100644 index 00000000000..2672d4ff35e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt @@ -0,0 +1 @@ +--binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test new file mode 100644 index 00000000000..3631e703de6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test @@ -0,0 +1,43 @@ +--source include/have_rocksdb.inc + +--source include/have_log_bin.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +create table t1 (id int primary 
key, value int, value2 varchar(200), index(value)) engine=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 50000) { + let $insert = INSERT INTO t1 VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; +} +--enable_query_log + +optimize table t1; + +#wiping block cache +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null + +# verifying block cache was not filled +select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +select count(*) from t1; + +# verifying block cache was filled +select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +#cleanup +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test new file mode 100644 index 00000000000..e3e0cf898a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test @@ -0,0 +1,26 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 10000; 
+let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} +--enable_query_log + +SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; + +set session debug= "+d,myrocks_simulate_negative_stats"; +SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +set session debug= "-d,myrocks_simulate_negative_stats"; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test new file mode 100644 index 00000000000..ccef7182c11 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# For Issue#117 MyRocks does merge sort with small data sets + +let $datadir = `SELECT @@datadir`; + +--disable_query_log +let $i= 0; +while ($i < 30) { + DROP TABLE IF EXISTS ti_nk; + + CREATE TABLE `ti_nk` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` varchar(36) COLLATE latin1_bin NOT NULL, + PRIMARY KEY (`c`) + ); + let $j = 0; + while ($j < 200) { + eval insert into ti_nk values ($j, $j, md5($j)); + inc $j; + } + + select variable_value into @s from information_schema.global_status where variable_name='Sort_merge_passes'; + eval SELECT a, b, c FROM ti_nk ORDER BY a,b,c INTO OUTFILE '$datadir/select.out'; + --remove_file $datadir/select.out + select case when variable_value-@s = 0 then 'true' else 'false' end as skip_merge_sort from information_schema.global_status where variable_name='Sort_merge_passes'; + inc $i; +} +--enable_query_log + +DROP TABLE ti_nk; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc new file mode 100644 index 00000000000..0dcd452194a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc @@ -0,0 +1,65 @@ +# +# This include file checks some very basic 
capabilities for restart insert +# update and delete for tables with no pk +# NOTE: requires table with structure similar to +# CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +# + +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; + +### test INSERT +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); + +## test SELECT w/ index scans +--sorted_result +SELECT * FROM t1 WHERE a = 35; +--sorted_result +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +--sorted_result +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +--sorted_result +SELECT * FROM t1 WHERE a > 35; +--sorted_result +SELECT * FROM t1; + +# test UPDATE +UPDATE t1 SET a=a+100; +--sorted_result +SELECT * FROM t1; + +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=123 WHERE a=35; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=321 WHERE b='ccc'; +--sorted_result +SELECT * FROM t1; + + +## test RESTART/OPEN +--source include/restart_mysqld.inc +## test insert after restart +INSERT INTO t1 (a,b) VALUES (45,'bob'); +--sorted_result +SELECT * FROM t1; + +# test DELETE +DELETE FROM t1 WHERE a=123; +--sorted_result +SELECT * FROM t1; + +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +--sorted_result +SELECT * FROM t1; + +# test TRUNCATE +TRUNCATE TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt new file mode 100644 index 00000000000..71f74ee53ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc new file mode 100644 
index 00000000000..a41bd046455 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc @@ -0,0 +1,78 @@ +let $datadir = `SELECT @@datadir`; + +--disable_warnings +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; + +--disable_query_log +let $t = 1; +while ($t <= 6) { + let $i = 1; + while ($i <= 10000) { + let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log + +# Disable auto compaction so that effects of optimize table are stable +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; +delete from t1 where id <= 9900; +delete from t2 where id <= 9900; 
+delete from t3 where id <= 9900; +delete from t4 where id <= 9900; +delete from t5 where id <= 9900; +delete from t6 where id <= 9900; + +--let $size_cmd = du -ks $datadir/.rocksdb/*.sst | awk '{t=t+\$1} END{print t}' >> $MYSQL_TMP_DIR/sst_size.dat +--exec $size_cmd +optimize table t1; +--exec $size_cmd +optimize table t3; +--exec $size_cmd +optimize table t4; +--exec $size_cmd +optimize table t6; +--exec $size_cmd + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; + +# run a check script to verify sst files reduced enough during each optimize table +--exec perl suite/rocksdb/optimize_table_check_sst.pl $MYSQL_TMP_DIR/sst_size.dat + +#cleanup +optimize table t2; +optimize table t5; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP TABLE t5; +DROP TABLE t6; +--remove_file $MYSQL_TMP_DIR/sst_size.dat diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test new file mode 100644 index 00000000000..383bae72904 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# OPTIMIZE TABLE statements +# + +--source optimize_table.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test new file mode 100644 index 00000000000..9f3ccb9dd6c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -0,0 +1,42 @@ +--source include/have_rocksdb.inc + +# +# Create tables with partitions and try to update/select from them. 
+# +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; + +--enable_warnings + +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} + +# Parition string is "#P#". To verify that parsing is done correctly then we'll +# verify if tables containing "P" somwhere can be created correctly. +CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB; +CREATE TABLE TEMP0 (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 3; +CREATE TABLE VAR_SAMP (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 10; + +--enable_query_log + +SHOW TABLES; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; +DROP TABLE VAR_POP; +DROP TABLE TEMP0; +DROP TABLE VAR_SAMP; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test new file mode 100644 index 00000000000..4290811e868 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test @@ -0,0 +1,92 @@ +--source include/have_rocksdb.inc + +# +# Information Schema perf context +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + + +SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level; +SET GLOBAL rocksdb_perf_context_level=3; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); + +############################################################################### +# Test that expected perf context stats exists +############################################################################### + +# Check per-table perf context +--replace_column 5 # 
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1'; + +# Check global perf context +--replace_column 2 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL; + +############################################################################### +# Test iteration skip counters +############################################################################### + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +SELECT * FROM t1; + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +SELECT * FROM t1 WHERE j BETWEEN 1 AND 5; + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +############################################################################### +# Test write I/O stats +############################################################################### + +# Statistics for multi-statement transactions cannot be attributed to +# individual tables but should show up in global perf context stats + +BEGIN; +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t2 VALUES (3), (4); +COMMIT; + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS' AND VALUE > 0; + +SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; + +# Single statement writes do show up in per-table stats +INSERT INTO t2 VALUES (5), (6), (7), (8); + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; + +SELECT VALUE 
INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; + +SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END; + +# cleanup +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt new file mode 100644 index 00000000000..52f4895dc2f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=16k --log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test new file mode 100644 index 00000000000..56070652618 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test @@ -0,0 +1,70 @@ +--source include/have_log_bin.inc +--source include/have_rocksdb.inc +--source include/count_sessions.inc +--disable_warnings +--source include/have_gtid.inc +--enable_warnings +-- let $uuid = `select @@server_uuid;` + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); + +# Read-only, long-running transaction. SingleDelete/Put shouldn't increase much. 
+select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +connection con2; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t1 SET value=value+1 WHERE id=1; + inc $i; + eval $update; +} +--enable_query_log + +connection con1; +select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +SELECT * FROM t1; +--error ER_UNKNOWN_ERROR +INSERT INTO t1 values (2, 2); +ROLLBACK; +SELECT * FROM t1; +INSERT INTO t1 values (2, 2); +SELECT * FROM t1 ORDER BY id; + +# Regular long-running transaction. +# No "Transaction could not check for conflicts for operation" error should happen.
+BEGIN; + +connection con2; +--disable_query_log +let $i = 5; +while ($i <= 10000) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +connection con1; +SELECT COUNT(*) FROM t1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +OPTIMIZE TABLE t1; +DROP TABLE t1; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt new file mode 100644 index 00000000000..99929434028 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt @@ -0,0 +1,4 @@ +--force-restart +--rocksdb_debug_optimizer_n_rows=20000 +--rocksdb_records_in_range=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test new file mode 100644 index 00000000000..9c939ef06e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test @@ -0,0 +1,144 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create the table and insert some keys +CREATE TABLE t1 ( + i INT, + a INT, + b INT, + PRIMARY KEY (i), + KEY ka(a), + KEY kb(b) comment 'rev:cf1' +) ENGINE = rocksdb; + +--disable_query_log +let $max = 20000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# get results for records_in_range prior to memtable flush +# normal CF +explain extended select * from t1 where a> 500 and a< 750; +explain extended select * from t1 where a< 750; +explain extended select * from t1 where a> 500; +explain extended select * from t1 where a>=0 and a<=1000; + +#reverse CF +explain extended select * from t1 where b> 500 and b< 750; +explain extended select * from t1 where b< 750; +explain extended select * from t1 where b> 500; +explain extended select 
* from t1 where b>=0 and b<=1000; + +## cost calculation differences between covering vs non-covering (#298) +set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range; +set rocksdb_records_in_range = 15000; +# covering, range +explain extended select a from t1 where a < 750; +# non-covering, full +explain extended select a, b from t1 where a < 750; +# covering, ref +explain extended select a from t1 where a = 700; +# non-covering, ref +explain extended select a,b from t1 where a = 700; +# covering, full index +explain extended select a from t1 where a in (700, 800); +# non-covering, full +explain extended select a,b from t1 where a in (700, 800); +set rocksdb_records_in_range=8000; +# covering, range +explain extended select a from t1 where a in (700, 800); +# non-covering, full +explain extended select a,b from t1 where a in (700, 800); +set rocksdb_records_in_range = @save_rocksdb_records_in_range; + +# flush memtable and repeat +set global rocksdb_force_flush_memtable_now = true; +# normal CF +explain extended select * from t1 where a> 500 and a< 750; +explain extended select * from t1 where a< 750; +explain extended select * from t1 where a> 500; +explain extended select * from t1 where a>=0 and a<=1000; + +#reverse CF +explain extended select * from t1 where b> 500 and b< 750; +explain extended select * from t1 where b< 750; +explain extended select * from t1 where b> 500; +explain extended select * from t1 where b>=0 and b<=1000; + +# a set of 1 +explain extended select * from t1 where a>= 500 and a<= 500; +explain extended select * from t1 where b>= 500 and b<= 500; + +# two indexes +explain extended select * from t1 where a< 750 and b> 500 and b< 750; + +# composite index +drop index ka on t1; +drop index kb on t1; +create index kab on t1(a,b); +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; + +# override records in range +set rocksdb_records_in_range=444; +explain 
extended select * from t1 where a< 750 and b> 500 and b< 750; +set rocksdb_records_in_range=0; + +# issue 82 +## forward cf +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +drop table linktable; + +## rev cf +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 
COLLATE=latin1_bin; + +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +drop table linktable; + +#cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc new file mode 100644 index 00000000000..47d6a299095 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc @@ -0,0 +1,38 @@ +# +# REPAIR TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. Or, the output might say that +# the storage engine does not support REPAIR. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; + +REPAIR TABLE t1; + +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2; +INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f'); +REPAIR LOCAL TABLE t2; +INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); +INSERT INTO t2 (a,b) VALUES (9,'i'); +REPAIR LOCAL TABLE t2, t1 EXTENDED; +INSERT INTO t1 (a,b) VALUES (10,'j'); +INSERT INTO t2 (a,b) VALUES (11,'k'); +REPAIR TABLE t1, t2 QUICK USE_FRM; +INSERT INTO t1 (a,b) VALUES (12,'l'); +INSERT INTO t2 (a,b) VALUES (13,'m'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; +FLUSH TABLE t1; + +DROP TABLE t1, t2; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test new file mode 100644 index 
00000000000..5c4807ce986 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# REPAIR TABLE statements +# + +--source repair_table.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/replace.test b/storage/rocksdb/mysql-test/rocksdb/t/replace.test new file mode 100644 index 00000000000..3ac37330b2d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/replace.test @@ -0,0 +1,54 @@ +--source include/have_rocksdb.inc + +# +# Basic REPLACE statements +# + +########################################### +# TODO: +# A part of the test is currently disabled +# because unique indexes are not supported +########################################### + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# When there is no duplicate key, REPLACE should work as INSERT + +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +--sorted_result +SELECT a,b FROM t1; + +REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--disable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX (a)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (2,'d'); +REPLACE INTO t1 (a,b) VALUES (2,'d'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (4,'b'); +REPLACE INTO t1 (a,b) VALUES (4,'b'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt new file mode 100644 index 00000000000..6ad42e58aa2 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test new file mode 100644 index 00000000000..f612cb0997b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -0,0 +1,1925 @@ +--source include/have_rocksdb.inc + +# +# RocksDB Storage Engine tests +# +select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20; +drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29; +drop table if exists t30,t31,t32,t33,t34,t35,t36,t37,t38,t39; +drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49; +--enable_warnings + +# Disable background compaction to prevent stats from affecting explain output +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--echo # +--echo # Issue #1: Don't update indexes if index values have not changed +--echo # +# [Jay Edgar] I moved this test first because it uses the +# rocksdb_number_keys_written value, but this value is affected out of band +# by drop tables. There is a background thread that periodically processes +# through the list of dropped keys and if any are gone from the database it +# deletes information related to the key - and this delete causes this count +# to be incorrect. I moved this test first and made the whole test require +# a fresh server to hopefully avoid this.
+create table t1 ( + pk int primary key, + a int, + b int, + key(a) +) engine=rocksdb; + +insert into t1 values +(1,1,1), (2,2,2), (3,3,3), (4,4,4); + +set @var1=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); + +--echo # Do an update that doesn't change the key 'a'. +update t1 set b=3334341 where a=2; + +set @var2=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); +--echo # The following should produce 1 +select @var2 - @var1; + +--echo # Do an update that sets the key to the same value +update t1 set a=pk where a=3; +set @var3=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); +--echo # We have 'updated' column to the same value, so the following must return 0: +select @var3 - @var2; +drop table t1; + +create table t0 (a int primary key) engine=rocksdb; +show create table t0; +drop table t0; + +create table t1 (a int primary key, b int) engine=rocksdb; +insert into t1 values (1,1); +insert into t1 values (2,2); + +select * from t1; + +--echo # Check that we can create another table and insert there +create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb; +insert into t2 value ('abc','def'); +insert into t2 value ('hijkl','mnopq'); +select * from t2; + +--echo # Select again from t1 to see that records from different tables dont mix +select * from t1; + +explain select * from t2 where a='no-such-key'; +--replace_column 9 # +explain select * from t2 where a='abc'; +select * from t2 where a='abc'; + +--echo # Try a composite PK +create table t3 ( + pk1 int, + pk2 varchar(10), + col1 varchar(10), + primary key(pk1, pk2) +) engine=rocksdb; + +insert into t3 values (2,'two', 'row#2'); +insert into t3 values (3,'three', 'row#3'); +insert into t3 values (1,'one', 'row#1'); + +select * from t3; +select * from t3 where pk1=3 and pk2='three'; + +drop table 
t1, t2, t3; + +--echo # +--echo # Test blob values +--echo # + +create table t4 (a int primary key, b blob) engine=rocksdb; +insert into t4 values (1, repeat('quux-quux', 60)); +insert into t4 values (10, repeat('foo-bar', 43)); +insert into t4 values (5, repeat('foo-bar', 200)); + +insert into t4 values (2, NULL); + + +select + a, + (case a + when 1 then b=repeat('quux-quux', 60) + when 10 then b=repeat('foo-bar', 43) + when 5 then b=repeat('foo-bar', 200) + when 2 then b is null + else 'IMPOSSIBLE!' end) as CMP +from t4; + +drop table t4; + +--echo # +--echo # Test blobs of various sizes +--echo # + +--echo # TINYBLOB +create table t5 (a int primary key, b tinyblob) engine=rocksdb; +insert into t5 values (1, repeat('quux-quux', 6)); +insert into t5 values (10, repeat('foo-bar', 4)); +insert into t5 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('quux-quux', 6) + when 10 then b=repeat('foo-bar', 4) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' end) as CMP +from t5; +drop table t5; + +--echo # MEDIUMBLOB +create table t6 (a int primary key, b mediumblob) engine=rocksdb; +insert into t6 values (1, repeat('AB', 65000)); +insert into t6 values (10, repeat('bbb', 40000)); +insert into t6 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('AB', 65000) + when 10 then b=repeat('bbb', 40000) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' end) as CMP +from t6; +drop table t6; + +--echo # LONGBLOB +create table t7 (a int primary key, b longblob) engine=rocksdb; +insert into t7 values (1, repeat('AB', 65000)); +insert into t7 values (10, repeat('bbb', 40000)); +insert into t7 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('AB', 65000) + when 10 then b=repeat('bbb', 40000) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' 
end) as CMP +from t7; +drop table t7; + + +--echo # +--echo # Check if DELETEs work +--echo # +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; + +insert into t8 values + ('one', 'eins'), + ('two', 'zwei'), + ('three', 'drei'), + ('four', 'vier'), + ('five', 'funf'); + +--echo # Delete by PK +--replace_column 9 # +explain delete from t8 where a='three'; +delete from t8 where a='three'; + +select * from t8; + +--echo # Delete while doing a full table scan +delete from t8 where col1='eins' or col1='vier'; +select * from t8; + +--echo # delete w/o WHERE: +delete from t8; +select * from t8; + +--echo # +--echo # Test UPDATEs +--echo # +insert into t8 values + ('one', 'eins'), + ('two', 'zwei'), + ('three', 'drei'), + ('four', 'vier'), + ('five', 'funf'); + +update t8 set col1='dva' where a='two'; + +update t8 set a='fourAAA' where col1='vier'; + +select * from t8; +delete from t8; + +--echo # +--echo # Basic transactions tests +--echo # +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +rollback; +select * from t8; + +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +commit; +select * from t8; + +drop table t8; + +--echo # +--echo # Check if DROP TABLE works +--echo # +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +insert into t8 values ('foo','foo'); +drop table t8; +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +drop table t8; + +--echo # +--echo # MDEV-3961: Assertion ... 
on creating a TEMPORARY RocksDB table +--echo # +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB; + +--echo # +--echo # MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work +--echo # +CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t10 VALUES (1),(3); +CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t11 VALUES (1),(4); + +select * from t10; +select * from t11; +--replace_column 9 # +EXPLAIN +SELECT * FROM t10, t11 WHERE i=j; +SELECT * FROM t10, t11 WHERE i=j; + +DROP TABLE t10,t11; + +--echo # +--echo # MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122 +--echo # +CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t12 VALUES (2),(1); +SELECT * FROM t12 ORDER BY pk; +DROP TABLE t12; + +--echo # +--echo # MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ... +--echo # +create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2; +#--error ER_GET_ERRNO +#alter table t14 add partition partitions 2; +# ^^ works, but causes weird warnings in error log. +drop table t14; + +--echo # +--echo # MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table +--echo # +create table t9 (i int primary key) engine=rocksdb; +--error ER_ILLEGAL_HA +alter table t9 discard tablespace; +drop table t9; + +--echo # +--echo # MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ... 
+--echo # on accessing a table after ALTER +--echo # +CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t15 VALUES (1,1),(5,2); +#--error ER_ILLEGAL_HA +ALTER TABLE t15 DROP COLUMN a; +DROP TABLE t15; + +--echo # +--echo # MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table +--echo # +create table t16 (pk int primary key, a char(8)) engine=RocksDB; +insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d'); + +# +# Not anymore: The following query will still eat a record because of CANT-SEE-OWN-CHANGES +# property. +# +--error ER_DUP_ENTRY +update t16 set pk=100, a = 'updated' where a in ('b','c'); +select * from t16; +drop table t16; + +--echo # +--echo # MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table +--echo # +--disable_warnings +drop table if exists t_very_long_table_name; +--enable_warnings + +CREATE TABLE `t_very_long_table_name` ( + `c` char(1) NOT NULL, + `c0` char(0) NOT NULL, + `c1` char(1) NOT NULL, + `c20` char(20) NOT NULL, + `c255` char(255) NOT NULL, + PRIMARY KEY (`c255`) + ) ENGINE=RocksDB DEFAULT CHARSET=latin1; +INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255)); +drop table t_very_long_table_name; + + +--echo # +--echo # Test table locking and read-before-write checks. 
+--echo # +create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb; +insert into t17 values ('row1', 'val1'); + +--error ER_DUP_ENTRY +insert into t17 values ('row1', 'val1-try2'); +--error ER_DUP_ENTRY +insert into t17 values ('ROW1', 'val1-try2'); + +insert into t17 values ('row2', 'val2'); +insert into t17 values ('row3', 'val3'); + +--echo # This is ok +update t17 set pk='row4' where pk='row1'; + +--echo # This will try to overwrite another row: +--error ER_DUP_ENTRY +update t17 set pk='row3' where pk='row2'; + +select * from t17; + +--echo # +--echo # Locking tests +--echo # + +connect (con1,localhost,root,,); + +--echo # First, make sure there's no locking when transactions update different rows +connection con1; +set autocommit=0; +update t17 set col1='UPD1' where pk='row2'; + +connection default; +update t17 set col1='UPD2' where pk='row3'; + +connection con1; +commit; + +connection default; +select * from t17; + +--echo # Check the variable +show variables like 'rocksdb_lock_wait_timeout'; +set rocksdb_lock_wait_timeout=2; # seconds +show variables like 'rocksdb_lock_wait_timeout'; + +--echo # Try updating the same row from two transactions +connection con1; +begin; +update t17 set col1='UPD2-AA' where pk='row2'; + +connection default; +--error ER_LOCK_WAIT_TIMEOUT +update t17 set col1='UPD2-BB' where pk='row2'; + +set rocksdb_lock_wait_timeout=1000; # seconds +--send + update t17 set col1='UPD2-CC' where pk='row2'; + +connection con1; +rollback; + +connection default; +reap; +select * from t17 where pk='row2'; + +drop table t17; + +disconnect con1; +--echo # +--echo # MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable) +--echo # +--enable_connect_log + +create table t18 (pk int primary key, i int) engine=RocksDB; +begin; +select * from t18; +select * from t18 where pk = 1; + +--connect (con1,localhost,root,,) +insert into t18 values (1,100); + +--connection default +select * from t18; 
+select * from t18 where pk = 1; +commit; + +drop table t18; + +--echo # +--echo # MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY +--echo # +create table t19 (pk int primary key, i int) engine=RocksDB; +insert into t19 values (1,1); +insert into t19 values (1,100) on duplicate key update i = 102; +select * from t19; +drop table t19; + +--echo # MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY +create table t20 (pk int primary key, i int) engine=RocksDB; +insert into t20 values (1,1); +replace into t20 values (1,100); +select * from t20; +drop table t20; + +--echo # +--echo # MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT +--echo # +create table t21 (v varbinary(16) primary key, i int) engine=RocksDB; +insert into t21 values ('a',1); +select * from t21; +drop table t21; + +--echo # +--echo # MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE +--echo # + +CREATE TABLE t22 (a int primary key) ENGINE=RocksDB; +INSERT INTO t22 VALUES (1),(2); +CREATE TABLE t23 (b int primary key) ENGINE=RocksDB; +INSERT INTO t23 SELECT * FROM t22; +DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a; +DROP TABLE t22,t23; + +--echo # +--echo # MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT +--echo # +CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB; +INSERT INTO t24 VALUES (1),(2); + +CREATE TABLE t25 LIKE t24; +INSERT INTO t25 SELECT * FROM t24; + +DELETE t25.* FROM t24, t25; +DROP TABLE t24,t25; + +--echo # +--echo # MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself +--echo # +create table t26 (pk int primary key, c char(1)) engine=RocksDB; +insert into t26 values (1,'a'),(2,'b'); +update t26 set c = 'x' order by pk limit 1; +delete from t26 order by pk limit 1; +select * from t26; +drop table t26; + + +--echo # +--echo # Test whether SELECT ... 
FOR UPDATE puts locks +--echo # +create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB; +insert into t27 values + ('row1', 'row1data'), + ('row2', 'row2data'), + ('row3', 'row3data'); + +connection con1; +begin; +select * from t27 where pk='row3' for update; + +connection default; +set rocksdb_lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +update t27 set col1='row2-modified' where pk='row3'; + +connection con1; +rollback; +connection default; +disconnect con1; + +drop table t27; + +--echo # +--echo # MDEV-4060: RocksDB: Assertion `! trx->batch' fails in +--echo # +create table t28 (pk int primary key, a int) engine=RocksDB; +insert into t28 values (1,10),(2,20); +begin; +update t28 set a = 100 where pk = 3; +rollback; +select * from t28; +drop table t28; + + +--echo # +--echo # Secondary indexes +--echo # +create table t30 ( + pk varchar(16) not null primary key, + key1 varchar(16) not null, + col1 varchar(16) not null, + key(key1) +) engine=rocksdb; + +insert into t30 values ('row1', 'row1-key', 'row1-data'); +insert into t30 values ('row2', 'row2-key', 'row2-data'); +insert into t30 values ('row3', 'row3-key', 'row3-data'); + +--replace_column 9 # +explain +select * from t30 where key1='row2-key'; +select * from t30 where key1='row2-key'; + +--replace_column 9 # +explain +select * from t30 where key1='row1'; +--echo # This will produce nothing: +select * from t30 where key1='row1'; + +--replace_column 9 # +explain +select key1 from t30; +select key1 from t30; + +--echo # Create a duplicate record +insert into t30 values ('row2a', 'row2-key', 'row2a-data'); + +--echo # Can we see it? 
+select * from t30 where key1='row2-key'; + +delete from t30 where pk='row2'; +select * from t30 where key1='row2-key'; + +--echo # +--echo # Range scans on secondary index +--echo # +delete from t30; +insert into t30 values + ('row1', 'row1-key', 'row1-data'), + ('row2', 'row2-key', 'row2-data'), + ('row3', 'row3-key', 'row3-data'), + ('row4', 'row4-key', 'row4-data'), + ('row5', 'row5-key', 'row5-data'); +analyze table t30; + +--replace_column 9 # +explain +select * from t30 where key1 <='row3-key'; +select * from t30 where key1 <='row3-key'; + +--replace_column 9 # +explain +select * from t30 where key1 between 'row2-key' and 'row4-key'; +select * from t30 where key1 between 'row2-key' and 'row4-key'; + +--replace_column 9 # +explain +select * from t30 where key1 in ('row2-key','row4-key'); +select * from t30 where key1 in ('row2-key','row4-key'); + +--replace_column 9 # +explain +select key1 from t30 where key1 in ('row2-key','row4-key'); +select key1 from t30 where key1 in ('row2-key','row4-key'); + +--replace_column 9 # +explain +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; + +--replace_column 9 # +explain +select * from t30 order by key1 limit 3; +select * from t30 order by key1 limit 3; + +--replace_column 9 # +explain +select * from t30 order by key1 desc limit 3; +select * from t30 order by key1 desc limit 3; + +--echo # +--echo # Range scans on primary key +--echo # +--replace_column 9 # +explain +select * from t30 where pk <='row3'; +select * from t30 where pk <='row3'; + +--replace_column 9 # +explain +select * from t30 where pk between 'row2' and 'row4'; +select * from t30 where pk between 'row2' and 'row4'; + +--replace_column 9 # +explain +select * from t30 where pk in ('row2','row4'); +select * from t30 where pk in ('row2','row4'); + +--replace_column 9 # +explain +select * from t30 order by pk limit 3; +select * from t30 order by pk limit 3; + +drop table t30; + 
+ +--echo # +--echo # MDEV-3841: RocksDB: Reading by PK prefix does not work +--echo # +create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB; +insert into t31 values (1,10,100),(2,20,200); +select * from t31 where i = 1; +select * from t31 where j = 10; +select * from t31 where k = 100; +select * from t31 where i = 1 and j = 10; +select * from t31 where i = 1 and k = 100; +select * from t31 where j = 10 and k = 100; +select * from t31 where i = 1 and j = 10 and k = 100; +drop table t31; + +--echo # +--echo # MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work +--echo # +create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB; +insert into t32 values + (1,10,100,''), + (2,20,200,''); +select * from t32 where i = 1 and j = 10 and k = 100; +update t32 set a = 'updated' where i = 1 and j = 10 and k = 100; +select * from t32; +drop table t32; + +--echo # +--echo # MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. 
DESC +--echo # +CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB; +INSERT INTO t33 VALUES (1,'a'),(2,'b'); +SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC; +DROP TABLE t33; + +--echo # +--echo # MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index +--echo # +#--error ER_GET_ERRMSG +--echo # Unique indexes can be created, but uniqueness won't be enforced +create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB; +drop table t33; + +--echo # +--echo # MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range +--echo # +CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t34 VALUES (10),(11); +SELECT pk FROM t34 WHERE pk > 5 AND pk < 15; +SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15; +SELECT pk FROM t34 WHERE pk > 5; +SELECT pk FROM t34 WHERE pk < 15; +drop table t34; + +--echo # +--echo # MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC +--echo # +create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB; +insert into t35 values (1,1,1,1,1),(2,2,2,2,2); +select * from t35 where a = 1 and c = 1 and d = 1 order by e desc; +drop table t35; + +--echo # +--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index +--echo # +CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t36 VALUES (1,10),(2,20); +SELECT 3 IN ( SELECT a FROM t36 ); +drop table t36; + +--echo # +--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index +--echo # +CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b)) + ENGINE=RocksDB; +INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y'); +SELECT MAX(a) FROM t37 WHERE a < 100; +DROP TABLE t37; + +--echo # +--echo # MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC +--echo # +CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t38 
VALUES (1,10), (2,20); +SELECT i FROM t38 WHERE i NOT IN (8) ORDER BY i DESC; +drop table t38; + +--echo # +--echo # MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys +--echo # with a multi-part key and ORDER BY .. DESC +--echo # +CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB; +INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y'); + +CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t41 VALUES (1),(2); + +SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC; +DROP TABLE t40,t41; + +--echo # +--echo # MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL +--echo # +CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t42 VALUES (1, NULL),(2, 8); +SELECT ( 3 ) NOT IN ( SELECT a FROM t42 ); +DROP TABLE t42; + +--echo # +--echo # MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on +--echo # DELETE with search by NULL-able secondary key ... 
+--echo # +CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB; +INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x'); +UPDATE t43 SET pk = 10 WHERE a = 8; +REPLACE INTO t43 ( a ) VALUES ( 8 ); +REPLACE INTO t43 ( b ) VALUES ( 'y' ); +SELECT * FROM t43 WHERE a = 8; +DELETE FROM t43 WHERE a = 8; +DROP TABLE t43; + +--echo # +--echo # Basic AUTO_INCREMENT tests +--echo # +create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +insert into t44 (col1) values ('row1'); +insert into t44 (col1) values ('row2'); +insert into t44 (col1) values ('row3'); +select * from t44; +drop table t44; + +--echo # +--echo # ALTER TABLE tests +--echo # +create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t45 values (1, 'row1'); +insert into t45 values (2, 'row2'); +alter table t45 rename t46; +select * from t46; +drop table t46; +--error ER_BAD_TABLE_ERROR +drop table t45; + + +--echo # +--echo # Check Bulk loading +--echo # Bulk loading used to overwrite existing data +--echo # Now it fails if there is data overlap with what +--echo # already exists +--echo # +show variables like 'rocksdb%'; +create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t47 values (1, 'row1'); +insert into t47 values (2, 'row2'); +set rocksdb_bulk_load=1; +insert into t47 values (3, 'row3'),(4, 'row4'); +set rocksdb_bulk_load=0; +select * from t47; +drop table t47; + +--echo # +--echo # Fix TRUNCATE over empty table (transaction is committed when it wasn't +--echo # started) +--echo # +create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +set autocommit=0; +#--error ER_ILLEGAL_HA +truncate table t48; +set autocommit=1; +drop table t48; + +--echo # +--echo # MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded +--echo # +--enable_connect_log + +create table t49 (pk int primary key, a int) engine=RocksDB; +insert into t49 values 
(1,10),(2,20); +begin; +update t49 set a = 100 where pk = 1; + +--connect (con1,localhost,root,,) +--let $con1_id = `SELECT CONNECTION_ID()` +set rocksdb_lock_wait_timeout=5000; +set @var1= to_seconds(now()); +send update t49 set a = 1000 where pk = 1; + +--connect (con2,localhost,root,,) +--echo kill query \$con1_id; +--disable_query_log +eval kill query $con1_id; +--enable_query_log +--connection con1 +--error ER_QUERY_INTERRUPTED +--reap +set @var2= to_seconds(now()); + +# We expect the time to kill query in con1 should be below +# rocksdb_lock_wait_timeout (5000). +--echo "[Jay Edgar] I've updated this query to help determine why it is sometimes failing" +--echo "(t13541934). If you get an error here (i.e. not 'passed') notify me." +select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result'; + +--connection default +--disconnect con1 + +commit; +drop table t49; + +--echo # +--echo # Index-only tests for INT-based columns +--echo # +create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,2,2); +insert into t1 values (-5,-5,-5); +--echo # INT column uses index-only: +--replace_column 9 # +explain +select key1 from t1 where key1=2; +select key1 from t1 where key1=2; +select key1 from t1 where key1=-5; +drop table t1; + + +create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb; +insert into t2 values (1,1,1), (2,2,2); +--echo # INT UNSIGNED column uses index-only: +--replace_column 9 # +explain +select key1 from t2 where key1=2; +select key1 from t2 where key1=2; +drop table t2; + + +create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb; +insert into t3 values (1,1,1), (2,2,2); +--echo # BIGINT uses index-only: +--replace_column 9 # +explain +select key1 from t3 where key1=2; +select key1 from t3 where key1=2; +drop table t3; + +--echo # +--echo # Index-only reads for string columns +--echo # 
+create table t1 ( + pk int primary key, + key1 char(10) character set binary, + col1 int, + key (key1) +) engine=rocksdb; +insert into t1 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t1 where key1='one'; +--echo # The following will produce no rows. This looks like a bug, +--echo # but it is actually correct behavior. Binary strings are end-padded +--echo # with \0 character (and not space). Comparison does not ignore +--echo # the tail of \0. +select key1 from t1 where key1='one'; +--replace_column 9 # +explain +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +drop table t1; + + +create table t2 ( + pk int primary key, + key1 char(10) collate latin1_bin, + col1 int, + key (key1) +) engine=rocksdb; +insert into t2 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t2 where key1='one'; +select key1 from t2 where key1='one'; +drop table t2; + + +create table t3 ( + pk int primary key, + key1 char(10) collate utf8_bin, + col1 int, + key (key1) +) engine=rocksdb; +insert into t3 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t3 where key1='one'; +select key1 from t3 where key1='one'; +drop table t3; + + +--echo # a VARCHAR column +create table t4 ( + pk int primary key, + key1 varchar(10) collate latin1_bin, + key(key1) +) engine=rocksdb; +insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five'); + +--replace_column 9 # +explain +select key1 from t4 where key1='two'; +select key1 from t4 where key1='two'; + +select key1 from t4 where key1='fifty-five'; + +--replace_column 9 # +explain +select key1 from t4 where key1 between 's' and 'u'; +select key1 from t4 where key1 between 's' and 'u'; + +drop table t4; + +--echo # +--echo # MDEV-4305: RocksDB: Assertion `((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len +--echo # +CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i 
INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8); +SELECT * FROM t1 WHERE i != 3 OR pk1 > 9; +DROP TABLE t1; + +--echo # +--echo # MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1),(2,2); +BEGIN; +UPDATE t1 SET i = 100; + +--connect (con1,localhost,root,,test) +--error ER_LOCK_WAIT_TIMEOUT +DELETE IGNORE FROM t1 ORDER BY i; +--disconnect con1 + +--connection default +COMMIT; +DROP TABLE t1; + +--echo # +--echo # MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field +--echo # (testcase only) +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'), (2,'bar'); +DROP TABLE t1; + +--echo # +--echo # MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin; +INSERT INTO t1 VALUES (1,'h','h'); +SELECT * FROM t1; +SELECT c1 FROM t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. 
FOR UPDATE +--echo # +CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,4),(2,5); +SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE; +DROP TABLE t2; + +--echo # +--echo # MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e'); +SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h'; +DROP TABLE t1; + +--echo # +--echo # MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field +--echo # +create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB; +insert into t1 values ('2',2),('6',6); +select * from t1 where c = i; +select * from t1 ignore index (i) where c = i; +drop table t1; + + +--echo # +--echo # Test statement rollback inside a transaction +--echo # +create table t1 (pk varchar(12) primary key) engine=rocksdb; +insert into t1 values ('old-val1'),('old-val2'); + +create table t2 (pk varchar(12) primary key) engine=rocksdb; +insert into t2 values ('new-val2'),('old-val1'); + +begin; +insert into t1 values ('new-val1'); +--error ER_DUP_ENTRY +insert into t1 select * from t2; +commit; + +select * from t1; +drop table t1, t2; + +--echo # +--echo # MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. 
LIMIT: +--echo # rows that should be deleted remain in the table +--echo # +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; + +INSERT INTO t1 (pk) VALUES (NULL),(NULL); +BEGIN; +INSERT INTO t2 (pk) VALUES (NULL),(NULL); +INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); + +--enable_info +SELECT * FROM t1 ORDER BY pk LIMIT 9; +DELETE FROM t1 ORDER BY pk LIMIT 9; +SELECT * FROM t1 ORDER BY pk LIMIT 9; +--disable_info + +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on +--echo # inserting into a varchar column +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); +DROP TABLE t1; + + +--echo # +--echo # MDEV-4061: RocksDB: Changes from an interrupted query are still applied +--echo # + +--enable_connect_log + +create table t1 (pk int primary key, a int) engine=RocksDB; +insert into t1 values (1,10),(2,20); + +--let $con_id = `select connection_id()` + +set autocommit = 1; +--send +update t1 set a = sleep(100) where pk = 1; + +--connect (con1,localhost,root,,) +--echo kill query \$con_id; +--disable_query_log +eval kill query $con_id; +--enable_query_log + +--connection default +--error ER_QUERY_INTERRUPTED +--reap + +select * from t1; +--disconnect con1 +--disable_connect_log +drop table t1; + + +--echo # +--echo # MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB; +INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0); +SELECT * FROM t1; +SELECT pk FROM t1; +SELECT * FROM t1 WHERE a != 97; +DROP TABLE t1; + + +--echo # +--echo # Test @@rocksdb_max_row_locks +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB; +set @a=-1; +insert into t1 select (@a:=@a+1), 1234 
from information_schema.session_variables limit 100; +set @tmp1= @@rocksdb_max_row_locks; +set rocksdb_max_row_locks= 20; +--error ER_INTERNAL_ERROR +update t1 set a=a+10; +DROP TABLE t1; + + +--echo # +--echo # Test AUTO_INCREMENT behavior problem, +--echo # "explicit insert into an auto-inc column is not noticed by RocksDB" +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null); +insert into t1 values (null); +select * from t1; +drop table t1; + +create table t2 (i int primary key auto_increment) engine=RocksDB; + +insert into t2 values (1); +select * from t2; + +--echo # this fails (ie. used to fail), RocksDB engine did not notice use of '1' above +insert into t2 values (null); +select * from t2; + +--echo # but then this succeeds, so previous statement must have incremented next number counter +insert into t2 values (null); +select * from t2; +drop table t2; + +--echo # +--echo # Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null); +insert into t1 values (null); + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; + +--source include/restart_mysqld.inc + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +insert into t1 values (null); +select * from t1; + +drop table t1; + +--echo # +--echo # Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0 +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null),(null); +--replace_column 7 # +show table status like 't1'; +drop table t1; + +--echo # +--echo # Fix Issue #4: Crash when using pseudo-unique keys +--echo # +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, + UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; + +INSERT INTO t1 
(i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +DROP TABLE t1; + +--echo # +--echo # Fix issue #5: Transaction rollback doesn't undo all changes. +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (id int auto_increment primary key, value int) engine=rocksdb; + +set autocommit=0; +begin; +set @a:=0; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +rollback; +select count(*) from t1; + +set autocommit=1; +drop table t0, t1; + +--echo # +--echo # Check status variables +--echo # +--replace_column 2 # +show status like 'rocksdb%'; + +select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%'; +--echo # RocksDB-SE's status variables are global internally +--echo # but they are shown as both session and global, like InnoDB's status vars. 
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%'; + + +--echo # +--echo # Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int primary key, + col1 varchar(255), + key(col1) +) engine=rocksdb; +insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0; +select * from t1 where pk=3; +drop table t0, t1; + +--echo # +--echo # Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +CREATE TABLE t1 ( + id1 bigint(20) unsigned NOT NULL DEFAULT '0', + id2 bigint(20) unsigned NOT NULL DEFAULT '0', + link_type bigint(20) unsigned NOT NULL DEFAULT '0', + visibility tinyint(3) NOT NULL DEFAULT '0', + data varchar(255) NOT NULL DEFAULT '', + time bigint(20) unsigned NOT NULL DEFAULT '0', + version int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type,id1,id2) +) engine=rocksdb; + +insert into t1 select a,a,a,1,a,a,a from t0; + +alter table t1 add index id1_type (id1,link_type,visibility,time,version,data); +select * from t1 where id1 = 3; + +drop table t0,t1; + +--echo # +--echo # Test column families +--echo # + +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key(col1) comment 'cf3', + key(col2) comment 'cf4' +) engine=rocksdb; + +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); + +--replace_column 9 # +explain +select * from t1 where col1=2; +select * from t1 where col1=2; + +--replace_column 9 # +explain +select * from t1 where col2=3; +select * from t1 where col2=3; + +select * from t1 where pk=4; + +drop table t1; + +--echo # +--echo # Try primary key in a non-default CF: +--echo # +create table t1 ( + pk int, + col1 int, + col2 int, + key(col1) comment 'cf3', + key(col2) comment 'cf4', + primary key (pk) 
comment 'cf5' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); + +--replace_column 9 # +explain +select * from t1 where col1=2; +select * from t1 where col1=2; + +select * from t1 where pk=4; + +drop table t1; + +--echo # +--echo # Issue #15: SIGSEGV from reading in blob data +--echo # +CREATE TABLE t1 ( + id int not null, + blob_col text, + PRIMARY KEY (id) +) ENGINE=ROCKSDB CHARSET=latin1; + +INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +DROP TABLE t1; + + +--echo # +--echo # Issue #17: Automatic per-index column families +--echo # +create table t1 ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment '$per_index_cf' +) engine=rocksdb; + +--echo #Same CF ids with different CF flags +--error ER_UNKNOWN_ERROR +create table t1_err ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment 'test.t1.key1' +) engine=rocksdb; + +create table t1_err ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment 'test.t1.key2' +) engine=rocksdb; +drop table t1_err; + +--echo # Unfortunately there is no way to check which column family everything goes to +insert into t1 values (1,1); +select * from t1; +--echo # Check that ALTER and RENAME are disallowed +--error ER_NOT_SUPPORTED_YET +alter table t1 add col2 int; + +--error ER_NOT_SUPPORTED_YET +rename table t1 to t2; + +drop table t1; + +--echo # Check detection of typos in \$per_index_cf +--error ER_NOT_SUPPORTED_YET +create table t1 ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment '$per_idnex_cf' +)engine=rocksdb; + + +--echo # +--echo # Issue #22: SELECT ... 
FOR UPDATE takes a long time +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + id1 int, + id2 int, + value1 int, + value2 int, + primary key(id1, id2) COMMENT 'new_column_family', + key(id2) +) engine=rocksdb default charset=latin1 collate=latin1_bin; + +insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B; + +--replace_column 9 # +explain +select * from t1 where id1=30 and value1=30 for update; + +set @var1=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_read'); + +select * from t1 where id1=3 and value1=3 for update; + +set @var2=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_read'); +--echo # The following must return true (before the fix, the difference was 70): +select if((@var2 - @var1) < 30, 1, @var2-@var1); + +drop table t0,t1; + +--echo # +--echo # Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting +--echo # +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +--echo # The following must not use 'Using filesort': +--replace_column 9 # +explain select * from t1 ORDER BY id; +drop table t1; + +--echo # +--echo # Issue #26: Index-only scans for DATETIME and TIMESTAMP +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +--echo # Try a DATETIME column: +create table t1 ( + pk int auto_increment primary key, + kp1 datetime, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t1 force index (kp1) 
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 datetime not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # Try a DATE column: +create table t1 ( + pk int auto_increment primary key, + kp1 date, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 date not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # +--echo # Try a TIMESTAMP column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 timestamp, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' 
+--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 timestamp not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # +--echo # Try a TIME column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 time, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +--disable_warnings +insert into t1 (kp1,kp2) +select date_add('2015-01-01 09:00:00', interval a minute), a from t0; +--enable_warnings + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 time not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +drop table t1,t2; + +--echo # +--echo # Try a YEAR column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 year, + kp2 int, + col1 int, + 
key(kp1, kp2) +) engine=rocksdb; +--disable_warnings +insert into t1 (kp1,kp2) select 2015+a, a from t0; +--enable_warnings + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 year not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; + +drop table t1,t2; + +--echo # +--echo # Issue #57: Release row locks on statement errors +--echo # +create table t1 (id int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +begin; +insert into t1 values (4), (5), (6); +--error ER_DUP_ENTRY +insert into t1 values (7), (8), (2), (9); +select * from t1; + +-- connect(con1,localhost,root,,) +--connection con1 +begin; +--error ER_LOCK_WAIT_TIMEOUT +select * from t1 where id=4 for update; + +select * from t1 where id=7 for update; + +select * from t1 where id=9 for update; + +--connection default +-- disconnect con1 +drop table t1; + +--echo #Index on blob column +SET @old_mode = @@sql_mode; +SET sql_mode = 'strict_all_tables'; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb; +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb; +insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde'); +select * from t1; +--replace_column 9 # +explain select * from t1 where b like '1%'; +--replace_column 9 # +explain select b, a from t1 where b like '1%'; +update 
t1 set b= '12345' where b = '2abcde'; +select * from t1; +drop table t1; +--error ER_TOO_LONG_KEY +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; +SET sql_mode = @old_mode; + +drop table t0; + +--echo # +--echo # Fix assertion failure (attempt to overrun the key buffer) for prefix indexes +--echo # + +create table t1 ( + pk int primary key, + col1 varchar(100), + key (col1(10)) +) engine=rocksdb; + +insert into t1 values (1, repeat('0123456789', 9)); + +drop table t1; + +--echo # +--echo # Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*) +--echo # + +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; + +CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk; + +INSERT INTO t1 VALUES (1,1); +REPLACE INTO t1 VALUES (1,2); + +SELECT * FROM t1; +DROP TABLE t1, t2; + +--echo # +--echo # Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error +--echo # +create table t1(a int primary key); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 ( + a varchar(32) primary key, + col1 int +) engine=rocksdb; + +insert into t2 +select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B; +update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004'; + +drop table t1,t2; + +--echo # +--echo # Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed +--echo # +CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1,1,1,1,1,1); +SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +EXPLAIN SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +drop table t2; + +--echo # +--echo # Issue #135: register transaction was not being called for statement +--echo # 
+--disable_warnings +DROP DATABASE IF EXISTS test_db; +--enable_warnings +CREATE DATABASE test_db; +CREATE TABLE test_db.t1(c1 INT PRIMARY KEY); +LOCK TABLES test_db.t1 READ; +SET AUTOCOMMIT=0; +SELECT c1 FROM test_db.t1; +START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY; +DROP DATABASE test_db; + +--echo # +--echo # Issue #143: Split rocksdb_bulk_load option into two +--echo # +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +SET rocksdb_skip_unique_check=1; +INSERT INTO t1 VALUES(1, 1); +INSERT INTO t1 VALUES(1, 2); +INSERT INTO t1 VALUES(1, 3); +SELECT * FROM t1; +--error ER_UNKNOWN_ERROR +REPLACE INTO t1 VALUES(4, 4); +--error ER_UNKNOWN_ERROR +INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; +TRUNCATE TABLE t1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +SET rocksdb_skip_unique_check=0; +SET rocksdb_commit_in_the_middle=1; +SET rocksdb_bulk_load_size=10; +BEGIN; +INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10), + (11),(12),(13),(14),(15),(16),(17),(18),(19); +ROLLBACK; +SELECT * FROM t1; +INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15); +BEGIN; +UPDATE t1 SET value=100; +ROLLBACK; +SELECT * FROM t1; +BEGIN; +DELETE FROM t1; +ROLLBACK; +SELECT * FROM t1; +SET rocksdb_commit_in_the_middle=0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +DROP TABLE t1; + +--echo # +--echo # Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance() +--echo # +CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY; +INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar"); +create TABLE t1 (a int not null, b int not null, primary key(a,b)); +INSERT INTO t1 VALUES (1,1); +SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4)); +DROP TABLE t1, t2; + +--echo # +--echo # Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't release +--echo # +--connect 
(con1,localhost,root,,) +create table r1 (id int auto_increment primary key, value int); +insert into r1 (id) values (null), (null), (null), (null), (null); +connection con1; +create table r2 like r1; +show create table r2; +connection default; +begin; +insert into r1 values (10, 1); +commit; +connection con1; +begin; +select * from r1; +commit; +connection default; +drop table r1, r2; + +# hidden primary key +create table r1 (id int auto_increment, value int, index i(id)); +insert into r1 (id) values (null), (null), (null), (null), (null); +connection con1; +create table r2 like r1; +show create table r2; +connection default; +begin; +insert into r1 values (10, 1); +commit; +connection con1; +begin; +select * from r1; +commit; +connection default; +drop table r1, r2; + +disconnect con1; + +--echo # +--echo # Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT +--echo # +CREATE TABLE t1(c1 INT); +lock TABLE t1 read local; +SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM''); +set AUTOCOMMIT=0; +start transaction with consistent snapshot; +SELECT * FROM t1; +COMMIT; +UNLOCK TABLES; +DROP TABLE t1; + +--echo # +--echo # Issue#213 Crash on LOCK TABLES + partitions +--echo # +CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2; +INSERT INTO t1(a)VALUES (20010101101010.999949); +lock tables t1 write,t1 as t0 write,t1 as t2 write; +SELECT a FROM t1 ORDER BY a; +truncate t1; +INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020'); +UNLOCK TABLES; +DROP TABLE t1; + +--echo # +--echo # Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type +--echo # (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue +--echo # needs a datype that doesn't support index-inly) +--echo # + +CREATE TABLE t1( + c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL, + c2 varchar(10) character set utf8 
collate utf8_general_ci, + c3 INT, + INDEX idx(c1,c2) +); +INSERT INTO t1 VALUES ('c1-val1','c2-val1',5); +INSERT INTO t1 VALUES ('c1-val2','c2-val3',6); +INSERT INTO t1 VALUES ('c1-val3','c2-val3',7); +SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC; +--replace_column 9 # +explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC; +drop table t1; + +--echo # +--echo # Issue#267: MyRocks issue with no matching min/max row and count(*) +--echo # +CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2)); +INSERT INTO t1 VALUES(1,null); +INSERT INTO t1 VALUES(2,null); +SELECT count(*) as total_rows, min(c2) as min_value FROM t1; +DROP TABLE t1; + +--echo # +--echo # Issue#263: MyRocks auto_increment skips values if you insert a negative value +--echo # +# We have slightly different behavior regarding auto-increment values than +# InnoDB, so the results of the SHOW TABLE STATUS command will be slightly +# different. InnoDB will reserve 3 values but only use 2 of them (because +# the user hard-coded a -1 as the second value). MyRocks will only reserve +# the values as needed, so only 2 values will be used. This means that the +# SHOW TABLE STATUS in InnoDB will indicate that the next auto-increment +# value is 4 while MyRocks will show it as 3. 
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(-1),(0); +SHOW TABLE STATUS LIKE 't1'; +SELECT * FROM t1; +DROP TABLE t1; +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(10),(0); +SHOW TABLE STATUS LIKE 't1'; +SELECT * FROM t1; +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt new file mode 100644 index 00000000000..95d819ee425 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options="write_buffer_size=12m;target_file_size_base=1m;max_bytes_for_level_multiplier=10" diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh new file mode 100755 index 00000000000..9381de1fafc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cat > $MYSQL_TMP_DIR/cf_configs.cnf < $MYSQLTEST_VARDIR/log/mysqld.1.err +--let $_mysqld_option=--rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m}; +--source include/restart_mysqld_with_option.inc + +# check column family options in log -- should reflect individual settings + +--echo +--echo Individualized options for column families: +--echo +select cf_name, option_type, value + from information_schema.rocksdb_cf_options + where option_type in ('WRITE_BUFFER_SIZE', + 'TARGET_FILE_SIZE_BASE', + 'MAX_BYTES_FOR_LEVEL_MULTIPLIER') + order by cf_name, option_type; + +# syntax error in options (no equal sign) + +--exec echo "restart:--rocksdb_override_cf_options=cf1" > $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# invalid cf option config 
(no curly braces) + +--exec echo "restart:--rocksdb_override_cf_options=cf1=write_buffer_size=8m" > $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# invalid cf option config (cf listed twice) + +--exec echo "restart:--rocksdb_override_cf_options=cf1={write_buffer_size=8m};cf1={target_file_size_base=2m}" > $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# clean up + +--source include/restart_mysqld.inc + +--echo +drop table t1,t2,t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test new file mode 100644 index 00000000000..8e30332bafe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test @@ -0,0 +1,71 @@ +--source include/have_rocksdb.inc + +# +# RocksDB-SE tests for reverse-ordered Column Families +# + +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int primary key, + a int not null, + b int not null, + key(a) comment 'rev:foo', + key(b) comment 'bar' +) engine=rocksdb; + +insert into t1 select a,a,a from t0; +insert into t1 select a+10,a+10,a+10 from t0; + +--echo # Primary key is not in a reverse-ordered CF, so full table scan +--echo # returns rows in ascending order: +select * from t1; + +--replace_column 9 # +explain +select a from t1 order by a limit 5; +select a from t1 order by a limit 5; + +--replace_column 9 # +explain +select b from t1 order by b limit 5; +select a from t1 order by a limit 5; + +--replace_column 9 # +explain +select a from t1 order by a desc 
limit 5; +select a from t1 order by a desc limit 5; + +--replace_column 9 # +explain +select b from t1 order by b desc limit 5; +select b from t1 order by b desc limit 5; + +drop table t1; + +--echo # +--echo # Try a primary key in a reverse-ordered CF. +--echo # + +create table t2 ( + pk int, + a int not null, + primary key(pk) comment 'rev:cf1' +) engine=rocksdb; + +insert into t2 select a,a from t0; +--echo # Primary key is in a reverse-ordered CF, so full table scan +--echo # returns rows in descending order: +select * from t2; + +set autocommit=0; +begin; +delete from t2 where a=3 or a=7; +select * from t2; +rollback; +set autocommit=1; + +drop table t2; +drop table t0; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt new file mode 100644 index 00000000000..792e3808f1e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt @@ -0,0 +1,2 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl new file mode 100644 index 00000000000..322f0781719 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl @@ -0,0 +1,16 @@ +$file=$ARGV[0]; +$total=$ARGV[1]; +$pct=$ARGV[2]; + +open($fh, "<", $file) or die $!; +while(readline($fh)) { + if (/(\d+) index entries checked \((\d+) had checksums/) { + if ($1 == $total && $2 >= $total*($pct-2)/100 && $2 <= $total*($pct+2)/100) { + printf("%d index entries had around %d checksums\n", $total, $total*$pct/100); + } + }elsif (/(\d+) table records had checksums/) { + if ($1 >= $total*($pct-2)/100 && $1 <= $total*($pct+2)/100) { + printf("Around %d table records had checksums\n", $total*$pct/100); + } + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test 
b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test new file mode 100644 index 00000000000..1a0364ebaee --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -0,0 +1,124 @@ +--source include/have_rocksdb.inc + +# +# Tests for row checksums feature +# +--source include/have_debug.inc + +set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; +set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; +set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; + +# wiping mysql log for repeatable tests +--exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err + +--disable_warnings +drop table if exists t1,t2,t3; +--enable_warnings +-- exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err + +show variables like 'rocksdb_%checksum%'; + +create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3); +check table t1; +--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t1" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +drop table t1; + +set session rocksdb_store_checksums=on; +create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t2 values (1,1,1),(2,2,2),(3,3,3); +check table t2; +--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t2" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +--echo # Now, make a table that has both rows with checksums and without +create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t3 values (1,1,1),(2,2,2),(3,3,3); +set session rocksdb_store_checksums=off; +update t3 set b=3 where a=2; +set session rocksdb_store_checksums=on; +check table t3; +--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +set session rocksdb_store_checksums=on; +set session rocksdb_checksums_pct=5; +create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; 
+--disable_query_log +let $i=0; +let $x= 100000; +while ($i<10000) +{ + inc $i; + eval insert t4(pk,a,b) values($i, $i, $i div 10); + eval update t4 set a= a+$x where a=$i; + eval update t4 set pk=pk+$x where pk=$i; +} +--enable_query_log +check table t4; +--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log +--exec perl suite/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5 +--remove_file $MYSQL_TMP_DIR/rocksdb_checksums.log +set session rocksdb_checksums_pct=100; + +--echo # +--echo # Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches. +--echo # +insert into mtr.test_suppressions values + ('Checksum mismatch in key of key-value pair for index'), + ('Checksum mismatch in value of key-value pair for index'), + ('Data with incorrect checksum'); + +--echo # 1. Start with mismatch in key checksum of the PK. +set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session rocksdb_verify_checksums=off; +select * from t3; +set session rocksdb_verify_checksums=on; +--error ER_INTERNAL_ERROR +select * from t3; +--error ER_INTERNAL_ERROR +select * from t4; +set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; + +--echo # 2. Continue with mismatch in pk value checksum. +set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session rocksdb_verify_checksums=off; +select * from t3; +set session rocksdb_verify_checksums=on; +--error ER_INTERNAL_ERROR +select * from t3; +--error ER_INTERNAL_ERROR +select * from t4; +set session debug= "-d,myrocks_simulate_bad_pk_checksum2"; + +--echo # 3. 
Check if we catch checksum mismatches for secondary indexes +--replace_column 9 # +explain +select * from t3 force index(a) where a<4; +select * from t3 force index(a) where a<4; + +set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +--error ER_INTERNAL_ERROR +select * from t3 force index(a) where a<4; +--error ER_INTERNAL_ERROR +select * from t4 force index(a) where a<1000000; +set session debug= "-d,myrocks_simulate_bad_key_checksum1"; + +--echo # 4. The same for index-only reads? +--replace_column 9 # +explain +select a from t3 force index(a) where a<4; +select a from t3 force index(a) where a<4; + +set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +--error ER_INTERNAL_ERROR +select a from t3 force index(a) where a<4; +--error ER_INTERNAL_ERROR +select a from t4 force index(a) where a<1000000; +set session debug= "-d,myrocks_simulate_bad_key_checksum1"; + +set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; +set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; +set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; + +drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test new file mode 100644 index 00000000000..ecb4b2a3609 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# This validates the fix for Issue #144. The problem was that with more +# than one client accessing/deleting the same row there was a possibility +# of client A finding a row (through Next() or Prev()) but the row being +# deleted before the GetForUpdate() call could occur. When this happened +# a nearly useless error was being returned. 
+ +let $order=ASC; +let $comment=""; +--source suite/rocksdb/include/rocksdb_concurrent_delete.inc + +let $order=DESC; +let $comment=""; +--source suite/rocksdb/include/rocksdb_concurrent_delete.inc + +let $order=ASC; +let $comment="rev:cf2"; +--source suite/rocksdb/include/rocksdb_concurrent_delete.inc + +let $order=DESC; +let $comment="rev:cf2"; +--source suite/rocksdb/include/rocksdb_concurrent_delete.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py new file mode 100644 index 00000000000..37b118d525a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py @@ -0,0 +1,95 @@ +""" +This script tests concurrent inserts on a given table. +Example Usage (in Mysql Test Framework): + + CREATE TABLE t1 (a INT) ENGINE=rocksdb; + + let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py \ + root 127.0.0.1 $MASTER_MYPORT test t1 100 4; + exec $exec; + +""" +import cStringIO +import hashlib +import MySQLdb +import os +import random +import signal +import sys +import threading +import time +import string + +def get_insert(table_name, idx): + return """INSERT INTO %s (a) VALUES (%d)""" % (table_name, idx) + +class Inserter(threading.Thread): + Instance = None + def __init__(self, con, table_name, num_inserts): + threading.Thread.__init__(self) + self.finished = False + self.num_inserts = num_inserts + con.autocommit(False) + self.con = con + self.rand = random.Random() + self.exception = None + self.table_name = table_name + Inserter.Instance = self + self.start() + def run(self): + try: + self.runme() + except Exception, e: + self.exception = traceback.format_exc() + print "caught (%s)" % e + finally: + self.finish() + def runme(self): + cur = self.con.cursor() + for i in xrange(self.num_inserts): + try: + cur.execute(get_insert(self.table_name, i)) + r = self.rand.randint(1,10) + if r < 4: + self.con.commit() + except: + cur = 
self.con.cursor() + try: + self.con.commit() + except Exception, e: + self.exception = traceback.format_exc() + print "caught (%s)" % e + pass + def finish(self): + self.finished = True + +if __name__ == '__main__': + if len(sys.argv) != 8: + print "Usage: rocksdb_concurrent_insert.py user host port db_name " \ + "table_name num_inserts num_threads" + sys.exit(1) + + user = sys.argv[1] + host = sys.argv[2] + port = int(sys.argv[3]) + db = sys.argv[4] + table_name = sys.argv[5] + num_inserts = int(sys.argv[6]) + num_workers = int(sys.argv[7]) + + worker_failed = False + workers = [] + for i in xrange(num_workers): + inserter = Inserter( + MySQLdb.connect(user=user, host=host, port=port, db=db), table_name, + num_inserts) + workers.append(inserter) + + for w in workers: + w.join() + if w.exception: + print "Worker hit an exception:\n%s\n" % w.exception + worker_failed = True + + if worker_failed: + sys.exit(1) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test new file mode 100644 index 00000000000..6dd4dd11748 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test @@ -0,0 +1,30 @@ +--source include/have_rocksdb.inc + +let $ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test.install.db; +let $rdb_ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test; +let $sql_file = $MYSQL_TMP_DIR/rocksdb_datadir.sql; + +--write_file $sql_file +DROP DATABASE IF EXISTS mysqltest; +CREATE DATABASE mysqltest; +USE mysqltest; +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES(42); +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SELECT sleep(1); +DROP TABLE t1; +EOF + +# Must ensure this directory exists before launching mysqld +mkdir $ddir; + +# Launch mysqld with non-standard rocksdb_datadir +exec $MYSQLD_BOOTSTRAP_CMD --datadir=$ddir --rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < $sql_file; + +--echo Check for the 
number of MANIFEST files +exec ls $rdb_ddir/MANIFEST-0000* | wc -l; + +# Clean up +exec rm -rf $ddir; +remove_files_wildcard $rdb_ddir *; +remove_file $sql_file; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt new file mode 100644 index 00000000000..885b15e36e3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_perf_context_level=2 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc new file mode 100644 index 00000000000..5728e49b5e0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc @@ -0,0 +1,154 @@ +# +# Testing Index Condition Pushdown for MyRocks +# Test file parameter: $cf_name specifies the CF to store test data in +# It can be forward or reverse-ordered CF +# +select * from information_schema.engines where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3; +--enable_warnings +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; + +eval +create table t2 ( + pk int primary key, + kp1 int, + kp2 int, + col1 int, + key (kp1,kp2) comment '$cf_name' +) engine=rocksdb; + +insert into t2 select a,a,a,a from t1; + +--echo # Try a basic case: +--replace_column 9 # +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; + +--echo # Check that ICP doesnt work for columns where column value +--echo # cant be restored from mem-comparable form: + +eval +create table t3 ( + pk int primary key, + kp1 int, + kp2 varchar(10) collate utf8_general_ci, + col1 int, + key (kp1,kp2) comment '$cf_name' +) engine=rocksdb; + 
+insert into t3 select a,a/10,a,a from t1; +--echo # This must not use ICP: +--replace_column 9 # +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; + +--replace_column 9 # +explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; + +--echo # Check that we handle the case where out-of-range is encountered sooner +--echo # than matched index condition +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000; +select * from t2 where kp1< 3 and kp2+1>50000; + +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000; +select * from t2 where kp1< 3 and kp2+1>50000; + +--echo # Try doing backwards scans +--replace_column 9 # +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; + +--replace_column 9 # +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; + +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; + +drop table t0,t1,t2,t3; + +--echo # +--echo # Check how ICP affects counters +--echo # +--echo # First, some preparations +--echo # +create procedure save_read_stats() + select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT + into @rr, @rq, @rif, @rin + from information_schema.table_statistics + where table_name='t4' and table_schema=database(); + +create procedure get_read_stats() + select + ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin + from information_schema.table_statistics + where table_name='t4' and table_schema=database(); + +eval +create table t4 ( + id int, + id1 int, + id2 int, + value int, + value2 varchar(100), + primary key (id), + key id1_id2 (id1, id2) comment '$cf_name' +) engine=rocksdb 
charset=latin1 collate latin1_bin; + +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); + +--echo # +--echo # Now, the test itself +--echo # +call save_read_stats(); +call get_read_stats(); + + +--echo # ============== index-only query ============== +--replace_column 9 # +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +--echo # ============== Query without ICP ============== +set optimizer_switch='index_condition_pushdown=off'; +--replace_column 9 # +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +--echo # ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; +--replace_column 9 # +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +drop table t4; +drop procedure save_read_stats; +drop procedure get_read_stats; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test new file mode 100644 index 00000000000..175476974df --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + + +let $cf_name=cf1; + +--source suite/rocksdb/t/rocksdb_icp.inc + +--echo # +--echo # Issue #67: Inefficient index condition pushdown +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int not null primary key, + key1 
bigint(20) unsigned, + col1 int, + key (key1) +) engine=rocksdb; + +insert into t1 +select + A.a+10*B.a+100*C.a, + A.a+10*B.a+100*C.a, + 1234 +from t0 A, t0 B, t0 C; + +set @count=0; +let $save_query= +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context + where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); + +--replace_column 9 # +explain +select * from t1 where key1=1; + +eval $save_query; +select * from t1 where key1=1; +eval $save_query; +--echo # The following must be =1, or in any case not 999: +select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment"; + +drop table t0,t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt new file mode 100644 index 00000000000..6ae466bcc09 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test new file mode 100644 index 00000000000..c3fd43e5b7e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + + +let $cf_name=rev:cf1; + +--source suite/rocksdb/t/rocksdb_icp.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test new file mode 100644 index 00000000000..3b28df0d63b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test @@ -0,0 +1,92 @@ +--source include/have_rocksdb.inc + +# +# MyRocks-specific tests for locking +# +--source include/have_debug.inc + +--enable_connect_log +create table t1 (pk int not null primary key) engine=rocksdb; + +insert into t1 values (1),(2),(3); + +set autocommit=0; +begin; +select * from t1 where pk=1 for update; 
+ +--connect (con1,localhost,root,,) +--connection con1 +--echo ### Connection con1 +let $ID= `select connection_id()`; +set @@rocksdb_lock_wait_timeout=500; +set autocommit=0; +begin; +--send select * from t1 where pk=1 for update; + +--connection default +--echo ### Connection default + +let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST + where ID = $ID and STATE = "Waiting for row lock"; +--source include/wait_condition.inc +## Waiting for row lock +## select connection_id(); +## select state='Waiting for row lock' from information_schema.processlist where id=2; + +rollback; + +connection con1; +reap; +rollback; +connection default; + +## +## Now, repeat the same test but let the wait time out. +## +begin; +select * from t1 where pk=1 for update; + +--connection con1 +--echo ### Connection con1 +set @@rocksdb_lock_wait_timeout=2; +set autocommit=0; +begin; +--error ER_LOCK_WAIT_TIMEOUT +select * from t1 where pk=1 for update; + +--connection default + +rollback; +set autocommit=1; + +--connection con1 +drop table t1; +--connection default + +--echo # +--echo # Now, test what happens if another transaction modified the record and committed +--echo # + +CREATE TABLE t1 ( + id int primary key, + value int +) engine=rocksdb collate latin1_bin; +insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); + +--connection con1 +BEGIN; +SELECT * FROM t1 WHERE id=3; + +--connection default +BEGIN; +UPDATE t1 SET value=30 WHERE id=3; +COMMIT; + +--connection con1 +--error ER_LOCK_DEADLOCK +SELECT * FROM t1 WHERE id=3 FOR UPDATE; + +ROLLBACK; +--disconnect con1 +--connection default +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test new file mode 100644 index 00000000000..82fb70b0596 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test @@ -0,0 +1,121 @@ +--source include/have_rocksdb.inc + +--source include/have_partition.inc + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +--echo # Tests for MyRocks + partitioning + +--echo # +--echo # MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB +PARTITION BY HASH(pk) PARTITIONS 2; +INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 1); + +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1, 1), (2, 1); + +SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 ); + +drop table t1,t2; + +--echo # +--echo # Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables +--echo # +CREATE TABLE t1 ( + id INT PRIMARY KEY, + a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8, + b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null, + c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null, + INDEX (a), + INDEX (b), + INDEX (c) +) ENGINE=RocksDB PARTITION BY key (id) partitions 2; + +INSERT INTO t1 (id, b) VALUES (28, 3); +UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1; +check table t1; +drop table t1; + +--echo # +--echo # Issue #105, another testcase +--echo # +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key (col1) comment 'rev:cf_issue105' +) engine=rocksdb partition by hash(pk) partitions 2; + +insert into t1 values (1,10,10); +insert into t1 values 
(2,10,10); + +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +explain select * from t1 force index(col1) where col1=10; +select * from t1 force index(col1) where col1=10; +select * from t1 use index () where col1=10; +drop table t1; + +--echo # +--echo # Issue #108: Index-only scans do not work for partitioned tables and extended keys +--echo # +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key (col1) +) engine=rocksdb partition by hash(pk) partitions 2; + +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); + +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +--echo # The following must use "Using index" +explain select pk from t1 force index(col1) where col1=10; + +drop table t1; + +--echo # +--echo # Issue #214: subqueries cause crash +--echo # +create TABLE t1(a int,b int,c int,primary key(a,b)) + partition by list (b*a) (partition x1 values in (1) tablespace ts1, + partition x2 values in (3,11,5,7) tablespace ts2, + partition x3 values in (16,8,5+19,70-43) tablespace ts3); +create table t2(b binary(2)); +set session optimizer_switch=5; +insert into t1(a,b) values(1,7); +select a from t1 where a in (select a from t1 where a in (select b from t2)); + +drop table t1, t2; + +--echo # +--echo # Issue #260: altering name to invalid value leaves table unaccessible +--echo # +CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; +INSERT INTO t1 VALUES(1,'a'); +--error ER_ERROR_ON_RENAME +RENAME TABLE t1 TO db3.t3; +SELECT * FROM t1; +SHOW TABLES; +# try it again to the same database +RENAME TABLE t1 TO test.t3; +SELECT * FROM t3; +SHOW TABLES; +# now try it again but with another existing database +CREATE DATABASE db3; +USE test; +RENAME TABLE t3 to db3.t2; +USE db3; +SELECT * FROM t2; +SHOW TABLES; +# cleanup +DROP TABLE t2; +use test; +DROP DATABASE db3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt 
b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt new file mode 100644 index 00000000000..a00258bc48c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt @@ -0,0 +1 @@ +--query_cache_type=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test new file mode 100644 index 00000000000..5cfbe3fbd39 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test @@ -0,0 +1,30 @@ +--source include/have_rocksdb.inc + +# Important: +# The test needs to be run with --mysqld=--query-cache-type=1 + +-- source include/have_query_cache.inc +--enable_connect_log + +create table t1 (pk int primary key, c char(8)) engine=RocksDB; +insert into t1 values (1,'new'),(2,'new'); + +select * from t1; + +--connect (con1,localhost,root,,) + +update t1 set c = 'updated'; +#select * from t1; + +--connection default +flush status; +show status like 'Qcache_hits'; +show global status like 'Qcache_hits'; +select * from t1; +select sql_no_cache * from t1; +select * from t1 where pk = 1; +show status like 'Qcache_hits'; +show status like 'Qcache_not_cached'; +show global status like 'Qcache_hits'; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt new file mode 100644 index 00000000000..6ad42e58aa2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test new file mode 100644 index 00000000000..c6f1ecc8424 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test @@ -0,0 +1,193 @@ +--source include/have_rocksdb.inc + +# +# Range access test for RocksDB storage engine +# +select * from information_schema.engines 
where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3,t4,t5; +--enable_warnings +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; + +create table t2 ( + pk int not null, + a int not null, + b int not null, + primary key(pk), + key(a) comment 'rev:cf1' +) engine=rocksdb; + +# 10 pk values for each value of a... +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; + +--echo # +--echo # HA_READ_KEY_EXACT tests +--echo # + +--echo # Original failure was here: +--replace_column 9 # +explain +select * from t2 force index (a) where a=0; +select * from t2 force index (a) where a=0; + +--echo # The rest are for code coverage: +--replace_column 9 # +explain +select * from t2 force index (a) where a=2; +select * from t2 force index (a) where a=2; + +--replace_column 9 # +explain +select * from t2 force index (a) where a=3 and pk=33; +select * from t2 force index (a) where a=3 and pk=33; + +select * from t2 force index (a) where a=99 and pk=99; +select * from t2 force index (a) where a=0 and pk=0; +select * from t2 force index (a) where a=-1; +select * from t2 force index (a) where a=-1 and pk in (101,102); +select * from t2 force index (a) where a=100 and pk in (101,102); + + +--echo # +--echo # #36: Range in form tbl.key >= const doesn't work in reverse column family +--echo # +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>=0 and a <=1; +select count(*) from t2 force index (a) where a>=0 and a <=1; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>=-1 and a <=1; +select count(*) from t2 force index (a) where a>=-1 and a <=1; + +--replace_column 9 # +explain +select * from t2 force index (a) where a=0 and pk>=3; +select * from t2 force index (a) where a=0 and pk>=3; + +--echo # Try edge cases where we fall over the 
end of the table +create table t3 like t2; +insert into t3 select * from t2; + +select * from t3 where pk>=1000000; +select * from t2 where pk>=1000000; + +--echo # +--echo # #42: Range in form tbl.key > const doesn't work in reverse column family +--echo # +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>0; +select count(*) from t2 force index (a) where a>0; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>99; +select count(*) from t2 force index (a) where a>99; + +select * from t2 where pk>1000000; +select * from t3 where pk>1000000; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a=2 and pk>25; +select count(*) from t2 force index (a) where a=2 and pk>25; + + +select * from t2 force index (a) where a>-10 and a < 1; +select * from t3 force index (a) where a>-10 and a < 1; + + +--echo # +--echo # #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family +--echo # +select max(a) from t2 where a < 2; +select max(a) from t2 where a < -1; + +select max(pk) from t2 where a=3 and pk < 6; + +select max(pk) from t2 where pk < 200000; +select max(pk) from t2 where pk < 20; + +select max(a) from t3 where a < 2; +select max(a) from t3 where a < -1; +select max(pk) from t3 where pk < 200000; +select max(pk) from t3 where pk < 20; + +select max(pk) from t2 where a=3 and pk < 33; +select max(pk) from t3 where a=3 and pk < 33; + +--echo # +--echo # #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF +--echo # + +--echo # Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +--replace_column 9 # +explain +select * from t2 where a between 99 and 2000 order by a desc; +select * from t2 where a between 99 and 2000 order by a desc; + +select max(a) from t2 where a <=10; +select max(a) from t2 where a <=-4; + +select max(pk) from t2 where a=5 and pk <=55; +select max(pk) from t2 where a=5 and pk <=55555; +select max(pk) from t2 where a=5 and pk <=0; + 
+select max(pk) from t2 where pk <=-1; +select max(pk) from t2 where pk <=999999; +select max(pk) from t3 where pk <=-1; +select max(pk) from t3 where pk <=999999; + +--echo # +--echo # Tests for search_flag=HA_READ_PREFIX_LAST +--echo # + +create table t4 ( + pk int primary key, + a int, + b int, + c int, + key(a,b,c) +) engine=rocksdb; + +insert into t4 select pk,pk,pk,pk from t2 where pk < 100; + +--replace_column 9 # +explain +select * from t4 where a=1 and b in (1) order by c desc; +select * from t4 where a=1 and b in (1) order by c desc; + +--replace_column 9 # +explain +select * from t4 where a=5 and b in (4) order by c desc; +select * from t4 where a=5 and b in (4) order by c desc; + +--echo # HA_READ_PREFIX_LAST for reverse-ordered CF +create table t5 ( + pk int primary key, + a int, + b int, + c int, + key(a,b,c) comment 'rev:cf2' +) engine=rocksdb; + +insert into t5 select pk,pk,pk,pk from t2 where pk < 100; + +--replace_column 9 # +explain +select * from t5 where a=1 and b in (1) order by c desc; +select * from t5 where a=1 and b in (1) order by c desc; + +--replace_column 9 # +explain +select * from t5 where a=5 and b in (4) order by c desc; +select * from t5 where a=5 and b in (4) order by c desc; + +drop table t0,t1,t2,t3,t4,t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test new file mode 100644 index 00000000000..6b8d0b90e90 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +# Issue#212 MyRocks chooses full index scan even if range scan is more efficient +# rocksdb_debug_optimizer_n_rows must not be set. 
+ +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +--disable_query_log +let $i=0; +while ($i<10000) +{ + inc $i; + eval insert t1(id1, id2, c1, c2, c3, c4, c5, c6, c7) + values($i, 0, $i, 0, 0, 0, 0, 0, 0); +} +--enable_query_log +analyze table t1; +select count(*) from t1; +explain select c1 from t1 where c1 > 5 limit 10; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test new file mode 100644 index 00000000000..ebcc741fc17 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test @@ -0,0 +1,57 @@ +source include/have_rocksdb.inc; +create table t1 (a int primary key) engine=rocksdb; + +-- echo Verify rocksdb_rows_inserted +select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +select @new_rows_inserted - @old_rows_inserted; + +-- echo Verify rocksdb_rows_updated +select variable_value into @old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +select @new_rows_updated - @old_rows_updated; + +-- echo Verify rocksdb_rows_read +select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select * from t1; +select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select @new_rows_read - @old_rows_read; + +-- echo Verify rocksdb_rows_deleted +select variable_value into 
@old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +delete from t1; +select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +select @new_rows_deleted - @old_rows_deleted; + +use mysql; +create table t1(a int primary key) engine=rocksdb; + +-- echo Verify rocksdb_system_rows_inserted +select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +select @new_system_rows_inserted - @old_system_rows_inserted; + +-- echo Verify rocksdb_system_rows_updated +select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +select @new_system_rows_updated - @old_system_rows_updated; + +-- echo Verify rocksdb_system_rows_read +select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select * from t1; +select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select @new_system_rows_read - @old_system_rows_read; + +-- echo Verify rocksdb_system_rows_deleted +select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +delete from t1; +select variable_value into @new_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +select @new_system_rows_deleted - 
@old_system_rows_deleted; + +drop table t1; +use test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test new file mode 100644 index 00000000000..5eaeff5cdbd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# First set sampling rate to 100% and make sure that the baseline is +# correct and we get the correct number of rows as a result. +# +SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT; +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; + +create table t1 (pk int primary key) engine=rocksdb; + +--disable_query_log +let $i = 0; +let $n = 10000; + +while ($i < $n) +{ + inc $i; + eval insert t1(pk) values($i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now = true; + +# This should return 10K rows. +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't1'; + +let $t1_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't1'`; + +drop table t1; + +--disable_warnings +drop table if exists t2; +--enable_warnings + +# +# Now, set the sampling rate to 10% and expect to see the same amount of +# rows. +# +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10; + +create table t2 (pk int primary key) engine=rocksdb; + +--disable_query_log +let $i = 0; +let $n = 10000; + +while ($i < $n) +{ + inc $i; + eval insert t2(pk) values($i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now = true; + +# This should return 10K rows as well. 
+select table_rows from information_schema.tables +where table_schema = database() and table_name = 't2'; + +let $t2_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't2'`; +let $diff = `select abs($t1_len - $t2_len)`; + +# +# Table sizes are approximations and for this particular case we allow about +# 10% deviation. +# +if ($diff < 6000) { + select table_name from information_schema.tables where table_schema = database() and table_name = 't2'; +} + +drop table t2; + +SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf new file mode 100644 index 00000000000..13dea1236d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf @@ -0,0 +1,14 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +sync_binlog=0 +binlog_format=row +rocksdb_read_free_rpl_tables="t.*" +slave-exec-mode=strict + +[mysqld.2] +sync_binlog=0 +binlog_format=row +rocksdb_read_free_rpl_tables="t.*" +slave-exec-mode=strict +rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test new file mode 100644 index 00000000000..38fb3c32149 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test @@ -0,0 +1,302 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# initialization/insert +connection master; +--source init_stats_procedure.inc + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4); +--source include/sync_slave_sql_with_master.inc + +--let $diff_tables= master:t1, slave:t1 + +--echo +--echo # regular update/delete. 
With rocks_read_free_rpl_tables=.*, rocksdb_rows_read does not increase on slaves +--echo +connection slave; +call save_read_stats(); +connection master; +update t1 set value=value+1 where id=1; +delete from t1 where id=4; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +--echo +--echo # "rocks_read_free_rpl_tables=.*" makes "row not found error" not happen anymore +--echo +connection slave; +--source include/stop_slave.inc +delete from t1 where id in (2, 3); +--source include/start_slave.inc +call save_read_stats(); + +connection master; +update t1 set value=value+1 where id=3; +delete from t1 where id=2; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +--echo +--echo ## tables without primary key -- read free replication should be disabled +--echo +--echo +--echo #no index +--echo +connection master; +drop table t1; +create table t1 (c1 int, c2 int); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + +--echo +--echo #secondary index only +--echo +connection master; +drop table t1; +create table t1 (c1 int, c2 int, index i(c1)); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + + +--echo +--echo ## large row operations -- primary key modification, secondary key modification +--echo +connection master; +drop table 
t1; +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); + +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval insert t1(id1,id2,c1,c2,c3,c4,c5,c6,c7) + values($i,0,$i,0,0,0,0,0,0); + inc $i; +} +--enable_query_log + +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; + +--echo +--echo #updating all seconary keys by 1 +--echo +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval update t1 set c2=c2+1 where id1=$i and id2=0; + inc $i; +} +--enable_query_log +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating all primary keys by 2 +--echo +connection slave; +call save_read_stats(); +connection master; +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval update t1 set id2=id2+2 where id1=$i and id2=0; + inc $i; +} +--enable_query_log +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating secondary keys after truncating t1 on slave +--echo +connection slave; +truncate table t1; +call save_read_stats(); +connection master; +update t1 set c2=c2+10; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating primary keys after truncating t1 on slave +--echo +connection slave; +truncate table t1; +call save_read_stats(); +connection master; +update t1 set id2=id2+10; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #deleting half rows +--echo +connection slave; +call save_read_stats(); +connection master; +delete from t1 
where id1 <= 5000; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +#--echo +#--echo # some tables with read-free replication on and some with it off +#--echo # secondary keys lose rows +#--echo +# The configuration is set up so the slave will do read-free replication on +# all tables starting with 't' +connection master; +--echo [on master] +create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +--source include/sync_slave_sql_with_master.inc + +# make a mismatch between the slave and the master +connection slave; +--echo [on slave] +delete from t2 where id <= 2; +delete from u2 where id <= 2; + +# make changes on the master +connection master; +--echo [on master] +update t2 set i2=100, value=100 where id=1; +update u2 set i2=100, value=100 where id=1; + +connection slave; +--echo [on slave] +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 'u2'.*"); +# wait until we have the expected error +--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND) +--source include/wait_for_slave_sql_error.inc + +# query the t2 table on the slave +connection slave; +select count(*) from t2 force index(primary); +select count(*) from t2 force index(i1); +select count(*) from t2 force index(i2); +select * from t2 where id=1; +select i1 from t2 where i1=1; +select i2 from t2 where i2=100; + +# query the u2 table on the slave +select count(*) from u2 force index(primary); +select count(*) from u2 force index(i1); +select count(*) from u2 force index(i2); +select * from u2 where id=1; +select i1 from u2 where i1=1; +select i2 from u2 where i2=100; + +# 
the slave replication thread stopped because of the errors; +# cleanup the problem and restart it +--disable_query_log +insert into u2 values(1,1,1,1), (2,2,2,2); +start slave sql_thread; +--source include/wait_for_slave_sql_to_start.inc +--enable_query_log + +--echo +--echo # some tables with read-free replication on and some with it off +--echo # secondary keys have extra rows +--echo +connection master; +--echo [on master] +create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +--source include/sync_slave_sql_with_master.inc + +# make a mismatch between the slave and the master +connection slave; +--echo [on slave] +update t3 set i1=100 where id=1; +update u3 set i1=100 where id=1; + +# make changes on the master +connection master; +--echo [on master] +delete from t3 where id=1; +delete from u3 where id=1; + +# make sure the slave is caught up +--source include/sync_slave_sql_with_master.inc + +# query the t3 table on the slave +connection slave; +--echo [on slave] +select count(*) from t3 force index(primary); +select count(*) from t3 force index(i1); +select count(*) from t3 force index(i2); +select i1 from t3 where i1=100; + +# query the u3 table on the slave +select count(*) from u3 force index(primary); +select count(*) from u3 force index(i1); +select count(*) from u3 force index(i2); +select i1 from u3 where i1=100; + +# cleanup +connection master; +drop table t1, t2, t3, u2, u3; +--source drop_stats_procedure.inc + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf new file mode 100644 index 00000000000..44100e59cc2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf @@ -0,0 +1,9 @@ 
+!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row +slave_parallel_workers=4 +slave_exec_mode=SEMI_STRICT +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc new file mode 100644 index 00000000000..5a78979f048 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc @@ -0,0 +1,92 @@ +--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( + pk int primary key, + kp1 int, + kp2 int, + col1 int, + key (kp1,kp2) +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; + + +# For GitHub issue#166 +# Slave is suspended at ha_rocksdb::read_range_first() -> index_read_map_impl() +# -> ha_rocksdb::get_row_by_rowid() -- which is after creating an iterator, +# Seek(), Next() (getting pk=1) +# and before GetForUpdate() and before creating a snapshot. +# Deletes remove pk=2 and pk=3, then resumes update on slave. +# The update resumes with GetForUpdate(pk=1), +# index_next() -> secondary_index_read() -> get_row_by_rowid(pk=2) +# then doesn't find a row. +# The slave should not stop with error (Can't find a record). 
+ +--source include/sync_slave_sql_with_master.inc + +connection slave; +let $old_debug = `select @@global.debug`; +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +--source include/stop_slave.inc +--source include/start_slave.inc + +connection master; +update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; + +connection slave; +set debug_sync= 'now WAIT_FOR Reached'; +eval set global debug = '$old_debug'; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; + +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t2 where pk < 5; + +# For GitHub issue#162 (result file must be updated after fixing #162) +connection slave; +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +--source include/stop_slave.inc +--source include/start_slave.inc + +connection master; +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; + +connection slave; +call mtr.add_suppression("Deadlock found when trying to get lock"); +set debug_sync= 'now WAIT_FOR Reached'; +eval set global debug = '$old_debug'; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; + +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +# col1 for pk=4 should be 100 +select * from t3 where pk < 5; + +# Cleanup +connection master; +drop table t0, t1, t2, t3; +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test new file mode 100644 index 00000000000..36188427585 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test @@ -0,0 +1,4 @@ +--source include/have_binlog_format_row.inc + +--source rpl_row_not_found.inc + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf new file mode 100644 index 00000000000..b46b417c257 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf @@ -0,0 +1,7 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test new file mode 100644 index 00000000000..2f00741afbb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test @@ -0,0 +1,47 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +select @@binlog_format; +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; +drop table t1; + +--echo # +--echo # Issue #18: slave crash on update with row based binary logging +--echo # +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +select * from t1 where id=2; +select * from t1 where id=3; + +connection master; +drop table t1; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf new file mode 100644 index 00000000000..b46b417c257 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf @@ -0,0 +1,7 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test new file mode 100644 index 00000000000..c0b0122cbc0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test @@ -0,0 +1,46 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# initialization/insert +connection master; +# creating save_read_stats() and get_read_stats() procedures +--source init_stats_procedure.inc + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5); +--source include/sync_slave_sql_with_master.inc + +connection slave; +call save_read_stats(); +connection master; +update t1 set value=value+1 where id=1; +update t1 set value=value+1 where id=3; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; +call save_read_stats(); + +connection master; +delete from t1 where id in (4,5); +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +# cleanup +connection master; +drop table t1; +--source drop_stats_procedure.inc + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf new file mode 100644 index 00000000000..d20d3396f0a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf @@ -0,0 +1,19 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +gtid_mode=ON +enforce_gtid_consistency +log_slave_updates +binlog_row_image=FULL +rocksdb_read_free_rpl_tables=.* +rocksdb_strict_collation_check=0 
+[mysqld.2] +binlog_format=row +gtid_mode=ON +enforce_gtid_consistency +log_slave_updates +binlog_row_image=FULL +rocksdb_read_free_rpl_tables=.* +rocksdb_strict_collation_check=0 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test new file mode 100644 index 00000000000..4490353b749 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test @@ -0,0 +1,262 @@ +-- source include/have_binlog_format_row.inc +-- source include/have_rbr_triggers.inc +-- source include/have_rocksdb.inc +-- source include/master-slave.inc + +-- echo # Test of row replication with triggers on the slave side +connection master; +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; + +sync_slave_with_master; + +connection slave; +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values + ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), + ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), + ('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_db before delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0'; +create trigger t1_cnt_ib before insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' 
where id = 'i1'; +SELECT * FROM t2 order by id; + +connection master; +--echo # INSERT triggers test +insert into t1 values ('a','b'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # UPDATE triggers test +update t1 set C1= 'd'; +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # DELETE triggers test +delete from t1 where C1='d'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; + +connection master; +insert into t1 values ('0','1'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also DELETE test +--echo # (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); + +connection master; +# Foreign key is not supported in MyRocks +#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) ); +#insert into t1 values ('1','1'); + +#sync_slave_with_master; + +#connection slave; +#SELECT * FROM t2 order by id; + +#connection master; +#drop table t3,t1; +drop table if exists t1; + +sync_slave_with_master; + +connection slave; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; + +--connection master +CREATE TABLE t1 (i INT); +CREATE TABLE t2 (i INT); + +--sync_slave_with_master +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr=YES; +CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW + INSERT INTO t2 VALUES (new.i); + +--connection master +BEGIN; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (2); +COMMIT; +--sync_slave_with_master +select * from t2; +SET @@global.slave_run_triggers_for_rbr= 
@old_slave_run_triggers_for_rbr; +--connection master +drop tables t2,t1; + +--sync_slave_with_master + +-- echo # Triggers on slave do not work if master has some + +connection master; +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +create trigger t1_dummy before delete on t1 for each row + set @dummy= 1; + +sync_slave_with_master; + +connection slave; +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values + ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), + ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), + ('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_ib before insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; + +connection master; +--echo # INSERT triggers test +insert into t1 values ('a','b'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +connection master; +--echo # UPDATE triggers test +update t1 set C1= 'd'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # DELETE triggers test +delete from t1 where C1='d'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also 
UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; + + +connection master; +insert into t1 values ('0','1'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also DELETE test +--echo # (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); + +connection master; + +# Foreign Key is not supported in MyRocks +#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) ); +#insert into t1 values ('1','1'); + +#sync_slave_with_master; + +#connection slave; +#SELECT * FROM t2 order by id; + +#connection master; +#drop table t3,t1; +drop table if exists t1; + +sync_slave_with_master; + +connection slave; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; + +--echo # +--echo # MDEV-5513: Trigger is applied to the rows after first one +--echo # + +--connection master +create table t1 (a int, b int); +create table tlog (a int auto_increment primary key); +set sql_log_bin=0; +create trigger tr1 after insert on t1 for each row insert into tlog values (null); +set sql_log_bin=1; + +sync_slave_with_master; +--connection slave + +set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr; +set global slave_run_triggers_for_rbr=1; +create trigger tr2 before insert on t1 for each row set new.b = new.a; + +--connection master +insert into t1 values (1,10),(2,20),(3,30); + +--sync_slave_with_master +select * from t1; + +--echo # +--echo # Verify slave skips running triggers if master ran and logged the row events for triggers +--echo # +--connection master +create table t4(a int, b int); +delete from tlog; +create trigger tr4 before insert on t4 for each row insert into tlog values (null); +insert into t4 values (1, 10),(2, 20); +select * from tlog; + +--sync_slave_with_master +select * from t4; +select * from 
tlog; + +# Cleanup +set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved; +--connection master +drop table t1, tlog, t4; +sync_slave_with_master; + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf new file mode 100644 index 00000000000..b46b417c257 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf @@ -0,0 +1,7 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test new file mode 100644 index 00000000000..0f26c24c27d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test @@ -0,0 +1,90 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3); + +begin; +insert into t1 values (11, 1); +savepoint a; +insert into t1 values (12, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error ER_UNKNOWN_ERROR +commit; +commit; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; +begin; +insert into t1 values (21, 1); +savepoint a; +insert into t1 values (22, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error ER_UNKNOWN_ERROR +insert into t1 values (23, 1); +--error ER_UNKNOWN_ERROR +commit; +commit; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + + +connection master; +begin; +insert into t1 values (31, 1); +savepoint a; +insert into t1 values (32, 1); +savepoint b; +insert into t1 values (33, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error 
ER_UNKNOWN_ERROR +insert into t1 values (34, 1); +rollback; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + +### GitHub Issue#195 +connection master; +SET autocommit=off; +select * from t1; +SAVEPOINT A; +select * from t1; +SAVEPOINT A; +insert into t1 values (35, 35); +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT A; +--error ER_UNKNOWN_ERROR +START TRANSACTION; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + + +connection master; +drop table t1; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf new file mode 100644 index 00000000000..6e5130c1f01 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf @@ -0,0 +1,7 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=statement +[mysqld.2] +binlog_format=mixed +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test new file mode 100644 index 00000000000..b4126266956 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test @@ -0,0 +1,57 @@ +--source include/have_rocksdb.inc +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +select @@binlog_format; +create table t1 (pk int primary key) engine=rocksdb; +--error ER_UNKNOWN_ERROR +insert into t1 values (1),(2),(3); + +set session rocksdb_unsafe_for_binlog=on; +insert into t1 values (1),(2),(3); +select * from t1; +delete from t1; +set session rocksdb_unsafe_for_binlog=off; + +--error ER_UNKNOWN_ERROR +insert into t1 values (1),(2),(3); + +set binlog_format=row; +insert into t1 values (1),(2),(3); + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; 
+drop table t1; + +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +select * from t1 where id=2; +select * from t1 where id=3; + +connection master; +drop table t1; +set binlog_format=row; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf new file mode 100644 index 00000000000..470b073d185 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf @@ -0,0 +1,9 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=statement +rocksdb_unsafe_for_binlog=1 +[mysqld.2] +binlog_format=row +slave_parallel_workers=4 +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test new file mode 100644 index 00000000000..d85fb0a1772 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test @@ -0,0 +1,2 @@ +--source rpl_row_not_found.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc new file mode 100644 index 00000000000..9a6bf73d6a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc @@ -0,0 +1,43 @@ +# +# Random Query Generator tests +# +# Arguments needed to be set by the test when including this one: +# $TESTDIR : name of sub-directory in conf containing the data/grammar files +# $GRAMMAR_FILES: space separated list of grammar files +# $DATA_FILE: name of the data file +# + +let $MYSQL_BASEDIR = 
`SELECT @@BASEDIR`; +let RQG_BASE = $MYSQL_BASEDIR/rqg/rqg/common/mariadb-patches; +let MYSQL_SOCKET = `SELECT @@SOCKET`; +let GRAMMAR_FILES = $GRAMMAR_FILES; +let DATA_FILE = $DATA_FILE; +let TESTDIR = $TESTDIR; +let $TESTDB = rqg_$TESTDIR; +let TESTDB = $TESTDB; + +--eval CREATE DATABASE IF NOT EXISTS $TESTDB + +--perl + +foreach $grammar_file (split(/ /, $ENV{'GRAMMAR_FILES'})) { + + # Errors from the gentest.pl file will be captured in the results file + my $cmd = "RQG_HOME=$ENV{'RQG_BASE'} perl $ENV{'RQG_BASE'}/gentest.pl " . + "--dsn=dbi:mysql:host=:port=:user=root:database=$ENV{'TESTDB'}" . + ":mysql_socket=$ENV{'MYSQL_SOCKET'} " . + "--gendata=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$ENV{'DATA_FILE'} " . + "--grammar=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$grammar_file " . + "--threads=5 --queries=10000 --duration=60 --sqltrace 2>&1 >> " . + "$ENV{'MYSQLTEST_VARDIR'}/tmp/$ENV{'TESTDB'}.log"; + + print "Running test with grammar file $grammar_file\n"; + system($cmd); + if ($? != 0) { + print ("Failure running test! 
Command executed: $cmd\n"); + } +} + +EOF + +--eval DROP DATABASE $TESTDB diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt new file mode 100644 index 00000000000..5b714857e13 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test new file mode 100644 index 00000000000..4eb02ac648a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# RQG's examples test +let $TESTDIR = examples; +let $GRAMMAR_FILES = example.yy; +let $DATA_FILE = example.zz; + +--source rqg.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt new file mode 100644 index 00000000000..f494273892c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 --secure-file-priv=/tmp diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test new file mode 100644 index 00000000000..d5914745219 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test @@ -0,0 +1,53 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("Did not write failed "); +call mtr.add_suppression("Can't open and lock privilege tables"); + +SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; + +# mysql.user and mysql.tables_priv are modified by the +# tests, so they need to be restored to the original +# state. 
+--disable_warnings +CREATE TABLE mysql.user_temp LIKE mysql.user; +INSERT mysql.user_temp SELECT * FROM mysql.user; +CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv; +INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv; +--enable_warnings + +# RQG's runtime test +let $TESTDIR = runtime; + +let $GRAMMAR_FILES = alter_online.yy; +let $DATA_FILE = alter_online.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = concurrency_1.yy; +let $DATA_FILE = concurrency_1.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = connect_kill_sql.yy; +let $DATA_FILE = connect_kill_data.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = metadata_stability.yy; +let $DATA_FILE = metadata_stability.zz; + +--source rqg.inc + +--disable_warnings +DELETE FROM mysql.tables_priv; +DELETE FROM mysql.user; +INSERT mysql.user SELECT * FROM mysql.user_temp; +INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp; +DROP TABLE mysql.user_temp; +DROP TABLE mysql.tables_priv_temp; +DROP TABLE IF EXISTS test.executors; +DROP DATABASE IF EXISTS testdb_N; +DROP DATABASE IF EXISTS testdb_S; +--enable_warnings + +SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt new file mode 100644 index 00000000000..5b714857e13 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test new file mode 100644 index 00000000000..f29ddcb8c81 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test @@ -0,0 +1,10 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("Deadlock found when trying to get lock"); + +# RQG's transactions test +let $TESTDIR = transactions; +let $GRAMMAR_FILES = transactions.yy repeatable_read.yy
transaction_durability.yy transactions-flat.yy combinations.yy repeatable_read.yy transaction_durability.yy transactions-flat.yy; +let $DATA_FILE = transactions.zz; + +--source rqg.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out new file mode 100644 index 00000000000..406e5066132 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out @@ -0,0 +1 @@ +Can't open perl script "./mtr": No such file or directory diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select.test b/storage/rocksdb/mysql-test/rocksdb/t/select.test new file mode 100644 index 00000000000..c4e1ad464a3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select.test @@ -0,0 +1,202 @@ +--source include/have_rocksdb.inc + +# +# Basic SELECT statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar'); + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a,b) SELECT a, b FROM t1; +INSERT INTO t1 (a,b) SELECT a, b FROM t2; + +--sorted_result +SELECT * FROM t1; + +# Modifiers + +--sorted_result +SELECT DISTINCT a FROM t1; + +--sorted_result +SELECT ALL b, a FROM t1; + +# Optimizer and cache directives should not have any visible effect here, +# but we will add them for completeness + +--sorted_result +SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a; + +--sorted_result +SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2; + +--sorted_result +SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a) + FROM t1 t1_1, t2, t1 t1_2; +SELECT FOUND_ROWS(); + +let $query_cache = `SELECT @@query_cache_size`; +SET GLOBAL query_cache_size = 1024*1024; +--sorted_result +SELECT SQL_CACHE * FROM t1, t2; +eval SET GLOBAL query_cache_size = $query_cache; + +# Combination of main clauses
+ +--sorted_result +SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1 +WHERE b > 'b' AND a IS NOT NULL +GROUP BY 2 DESC, field1 ASC +HAVING field1 < 1000 +ORDER BY field2, 1 DESC, field1*2 +LIMIT 5 OFFSET 1; + +# ROLLUP +--sorted_result +SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP; + +# Procedure + +--sorted_result +SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE(); + +# SELECT INTO +let $datadir = `SELECT @@datadir`; + +--replace_result $datadir +eval +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a + INTO OUTFILE '$datadir/select.out' + CHARACTER SET utf8 + FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY ''''; +--cat_file $datadir/select.out +--remove_file $datadir/select.out + +--replace_result $datadir +--error ER_TOO_MANY_ROWS +eval +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a + INTO DUMPFILE '$datadir/select.dump'; +--remove_file $datadir/select.dump +--replace_result $datadir +eval +SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1 + INTO DUMPFILE '$datadir/select.dump'; + +--cat_file $datadir/select.dump +--echo +--remove_file $datadir/select.dump + +SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max; +SELECT @min, @max; + +# Joins + +--sorted_result +SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2 + WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a; + +--sorted_result +SELECT alias1.* FROM ( SELECT a,b FROM t1 ) alias1, t2 WHERE t2.a IN (100,200); + +--sorted_result +SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 }; + +--sorted_result +SELECT t1.* FROM t2 INNER JOIN t1; + +--sorted_result +SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b; + +--sorted_result +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b; + +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b; + +SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1; + +--sorted_result +SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = 
t2.a WHERE t1.a IS NOT NULL; + +SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b; + +SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a); + +--sorted_result +SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > ''; + +--sorted_result +SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL; + +--sorted_result +SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2; + +--sorted_result +SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 ); + +--sorted_result +SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER JOIN t2; + +# Subquery as scalar operand, subquery in the FROM clause + +--sorted_result +SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2; + +# Comparison using subqueries + +--sorted_result +SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 ); + +--sorted_result +SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 ); + +# Subquery with IN, correlated subquery + +--sorted_result +SELECT t2.* FROM t1 t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a ); + +# Subquery with ANY, ALL + +--sorted_result +SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 ); + +--sorted_result +SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' ); + +# Row subqueries + +--sorted_result +SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 ); + +# Subquery with EXISTS + +--sorted_result +SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b ); + +# Subquery in ORDER BY + +--sorted_result +SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC; + +# Subquery in HAVING + +--sorted_result +SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b ); + +# Union + +--sorted_result +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1; + 
+--sorted_result +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1; + + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test new file mode 100644 index 00000000000..14fdfb7896c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test @@ -0,0 +1,55 @@ +--source include/have_rocksdb.inc + +# +# SELECT .. FOR UPDATE +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--enable_connect_log + +--source include/count_sessions.inc + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); + +--connect (con1,localhost,root,,) +BEGIN; +--sorted_result +SELECT a,b FROM t1 WHERE b='a' FOR UPDATE; + +--connection default +SET lock_wait_timeout = 1; + +# Should still be able to select + +--sorted_result +SELECT a,b FROM t1 WHERE b='a'; + +# ... 
but not with LOCK IN SHARE MODE + +--sorted_result +--error ER_LOCK_WAIT_TIMEOUT +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET b='c' WHERE b='a'; + +--connection con1 +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +--disconnect con1 +--connection default +# Now it can be updated all right +UPDATE t1 SET b='c' WHERE b='a'; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test new file mode 100644 index 00000000000..c8548d96888 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test @@ -0,0 +1,46 @@ +############################################################################## +## SKIP LOCKED | NOWAIT are *not* supported for SELECT...FOR UPDATE in RocksDB + +--disable_warnings +drop table if exists t1; +--enable_warnings + +create table t1 (a int primary key) engine=rocksdb; + +insert into t1 values (1), (2), (3); + +### SKIP LOCKED + +--echo Should succeed since no table gets involved +select 1 for update skip locked; + +--error ER_NO_SUCH_TABLE +select * from nonexistence for update skip locked; + +--error ER_ILLEGAL_HA +select * from t1 for update skip locked; + +--error ER_ILLEGAL_HA +select * from t1 where a > 1 and a < 3 for update skip locked; + +--error ER_ILLEGAL_HA +insert into t1 select * from t1 for update skip locked; + +### NOWAIT + +--echo Should succeed since no table gets involved +select 1 for update nowait; + +--error ER_NO_SUCH_TABLE +select * from nonexistence for update nowait; + +--error ER_ILLEGAL_HA +select * from t1 for update nowait; + +--error ER_ILLEGAL_HA +select * from t1 where a > 1 and a < 3 for update nowait; + +--error ER_ILLEGAL_HA +insert into t1 select * from t1 for update nowait; + +drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test new file mode 100644 index 00000000000..d1d289bcb74 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test @@ -0,0 +1,58 @@ +--source include/have_rocksdb.inc + +# +# SELECT .. LOCK IN SHARE MODE +# +# If the engine has its own lock timeouts, +# it makes sense to set them to minimum to decrease +# the duration of the test. + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--enable_connect_log + +--source include/count_sessions.inc + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); + +--connect (con1,localhost,root,,) +BEGIN; +--sorted_result +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--connection default +SET lock_wait_timeout = 1; + +# Should still be able to select + +--sorted_result +SELECT a,b FROM t1 WHERE b='a'; +--sorted_result +--echo # +--echo # Currently, SELECT ... 
LOCK IN SHARE MODE works like +--echo # SELECT FOR UPDATE +--error ER_LOCK_WAIT_TIMEOUT +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET b='c' WHERE b='a'; + +--connection con1 +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +--disconnect con1 +--connection default +# Now it can be updated all right +UPDATE t1 SET b='c' WHERE b='a'; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc new file mode 100644 index 00000000000..a8d8ed53cba --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc @@ -0,0 +1,27 @@ +# Usage: +# let $checkpoint = ; +# let $succeeds = <1 if checkpoint creation should succeed, 0 otherwise>; +# --source set_checkpoint.inc + + +if ($succeeds) +{ + # Create checkpoint + --replace_result '$checkpoint' [CHECKPOINT] + eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; + + # Check checkpoint + --exec ls $checkpoint/CURRENT | sed s/.*CURRENT/CURRENT/g + + # Cleanup + --exec rm -rf $checkpoint +} +if (!$succeeds) +{ + --disable_result_log + --disable_query_log + --error ER_UNKNOWN_ERROR + eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; + --enable_query_log + --enable_result_log +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt new file mode 100644 index 00000000000..cef79bc8585 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt @@ -0,0 +1 @@ +--force-restart diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test new file mode 100644 index 00000000000..0cb32d95d8a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc + +# +# SHOW 
ENGINE STATUS command +# Checking that the command doesn't produce an error. +# If it starts producing an actual result, the result file +# will need to be updated, and possibly masked. + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +--enable_warnings + +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; +CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB + PARTITION BY KEY(l) PARTITIONS 4; + +--replace_column 3 # +SHOW ENGINE rocksdb STATUS; + +INSERT INTO t1 VALUES (1), (2), (3); +SELECT COUNT(*) FROM t1; + +INSERT INTO t2 VALUES (1), (2), (3), (4); +SELECT COUNT(*) FROM t2; + +INSERT INTO t4 VALUES (1), (2), (3), (4), (5); +SELECT COUNT(*) FROM t4; + +# Fetch data from information schema as well +--replace_column 3 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS; + +--replace_column 2 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS; + +SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE) +FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_SCHEMA = 'test' +GROUP BY TABLE_NAME, PARTITION_NAME; + +--replace_column 3 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; + +SHOW ENGINE rocksdb MUTEX; +# For SHOW ALL MUTEX even the number of lines is volatile, so the result logging is disabled +--disable_result_log +SHOW ENGINE ALL MUTEX; +--enable_result_log + +# The output from SHOW ENGINE ROCKSDB TRANSACTION STATUS has some +# non-deterministic results. Replace the timestamp with 'TIMESTAMP', the +# number of seconds active with 'NUM', the thread id with 'TID' and the thread +# pointer with 'PTR'. 
This test may fail in the future if it is being run in +# parallel with other tests as the number of snapshots would then be greater +# than expected. We may need to turn off the result log if that is the case. +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +ROLLBACK; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt new file mode 100644 index 00000000000..83bb6823ee3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt @@ -0,0 +1,2 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test new file mode 100644 index 00000000000..29cc2ccfb5e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test @@ -0,0 +1,64 @@ +--source include/have_rocksdb.inc + +# +# SHOW TABLE STATUS statement +# + +################################### +# TODO: +# The result file is likely to change +# if MDEV-4197 is fixed +################################### + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo'); + +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t2 (a,b) VALUES (1,'bar'); + +set global rocksdb_force_flush_memtable_now = true; + +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET 
utf8; + +--replace_column 6 # 7 # +SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' ); + +# Some statistics don't get updated as quickly. The Data_length and +# Avg_row_length are trailing statistics, meaning they don't get updated +# for the current SST until the next SST is written. Insert a bunch of data, +# then flush, then insert a bit more and do another flush to get them to show +# up. + +--disable_query_log +let $count = 2; +let $max = 10000; +while ($count < $max) { + eval INSERT INTO t2 (a) VALUES ($count); + inc $count; +} + +set global rocksdb_force_flush_memtable_now = true; +eval INSERT INTO t2 (a) VALUES ($max); +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log + +# We expect the number of rows to be 10000. Data_len and Avg_row_len +# may vary, depending on built-in compression library. +--replace_column 6 # 7 # +SHOW TABLE STATUS WHERE name LIKE 't2'; +DROP TABLE t1, t2, t3; + +# +# Confirm that long db and table names work. +# + +CREATE DATABASE `db_new..............................................end`; +USE `db_new..............................................end`; +CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb; +INSERT INTO `t1_new..............................................end` VALUES (1); +--query_vertical SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end' +DROP DATABASE `db_new..............................................end`; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt new file mode 100644 index 00000000000..d6c7939eae6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt @@ -0,0 +1 @@ +--log-bin --binlog_format=row --rocksdb_default_cf_options=write_buffer_size=64k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test 
b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test new file mode 100644 index 00000000000..ba625deb514 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Ensure bin log is enabled. +SHOW GLOBAL VARIABLES LIKE "log_bin"; + +# Create the table and insert some keys +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} +--enable_query_log + +# Restart the server +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +# Verify table has correct rows +SELECT COUNT(*) FROM t1; + +#cleanup +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt new file mode 100644 index 00000000000..72b3af6bcf7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=16k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test new file mode 100644 index 00000000000..718f6b7202e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test @@ -0,0 +1,89 @@ +--source include/have_rocksdb.inc + +# only SingleDelete increases +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select 
variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t1 SET value=value+1 WHERE value=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t1; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + + +# both SingleDelete and Delete increases +CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t2 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t2; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only Delete increases +CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t3 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; 
+--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t3 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t3; +select case when variable_value-@s = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only SingleDelete increases +CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t4 VALUES (1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t4 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t4; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only SingleDelete increases +CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB; +INSERT INTO t5 VALUES (1, 1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t5 SET id1=id1+1 WHERE id1=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize 
table t5; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +DROP TABLE t1, t2, t3, t4, t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt new file mode 100644 index 00000000000..fc5c3ed4c7a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt @@ -0,0 +1 @@ +--log-slow-extra --rocksdb-perf-context-level=2 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test new file mode 100644 index 00000000000..9f36a7fb958 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test @@ -0,0 +1,34 @@ +--source include/have_rocksdb.inc +SET @cur_long_query_time = @@long_query_time; +# Set the long query time to something big so that nothing unexpected gets into it +SET @@long_query_time = 600; +# Test the slow query log feature + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB; + +--disable_query_log +let $max = 10000; +let $i = 1; +while ($i < $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} + +DELETE FROM t1 WHERE id < 2500; +--enable_query_log + +SET @@long_query_time = 0; +# we expect this query to be reflected in the slow query log +SELECT COUNT(*) FROM t1; + +SET @@long_query_time = @cur_long_query_time; + +# Verify the output of the slow query log contains counts for the skipped keys +--exec awk -f suite/rocksdb/slow_query_log.awk $MYSQLTEST_VARDIR/mysqld.1/mysqld-slow.log + +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh new file mode 100755 index 00000000000..72442fa1e3e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +sst_dump=$2 +wait_for_no_more_deletes=$3 +num_retries=240 +retry=0 + +echo "wait_for_delete: $wait_for_no_more_deletes" + +while : ; do + TOTAL_D=0 + TOTAL_E=0 + for f in `ls $1/mysqld.1/data/.rocksdb/*.sst` + do + # excluding system cf + DELETED=`$sst_dump --command=scan --output_hex --file=$f | \ + perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ + grep -e ": 0" -e ": 7" | wc -l` + EXISTS=`$sst_dump --command=scan --output_hex --file=$f | \ + perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ + grep ": 1" | wc -l` + TOTAL_D=$(($TOTAL_D+$DELETED)) + TOTAL_E=$(($TOTAL_E+$EXISTS)) + # echo "${f##*/} $DELETED $EXISTS" + done + if [ $TOTAL_E != "0" ] + then + if [ $TOTAL_D = "0" ] || [ $wait_for_no_more_deletes = "0" ] + then + break + fi + fi + if [ $retry -ge $num_retries ] + then + break + fi + sleep 1 + retry=$(($retry + 1)) +done + +if [ "$TOTAL_E" = "0" ] +then + echo "No records in the database" + exit +fi + +if [ "$TOTAL_D" = "0" ] +then + echo "No more deletes left" +else + echo "There are deletes left" +fi diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt new file mode 100644 index 00000000000..8a56deb0299 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=max_write_buffer_number_to_maintain=10 +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics.test b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test new file mode 100644 index 00000000000..3971fd18ecd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test @@ 
-0,0 +1,74 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +--enable_warnings + +# table with index in default CF +create table t1( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t1_1(b) +) engine=rocksdb; + +# a table with index in a different CF +create table t2( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t2_1(b) comment 'cf_t3' +) engine=rocksdb; + +# a table with index in a reverse CF +create table t3( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t3_1(b) comment 'rev:cf_t4' +) engine=rocksdb; + +--disable_query_log +let $i=0; +while ($i<100000) +{ + inc $i; + eval insert t1(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + if ($i<5000) + { + eval insert t2(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + eval insert t3(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + } +} +--enable_query_log + +# should have some statistics before the memtable flush +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; + +# due to inconsistencies in when the memtable is flushed, just verify t1 has fewer +# than the expected number of rows. 
+SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1'; + +# flush and get even better statistics +set global rocksdb_force_flush_memtable_now = true; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +# restart the server, check the stats +--source include/restart_mysqld.inc + +# give the server a chance to load in statistics +--exec sleep 5 + +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +analyze table t1,t2,t3,t4,t5; + +# make sure that stats do not change after calling analyze table +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test new file mode 100644 index 00000000000..734a5169608 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create the table and insert some keys +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} +--enable_query_log + +# Verify table has correct rows +SELECT COUNT(*) FROM t1; + +# Verify the table stats are returned +SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; + +#cleanup +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test new file mode 100644 index 00000000000..8fb4539b401 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test @@ -0,0 +1,29 @@ +--source include/have_rocksdb.inc + +# +# Check whether AUTO_INCREMENT option +# is supported in CREATE and ALTER TABLE +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; + +ALTER TABLE t1 AUTO_INCREMENT=100; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; + +ALTER TABLE t1 AUTO_INCREMENT=50; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test new file mode 100644 index 00000000000..3e6797a8686 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether AVG_ROW_LENGTH option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 AVG_ROW_LENGTH=30000000; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test new file mode 100644 index 00000000000..3b49b967937 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +# +# 
Check whether CHECKSUM option is supported +# in CREATE and ALTER TABLE. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHECKSUM=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test new file mode 100644 index 00000000000..b97b3dd9d4c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# +# Check whether CONNECTION option is supported +# in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE DATABASE test_remote; +CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection'; +SHOW CREATE TABLE t1; +ALTER TABLE t1 CONNECTION='test_connection2'; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +DROP SERVER test_connection; +DROP SERVER test_connection2; +DROP DATABASE test_remote; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test new file mode 100644 index 00000000000..a188d298cb0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +# +# Check whether DATA DIRECTORY and INDEX DIRECTORY +# are supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the options +# have 
any real effect on the table, only +# that they are accepted +# (and apparently ignored) +# + +--let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/ +--let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/ +--mkdir $data_dir +--mkdir $index_dir + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--replace_result $data_dir $index_dir +eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir'; +--replace_result $data_dir $index_dir +SHOW CREATE TABLE t1; + +--replace_result $data_dir $index_dir +eval ALTER TABLE t1 INDEX DIRECTORY = '$data_dir'; +--replace_result $data_dir $index_dir +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +--rmdir $data_dir +--rmdir $index_dir + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test new file mode 100644 index 00000000000..85cd45e969d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether DELAY_KEY_WRITE option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DELAY_KEY_WRITE=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test new file mode 100644 index 00000000000..e289827ac72 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether INSERT_METHOD option +# is supported in CREATE and ALTER TABLE +# +# 
Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted (and apparently ignored) +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 INSERT_METHOD=NO; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test new file mode 100644 index 00000000000..d927c785ae9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether KEY_BLOCK_SIZE option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test new file mode 100644 index 00000000000..35aa0f4dafa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether MAX_ROWS option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MAX_ROWS=30000000; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test new file mode 100644 index 00000000000..d62a8771ea3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether MIN_ROWS option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MIN_ROWS=10000; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test new file mode 100644 index 00000000000..acdb612b4d4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether PACK KEYS option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 PACK_KEYS=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test new file mode 100644 index 00000000000..e897992e933 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +# +# Check whether PASSWORD option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the 
table, only +# that it's accepted +# +# This option is not supported by any known engines, +# that's why the result file does not contain it; +# but it's syntactically acceptable. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password'; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 PASSWORD='new_password'; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test new file mode 100644 index 00000000000..de834d238ef --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether ROW_FORMAT option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test new file mode 100644 index 00000000000..d3c371b18c7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test @@ -0,0 +1,28 @@ +--source include/have_rocksdb.inc + +# +# Check whether UNION option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1, child1, child2; +--enable_warnings + +--disable_query_log +CREATE TABLE child1 (a INT PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE child2 (a INT PRIMARY KEY) ENGINE=MyISAM; +--enable_query_log + +CREATE TABLE t1 (a INT 
PRIMARY KEY) ENGINE=rocksdb UNION(child1); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 UNION = (child1,child2); +SHOW CREATE TABLE t1; + +DROP TABLE t1, child1, child2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test new file mode 100644 index 00000000000..5d60c02a7e6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test @@ -0,0 +1,42 @@ +--source include/have_rocksdb.inc + +# +# Standard options in CREATE and ALTER TABLE +# +# Note: the test does not check whether the options +# have any real effect on the table, only +# that they are accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create table with standard options + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb + DEFAULT CHARACTER SET = utf8 + COLLATE = utf8_general_ci + COMMENT = 'standard table options' +; +SHOW CREATE TABLE t1; + +# Alter comment + +ALTER TABLE t1 COMMENT = 'table altered'; +SHOW CREATE TABLE t1; + +# Alter ENGINE value + +ALTER TABLE t1 ENGINE=MEMORY; +SHOW CREATE TABLE t1; +ALTER TABLE t1 ENGINE=rocksdb; +SHOW CREATE TABLE t1; + +# Alter character set and collation + +ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction.test b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test new file mode 100644 index 00000000000..a76fa8f9871 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test @@ -0,0 +1,105 @@ +--source include/have_rocksdb.inc + +create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb; + +insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10); + +# insert +begin; +insert into t1 values (11,11,11); +--source transaction_select.inc +rollback; + +# insert in the middle +begin; +insert into t1 values 
(7,7,7); +--source transaction_select.inc +rollback; + +# update non-index column by primary key +begin; +update t1 set value2=100 where id=1; +--source transaction_select.inc +rollback; + +# update secondary key by primary key +begin; +update t1 set value=100 where id=1; +--source transaction_select.inc +rollback; + +# update primary key by primary key +begin; +update t1 set id=100 where id=1; +--source transaction_select.inc +rollback; + +# update non-index column key by secondary key +begin; +update t1 set value2=100 where value=1; +--source transaction_select.inc +rollback; + +# update secondary key by secondary key +begin; +update t1 set value=100 where value=1; +--source transaction_select.inc +rollback; + +# update primary key by secondary key +begin; +update t1 set id=100 where value=1; +--source transaction_select.inc +rollback; + +# update non-index column by non-index column +begin; +update t1 set value2=100 where value2=1; +--source transaction_select.inc +rollback; + +# update secondary key by non-index column +begin; +update t1 set value=100 where value2=1; +--source transaction_select.inc +rollback; + +# update primary key column by non-index column +begin; +update t1 set id=100 where value2=1; +--source transaction_select.inc +rollback; + + +# delete by primary key +begin; +delete from t1 where id=1; +--source transaction_select.inc +rollback; + +# delete by secondary key +begin; +delete from t1 where value=1; +--source transaction_select.inc +rollback; + +# delete by non-index column +begin; +delete from t1 where value2=1; +--source transaction_select.inc +rollback; + +# mixed +begin; +insert into t1 values (11,11,11); +insert into t1 values (12,12,12); +insert into t1 values (13,13,13); +delete from t1 where id=9; +delete from t1 where value=8; +update t1 set id=100 where value2=5; +update t1 set value=103 where value=4; +update t1 set id=115 where id=3; +--source transaction_select.inc +rollback; + +drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc new file mode 100644 index 00000000000..dbd1d90622f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc @@ -0,0 +1,150 @@ +# +# Basic check for transaction isolation. +# The results should be different depending on the isolation level. +# For some isolation levels, some statements will end with a timeout. +# If the engine has its own timeout parameters, reduce them to minimum, +# otherwise the test will take very long. +# If the timeout value is greater than the testcase-timeout the test is run with, +# it might fail due to the testcase timeout. +# + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; + +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +START TRANSACTION; +--sorted_result +SELECT a FROM t1; # First snapshot + +connection con2; + +BEGIN; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) VALUES(1); + +connection con1; +--sorted_result +SELECT a FROM t1; # Second snapshot + +connection con2; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) VALUES (2); + +connection con1; +--sorted_result +SELECT a FROM t1; # Third snapshot + +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) SELECT a+100 FROM t1; + +--sorted_result +SELECT a FROM t1; + +connection con2; +--sorted_result +SELECT a FROM t1; # Inside the transaction +COMMIT; +--sorted_result +SELECT a FROM t1; # Outside the transaction + +connection con1; +--sorted_result +SELECT a FROM t1; # Inside the transaction + +# Note: INSERT .. 
SELECT might be tricky, for example for InnoDB +# even with REPEATABLE-READ it works as if it is executed with READ COMMITTED. +# The test will have a 'logical' result for repeatable read, even although +# we currently don't have an engine which works this way. + +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) SELECT a+200 FROM t1; + +--sorted_result +SELECT a FROM t1; +COMMIT; +--sorted_result +SELECT a FROM t1; # Outside the transaction + +connection con2; +--sorted_result +SELECT a FROM t1; # After both transactions have committed + +# Now test with an error in one statement to make sure the snapshots are +# Held/released when expected +connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; + +connection con1; +BEGIN; +--sorted_result +SELECT a from t2; +--error ER_DUP_ENTRY +INSERT INTO t2 (a) VALUES (1), (3); # failure + +connection con2; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t2 (a) VALUES (2); +COMMIT; + +connection con1; +--sorted_result +SELECT a from t2; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; + +CREATE TABLE t3 ( + pk int unsigned PRIMARY KEY, + count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; +BEGIN; +SELECT * FROM t3; + +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; + +connection con1; +--error 0,ER_LOCK_DEADLOCK +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; + +# count will be 0 for repeatable read (because the last insert failed) +# and 1 for read committed +SELECT count FROM t3; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; + +--source include/wait_until_count_sessions.inc + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc new file mode 100644 index 00000000000..4feb427be49 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc @@ -0,0 +1,15 @@ +select * from t1 where id=1; +select * from t1 where value=1; +select value from t1 where value=1; +select * from t1 where value2=1; +select * from t1 where id=5; +select * from t1 where value=5; +select value from t1 where value=5; +select * from t1 where value2=5; +select * from t1 where id < 3; +select * from t1 where value < 3; +select value from t1 where value < 3; +select * from t1 where value2 < 3; +select * from t1; +select value from t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test new file mode 100644 index 00000000000..a61488654a3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test @@ -0,0 +1,74 @@ +--source include/have_rocksdb.inc + +# +# TRUNCATE TABLE +# + +######################################## +# TODO: +# A part of the test is disabled because +# HANDLER is not supported. 
If it ever +# changes, the test will complain about +# NOT producing ER_ILLEGAL_HA +######################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +TRUNCATE TABLE t1; +INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c'); +TRUNCATE TABLE t1; +SELECT a,b FROM t1; +DROP TABLE t1; + + +# Truncate resets auto-increment value on the table + +CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb; + +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 (c) VALUES ('a'),('b'),('c'); +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +TRUNCATE TABLE t1; +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 (c) VALUES ('d'); +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +--sorted_result +SELECT a,c FROM t1; +DROP TABLE t1; + +# Truncate closes handlers + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); + +--error ER_ILLEGAL_HA +HANDLER t1 OPEN AS h1; + +--disable_parsing + +HANDLER h1 READ FIRST; +TRUNCATE TABLE t1; +--error ER_UNKNOWN_TABLE +HANDLER h1 READ NEXT; +HANDLER t1 OPEN AS h2; +HANDLER h2 READ FIRST; + +--enable_parsing + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt new file mode 100644 index 00000000000..a9ebc4ec20b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt @@ -0,0 +1,2 @@ 
+--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test new file mode 100644 index 00000000000..b3f95f812b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +-- let $truncate_table = 1 +-- let $drop_table = 0 +-- source drop_table3.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc new file mode 100644 index 00000000000..8e2b0e8549b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc @@ -0,0 +1,45 @@ +# +# BINARY column types +# + +# $extra_col_options is set by a calling test + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + b BINARY $extra_col_opts, + b0 BINARY(0) $extra_col_opts, + b1 BINARY(1) $extra_col_opts, + b20 BINARY(20) $extra_col_opts, + b255 BINARY(255) $extra_col_opts, + pk BINARY PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn\'t already exist which would work.','a'); + +--sorted_result +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; + +# Invalid values + +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; + +--sorted_result +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; + +--error ER_TOO_BIG_FIELDLENGTH +eval ALTER TABLE t1 ADD COLUMN b257 BINARY(257) $extra_col_opts; + +SHOW COLUMNS IN t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test new file mode 100644 index 00000000000..91749e36a2e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BINARY column types +# + +--source type_binary.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt new file mode 100644 index 00000000000..6ad42e58aa2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test new file mode 100644 index 00000000000..f4360ed629b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test @@ -0,0 +1,99 @@ +--source include/have_rocksdb.inc + +# +# BINARY and VARBINARY columns with indexes +# + +####################################### +# TODO: +# A part of the test is disabled +# because unique keys are not supported +####################################### + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; 
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (b BINARY, + b20 BINARY(20) PRIMARY KEY, + v16 VARBINARY(16), + v128 VARBINARY(128) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20; +SELECT HEX(b20) FROM t1 ORDER BY b20; + +--replace_column 9 # +EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC; +SELECT HEX(b20) FROM t1 ORDER BY b20 DESC; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 (b BINARY, + b20 BINARY(20), + v16 VARBINARY(16), + v128 VARBINARY(128), + UNIQUE INDEX b_v (b,v128), + pk VARBINARY(10) PRIMARY KEY +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128); +--sorted_result +SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128); + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (b BINARY, + b20 BINARY(20), + v16 VARBINARY(16), + v128 VARBINARY(128), + pk VARBINARY(10) PRIMARY KEY, + INDEX (v16(10)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128,pk) VALUES 
('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6); +INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1; + +--replace_column 9 # +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc new file mode 100644 index 00000000000..ba0c6537404 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc @@ -0,0 +1,53 @@ +# +# BIT column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values + +eval CREATE TABLE t1 ( + a BIT $extra_col_opts, + b BIT(20) $extra_col_opts, + c BIT(64) $extra_col_opts, + d BIT(1) $extra_col_opts, + PRIMARY KEY (c) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +ALTER TABLE t1 DROP COLUMN d; +eval ALTER TABLE t1 ADD COLUMN d BIT(0) $extra_col_opts; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; + +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +--sorted_result +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; + +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +--sorted_result +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); + +DELETE FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c,d) VALUES 
(0x10,0,0,1); +SELECT a+0,b+0,c+0,d+0 FROM t1; + +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +--sorted_result +SELECT a+0,b+0,c+0,d+0 FROM t1; + +DROP TABLE t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) $extra_col_opts) ENGINE=rocksdb; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test new file mode 100644 index 00000000000..8d57cabffc8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BIT column type +# + +--source type_bit.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test new file mode 100644 index 00000000000..e4f4bb81819 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test @@ -0,0 +1,113 @@ +--source include/have_rocksdb.inc + +# +# BIT columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + a BIT, + b BIT(20) PRIMARY KEY, + c BIT(32), + d BIT(64) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES +(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13), +(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF); + +--replace_column 9 # +EXPLAIN SELECT b+0 FROM t1 ORDER BY b; +SELECT b+0 FROM t1 ORDER BY b; + +DROP TABLE t1; + +--echo # 
TODO: Unique indexes are not enforced +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, +UNIQUE INDEX b_c (b,c) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF; +--sorted_result +SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, + INDEX(a) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a; +SELECT DISTINCT a+0 FROM t1 ORDER BY a; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, + UNIQUE INDEX (d) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT d FROM t1 WHERE d BETWEEN 1 AND 10000; +--sorted_result +SELECT d+0 FROM t1 WHERE d BETWEEN 1 AND 10000; + +DROP TABLE t1; + +--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc new file mode 100644 index 00000000000..723b3ee528c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc @@ -0,0 +1,49 @@ +# +# BLOB column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + b BLOB $extra_col_opts, + b0 BLOB(0) $extra_col_opts, + b1 BLOB(1) $extra_col_opts, + b300 BLOB(300) $extra_col_opts, + bm BLOB(65535) $extra_col_opts, + b70k BLOB(70000) $extra_col_opts, + b17m BLOB(17000000) $extra_col_opts, + t TINYBLOB $extra_col_opts, + m MEDIUMBLOB $extra_col_opts, + l LONGBLOB $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values +# (cannot get MAX for all columns due to max_allowed_packet limitations) + +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); + +--sorted_result +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; + +# Invalid values (produce warnings, except for mediumblob and longblob columns for which the values are within limits) + +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; + +--error 
ER_TOO_BIG_DISPLAYWIDTH +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test new file mode 100644 index 00000000000..54859707091 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BLOB column types +# + +--source type_blob.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt new file mode 100644 index 00000000000..6ad42e58aa2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test new file mode 100644 index 00000000000..24c70e8e733 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test @@ -0,0 +1,176 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# +# BLOB columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), 
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 # +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; + +--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 # +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; + +DROP TABLE t1; + + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + pk INT AUTO_INCREMENT PRIMARY KEY, + UNIQUE INDEX l_t (l(256),t(64)) +) ENGINE=rocksdb; + +--replace_column 6 # 7 # 10 # 11 # +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +# Here we are getting possible key l_t, but not the final key +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; + +DROP TABLE t1; + + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + pk INT AUTO_INCREMENT PRIMARY KEY, + INDEX (m(128)) +) 
ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b BLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TINYBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b MEDIUMBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; 
+ +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc new file mode 100644 index 00000000000..cddc0822c44 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc @@ -0,0 +1,64 @@ +# +# BOOLEAN column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + b1 BOOL $extra_col_opts, + b2 BOOLEAN $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (2,3); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +--sorted_result +SELECT b1,b2 FROM t1; + +--sorted_result +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; + +--sorted_result +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; + +--sorted_result +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; + +# Invalid values + +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (128,-129); +--sorted_result +SELECT b1,b2 FROM t1; + +# This is why we don't have zerofill and unsigned tests +# for boolean columns: +--error ER_PARSE_ERROR +eval ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED $extra_col_opts; + +--error ER_PARSE_ERROR +eval ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL $extra_col_opts; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test new file mode 100644 index 00000000000..d5a3b9be83d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BOOLEAN column type +# + +--source type_bool.inc + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc new file mode 100644 index 00000000000..d770dc608fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc @@ -0,0 +1,45 @@ +# +# CHAR column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + c CHAR $extra_col_opts, + c0 CHAR(0) $extra_col_opts, + c1 CHAR(1) $extra_col_opts, + c20 CHAR(20) $extra_col_opts, + c255 CHAR(255) $extra_col_opts, + PRIMARY KEY (c255) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); + +--sorted_result +SELECT c,c0,c1,c20,c255 FROM t1; + +# Invalid values + +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; + +--sorted_result +SELECT c,c0,c1,c20,c255 FROM t1; + +--sorted_result +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; + +--error ER_TOO_BIG_FIELDLENGTH +eval ALTER TABLE t1 ADD COLUMN c257 CHAR(257) $extra_col_opts; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test new file mode 100644 index 00000000000..5bcf23b39ee --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +# +# CHAR column types +# + +--source type_char.inc + +# Issue #226 +CREATE TABLE t1(c1 CHAR(0) NOT NULL); +INSERT INTO t1 VALUES('a'); +SELECT * FROM t1; +DROP TABLE t1; + +# Issue #259 
+CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); +INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five'); +SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test new file mode 100644 index 00000000000..6ee2f03e74d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + +# +# CHAR and VARCHAR columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20) PRIMARY KEY, + v16 VARCHAR(16), + v128 VARCHAR(128) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT c20 FROM t1 ORDER BY c20; +SELECT c20 FROM t1 ORDER BY c20; + +--replace_column 9 # +EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20), + v16 VARCHAR(16), + v128 VARCHAR(128), + pk CHAR(64) PRIMARY KEY, + UNIQUE INDEX c_v (c,v128) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128) VALUES 
('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT v128, COUNT(*) FROM t1 GROUP BY v128; +--sorted_result +SELECT v128, COUNT(*) FROM t1 GROUP BY v128; + +--replace_column 9 # +EXPLAIN SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128; +--sorted_result +SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128; + +SET SESSION optimizer_switch = 'engine_condition_pushdown=on'; +--replace_column 9 # +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a'; +--sorted_result +SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a'; +SET SESSION optimizer_switch = @@global.optimizer_switch; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20), + v16 VARCHAR(16), + v128 VARCHAR(128), + pk VARCHAR(64) PRIMARY KEY, + INDEX (v16) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4'); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +--sorted_result +SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; + +DROP TABLE t1; + +SET GLOBAL 
ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test new file mode 100644 index 00000000000..d51b55f07e2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test @@ -0,0 +1,142 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# Test to see if index-only scan fails gracefully if unpack info is not +# available. +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t (id int not null auto_increment primary key, + c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + key sk (c)); +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +insert into t (c) values ('A'), ('b'), ('C'); +--replace_column 9 # +explain select c from t; +select c from t; +select c from t where c = 'a'; + +drop table t; + +# Test if unknown collation works. +set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment primary key, + c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, + key sk (c)); +insert into t (c) values ('☀'), ('ß'); +--replace_column 9 # +explain select c from t; +select c from t; +drop table t; +set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; + +# Testing if all characters in latin1 charset get restored correctly. This is +# done by comparing results from a PK scan. 
+create table t (id int not null auto_increment, + c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci, + primary key (id), + key sk1 (c1), + key sk2 (c2)); + +let $i = 0; + +--disable_query_log +while ($i < 256) +{ + --eval insert into t (c1, c2) values (CHAR('$i'), CHAR('$i')) + inc $i; +} +--enable_query_log + +--replace_column 9 # +explain select hex(c1) from t order by c1; +--replace_column 9 # +explain select hex(c1) from t IGNORE INDEX (sk1) order by c1; + +--replace_column 9 # +explain select hex(c2) from t order by c2; +--replace_column 9 # +explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; + +--let $file1=$MYSQLTEST_VARDIR/tmp/filesort_order +--let $file2=$MYSQLTEST_VARDIR/tmp/sk_order + +--disable_query_log +--eval select hex(c1) INTO OUTFILE '$file1' from t order by c1 +--eval select hex(c1) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c1 +--enable_query_log + +--diff_files $file1 $file2 +--remove_file $file1 +--remove_file $file2 + +--disable_query_log +--eval select hex(c2) INTO OUTFILE '$file1' from t order by c2 +--eval select hex(c2) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c2 +--enable_query_log + +--diff_files $file1 $file2 +--remove_file $file1 +--remove_file $file2 + +truncate t; + +# Test handling of spaces at the end of fields. 
+insert into t (c1, c2) values ('Asdf ', 'Asdf '); +select char_length(c1), char_length(c2), c1, c2 from t; + +drop table t; + +create table t (id int not null auto_increment, + c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci, + primary key (id), + unique key sk2 (c2)); + +insert into t (c2) values ('Asdf'); +--error ER_DUP_ENTRY +insert into t (c2) values ('asdf '); + +drop table t; + +create table t (id int not null auto_increment, + c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + primary key (id), + unique key sk1 (c1)); + +insert into t (c1) values ('Asdf'); +--error ER_DUP_ENTRY +insert into t (c1) values ('asdf '); +--error ER_DUP_ENTRY +insert into t (c1) values ('asdf'); + +drop table t; + +create table t (id int not null auto_increment, + c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + primary key (id), + unique key sk1 (c1(1))); + +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('bbbb '); +--error ER_DUP_ENTRY +insert into t (c1) values ('a '); + +--replace_column 9 # +explain select c1 from t; +select c1 from t; + +drop table t; + +# Test varchar keyparts with key prefix +set session rocksdb_verify_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; +insert into t values (1, ' a'); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; + +# Test varchar with length greater than 255 +create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1; +insert into t values (1, REPEAT('a', 700)); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc 
new file mode 100644 index 00000000000..69d1154ea39 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc @@ -0,0 +1,45 @@ +# +# Date and time column types +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + d DATE $extra_col_opts, + dt DATETIME $extra_col_opts, + ts TIMESTAMP $extra_col_opts, + t TIME $extra_col_opts, + y YEAR $extra_col_opts, + y4 YEAR(4) $extra_col_opts, + y2 YEAR(2) $extra_col_opts, + pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +SET @tm = '2012-04-09 05:27:00'; + +# Valid values +# '1970-01-01 00:00:01' +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); + +--sorted_result +SELECT d,dt,ts,t,y,y4,y2 FROM t1; + +# Invalid values + +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); + +SELECT d,dt,ts,t,y,y4,y2 FROM t1; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test new file mode 100644 index 00000000000..af4e006c900 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test @@ -0,0 +1,9 @@ +--source include/have_rocksdb.inc + +# +# Date and time column types +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +--source type_date_time.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt 
b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test new file mode 100644 index 00000000000..06cf86b7661 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test @@ -0,0 +1,157 @@ +--source include/have_rocksdb.inc + +# +# Date and time columns with indexes +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + d DATE, + dt DATETIME PRIMARY KEY, + ts TIMESTAMP, + t TIME, + y YEAR +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm)); + +--replace_column 9 # +EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3; +SELECT dt FROM t1 ORDER BY dt LIMIT 3; + +--replace_column 9 # +EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; + +--error ER_DUP_ENTRY +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'); + +DROP 
TABLE t1; + +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk TIME PRIMARY KEY, + INDEX (ts) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00'); + +--replace_column 9 # +EXPLAIN SELECT ts FROM t1 WHERE ts > NOW(); +--sorted_result +SELECT ts FROM t1 WHERE ts > NOW(); + +--replace_column 9 # +EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +--sorted_result +SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk YEAR PRIMARY KEY, + UNIQUE INDEX d_t (d,t) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','1990'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','1991'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','1992'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','1993'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'1995'); + +--replace_column 9 # +EXPLAIN SELECT d, t FROM t1 WHERE CONCAT(d,' ',t) != CURRENT_DATE(); +--sorted_result +SELECT d, t FROM t1 
WHERE CONCAT(d,' ',t) != CURRENT_DATE(); + +--replace_column 9 # +EXPLAIN SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE(); +--sorted_result +SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE(); + +--error ER_DUP_ENTRY +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:53', '2011-11-14 21:45:55', '00:12:33', '2000'); + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk TIME PRIMARY KEY, + INDEX (y,t) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18'); + +--replace_column 9 # +EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y; +--sorted_result +SELECT y, COUNT(*) FROM t1 GROUP BY y; + +--replace_column 9 # +EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +--sorted_result +SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test new file mode 100644 index 00000000000..d5ee75686df --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test @@ -0,0 +1,163 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1, t2; 
+--enable_warnings + +--echo # +--echo # Check that DECIMAL PK +--echo # +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +--echo # First, make the server to create a dataset in the old format: +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t1 ( + pk1 decimal(32,16), + pk2 decimal(32,16), + pk3 decimal(32,16), + a smallint not null, + primary key(pk1, pk2, pk3) +); +insert into t1 +select + A.a, B.a, C.a, 1234 +from t0 A, t0 B, t0 C; + +--echo # +--echo # Looking at the table size, one can tell that the data is stored using +--echo # old format: +--echo # +set global rocksdb_force_flush_memtable_now=1; + +--let $data_length_old = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", DATA_LENGTH, 1) + +--echo # Check the format version: +select table_name,index_name,kv_format_version +from information_schema.ROCKSDB_DDL +where TABLE_SCHEMA=database() AND table_name='t1'; + +flush tables; + +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +--source include/restart_mysqld.inc + +--echo # Check that the new server reads the data in the old format: +select * from t1 order by pk1,pk2,pk3 limit 5; + +--echo # +--echo # Ok, now, enable the new data format: +--echo # +create table t2 ( + pk1 decimal(32,16), + pk2 decimal(32,16), + pk3 decimal(32,16), + a smallint not null, + primary key(pk1, pk2, pk3) +); +insert into t2 +select + A.a, B.a, C.a, 1234 +from t0 A, t0 B, t0 C; +set global rocksdb_force_flush_memtable_now=1; + +--let $data_length_new = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t2'", DATA_LENGTH, 1) +--disable_query_log +--eval select $data_length_old > $data_length_new as "larger" +--enable_query_log + +--echo # This should show the new PK data fromat +select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL +where 
TABLE_SCHEMA=database() AND table_name='t2'; + +--echo # +--echo # Check that the server is able to read BOTH the old and the new formats: +--echo # +select * from t2 limit 3; +select * from t1 limit 3; + +drop table t1,t2; +drop table t0; + +--echo # +--echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. +--echo # (Decoding happens from the mem-comparable image in the index, regardless +--echo # of whether the value part has original value or not) +--echo # + +create table t1 ( + pk int not null primary key, + col1 decimal (2,1) signed, + col2 decimal (2,1) unsigned, + filler varchar(100), + key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-9.1, 0.7, 'filler'), +(2,-8.2, 1.6, 'filler'), +(3, 0.3, 2.5, 'filler'), +(4, 1.4, 3.4, 'filler'), +(5, 2.5, 4.3, 'filler'), +(6, 3.3, 5.3, 'filler'); +insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1; +analyze table t1; + +--echo # The following can't use index-only: +--replace_column 9 # +explain select * from t1 where col1 between -8 and 8; + +--echo # This will use index-only: +--replace_column 9 # +explain +select col1, col2 from t1 where col1 between -8 and 8; +select col1, col2 from t1 where col1 between -8 and 8; + +insert into t1 values (11, NULL, 0.9, 'row1-with-null'); +insert into t1 values (10, -8.4, NULL, 'row2-with-null'); +--replace_column 9 # +explain +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; + +--echo # Try an UPDATE +select * from t1 where pk in (3,4); +update t1 set col2= col2+0.2 where pk in (3,4); +select * from t1 where pk in (3,4); + +drop table t1; + +--echo # +--echo # Try 
another DECIMAL-based type that takes more space +--echo # +create table t1 ( + pk int not null primary key, + col1 decimal (12,6) signed, + col2 decimal (12,6) unsigned, + filler varchar(100), + key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-900.001, 000.007, 'filler'), +(2,-700.002, 100.006, 'filler'), +(3, 000.003, 200.005, 'filler'), +(4, 100.004, 300.004, 'filler'), +(5, 200.005, 400.003, 'filler'), +(6, 300.003, 500.003, 'filler'); +insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1; +analyze table t1; + +--replace_column 9 # +explain +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc new file mode 100644 index 00000000000..8184f6261cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc @@ -0,0 +1,50 @@ +# +# ENUM column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values. 
+# We cannot test the maximum of 65,536 here, +# because mysqltest has the standard limit of MAX_QUERY=256K; +# but we can at least try 257 + +eval CREATE TABLE t1 ( + a ENUM('') $extra_col_opts, + b ENUM('test1','test2','test3','test4','test5') $extra_col_opts, + c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') $extra_col_opts, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +--sorted_result +SELECT a,b,c FROM t1; + +# Non-unique values in enum +# (should produce a warning) +eval ALTER TABLE t1 ADD COLUMN e ENUM('a','A') $extra_col_opts; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,e) 
VALUES ('','test3','75','A'); +--sorted_result +SELECT a,b,c,e FROM t1; + +# Simple comparison + +--sorted_result +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test new file mode 100644 index 00000000000..d79469b2fad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# ENUM column type +# + +--source type_enum.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test new file mode 100644 index 00000000000..d7086a45fe1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test @@ -0,0 +1,93 @@ +--source include/have_rocksdb.inc + +# +# ENUM columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), + pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + UNIQUE KEY a_b (a,b) +) ENGINE=rocksdb; + + +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); + +SHOW INDEX 
IN t1; + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 WHERE b > 'test2' ORDER BY a; +SELECT a FROM t1 WHERE b > 'test2' ORDER BY a; + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a; +SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c) VALUES +('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'), +('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d'); + +SHOW INDEX IN t1; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +--sorted_result +SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +--sorted_result +SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; + +DROP TABLE t1; + +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), + pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + INDEX(b) +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); + +SHOW INDEX IN t1; + +--replace_column 9 # +EXPLAIN SELECT DISTINCT b FROM t1; +--sorted_result +SELECT DISTINCT b FROM t1; + +--replace_column 9 # +EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +--sorted_result +SELECT DISTINCT b FROM t1 IGNORE INDEX (b); + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 
@ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc new file mode 100644 index 00000000000..424f7c4f4ac --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc @@ -0,0 +1,85 @@ +# +# Fixed point types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + d DECIMAL $extra_col_opts, + d0 DECIMAL(0) $extra_col_opts, + d1_1 DECIMAL(1,1) $extra_col_opts, + d10_2 DECIMAL(10,2) $extra_col_opts, + d60_10 DECIMAL(60,10) $extra_col_opts, + n NUMERIC $extra_col_opts, + n0_0 NUMERIC(0,0) $extra_col_opts, + n1 NUMERIC(1) $extra_col_opts, + n20_4 NUMERIC(20,4) $extra_col_opts, + n65_4 NUMERIC(65,4) $extra_col_opts, + pk NUMERIC $extra_col_opts PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; + +# Invalid values + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 6 +); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) $extra_col_opts; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) $extra_col_opts; + +--error ER_TOO_BIG_SCALE +eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) $extra_col_opts; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test new file mode 100644 index 00000000000..4f2c09d17c1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# Fixed point types +# + +--source type_fixed.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test new file mode 100644 index 00000000000..e9e6df58d21 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + +# +# Fixed point columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + d1 DECIMAL(10,2) PRIMARY KEY, + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4) +) 
ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2) VALUES +(10.22,60.12345,123456,14.3456), +(10.0,60.12345,123456,14), +(11.14,15,123456,13), +(100,100,1,2), +(0,0,0,0), +(4540424564.23,3343303441.0,12,13), +(15,17,23,100000); + +--replace_column 9 # +EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC; +SELECT d1 FROM t1 ORDER BY d1 DESC; + +--replace_column 9 # +EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; + +DROP TABLE t1; + +# --error ER_GET_ERRMSG +CREATE TABLE t1 ( + d1 DECIMAL(10,2), + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4), + pk NUMERIC PRIMARY KEY, + UNIQUE INDEX n1_n2 (n1,n2) +) ENGINE=rocksdb; + +# --disable_parsing + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT n1+n2 FROM t1; +--sorted_result +SELECT DISTINCT n1+n2 FROM t1; + +DROP TABLE t1; + +#--enable_parsing + +CREATE TABLE t1 ( + d1 DECIMAL(10,2), + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4), + pk DECIMAL(20,10) PRIMARY KEY, + INDEX (d2) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); + +--replace_column 9 # +EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +--sorted_result +SELECT d2, COUNT(*) FROM t1 GROUP BY d2; + +--replace_column 9 # +EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; +--sorted_result +SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc new file mode 100644 index 00000000000..2f37e55b8d6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc @@ -0,0 +1,108 @@ +# +# Float types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + f FLOAT $extra_col_opts, + f0 FLOAT(0) $extra_col_opts, + r1_1 REAL(1,1) $extra_col_opts, + f23_0 FLOAT(23) $extra_col_opts, + f20_3 FLOAT(20,3) $extra_col_opts, + d DOUBLE $extra_col_opts, + d1_0 DOUBLE(1,0) $extra_col_opts, + d10_10 DOUBLE PRECISION (10,10) $extra_col_opts, + d53 DOUBLE(53,0) $extra_col_opts, + d53_10 DOUBLE(53,10) $extra_col_opts, + pk DOUBLE $extra_col_opts PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 99999999999999999999999999999999999999, + 99999999999999999999999999999999999999.9999999999999999, + 0.9, + 99999999999999999999999999999999999999.9, + 99999999999999999.999, + 999999999999999999999999999999999999999999999999999999999999999999999999999999999, + 9, + 0.9999999999, + 1999999999999999999999999999999999999999999999999999999, + 19999999999999999999999999999999999999999999.9999999999, + 3 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 
(f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +--sorted_result +--query_vertical SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1 + +# Invalid values + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 5 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 999999999999999999999999999999999999999, + 999999999999999999999999999999999999999.9999999999999999, + 1.9, + 999999999999999999999999999999999999999.9, + 999999999999999999.999, + 9999999999999999999999999999999999999999999999999999999999999999999999999999999999, + 99, + 1.9999999999, + 1999999999999999999999999999999999999999999999999999999, + 
19999999999999999999999999999999999999999999.9999999999, + 6 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +--error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) $extra_col_opts; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) $extra_col_opts; + +--error ER_TOO_BIG_SCALE +eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test new file mode 100644 index 00000000000..1f1849992ca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# Float types +# + +--source type_float.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test new file mode 100644 index 00000000000..907bc614d49 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test @@ -0,0 +1,175 @@ +--source include/have_rocksdb.inc + +# +# Float type columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + f FLOAT PRIMARY KEY, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp) VALUES +(1.2345,1422.22,1.2345,1234567.89), +(0,0,0,0), +(-1,-1,-1,-1), 
+(17.5843,4953453454.44,29229114.0,1111111.23), +(4644,1422.22,466664.999,0.5); + +--replace_column 9 # +EXPLAIN SELECT f FROM t1 ORDER BY f; +SELECT f FROM t1 ORDER BY f; + +--replace_column 9 # +EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; + +DROP TABLE t1; + +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk DOUBLE PRIMARY KEY, + UNIQUE KEY r_dp (r,dp) +) ENGINE=rocksdb; + +#--disable_parsing + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +--sorted_result +SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; + +DROP TABLE t1; + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + UNIQUE KEY(d) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +SELECT DISTINCT d FROM t1 ORDER BY d; + +DROP TABLE t1; + +#--enable_parsing + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + KEY(d) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +SELECT DISTINCT d FROM t1 ORDER BY d; + +DROP TABLE t1; + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + 
pk FLOAT PRIMARY KEY, + UNIQUE KEY(f) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +# Should fail because of 'unique' constraint +--error ER_DUP_ENTRY +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +SELECT DISTINCT f FROM t1 ORDER BY f; + +DROP TABLE t1; + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + KEY(f) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +# Should succeed because of no 'unique' constraint +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +SELECT DISTINCT f FROM t1 ORDER BY f; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc new file mode 100644 index 00000000000..dbcdfe4fbdd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc @@ -0,0 +1,68 @@ +# +# INT column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + i INT $extra_col_opts, + i0 INT(0) $extra_col_opts, + i1 INT(1) $extra_col_opts, + i20 INT(20) $extra_col_opts, + t TINYINT $extra_col_opts, + t0 TINYINT(0) $extra_col_opts, + t1 TINYINT(1) $extra_col_opts, + t20 TINYINT(20) $extra_col_opts, + s SMALLINT $extra_col_opts, + s0 SMALLINT(0) $extra_col_opts, + s1 SMALLINT(1) $extra_col_opts, + s20 SMALLINT(20) $extra_col_opts, + m MEDIUMINT $extra_col_opts, + m0 MEDIUMINT(0) 
$extra_col_opts, + m1 MEDIUMINT(1) $extra_col_opts, + m20 MEDIUMINT(20) $extra_col_opts, + b BIGINT $extra_col_opts, + b0 BIGINT(0) $extra_col_opts, + b1 BIGINT(1) $extra_col_opts, + b20 BIGINT(20) $extra_col_opts, + pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); + +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +# Invalid values + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES 
(-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); + +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN i257 INT(257) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test new file mode 100644 index 00000000000..e92f6692172 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# INT column types +# + +--source type_int.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test new file mode 100644 index 00000000000..c95c3d88b8a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc + +# +# INT column types with indexes +# + +SET 
@ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + i INT PRIMARY KEY, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT +) ENGINE=rocksdb; + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +--replace_column 9 # +EXPLAIN SELECT i FROM t1 ORDER BY i; +SELECT i FROM t1 ORDER BY i; + +DROP TABLE t1; + +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk SMALLINT AUTO_INCREMENT PRIMARY KEY, + INDEX s_m (s,m) +) ENGINE=rocksdb; + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +--replace_column 9 # +EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +--sorted_result +SELECT s, m FROM t1 WHERE s != 10 AND m != 1; + +DROP TABLE t1; + +--echo # RocksDB: unique indexes allowed +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, + UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; + +##--disable_parsing + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +# This query should use the index b_t, we just don't want to run EXPLAIN +# (to avoid mismatches due to different subquery-related plans) +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; 
+SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; + +DROP TABLE t1; + +##--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc new file mode 100644 index 00000000000..25ae58294f7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc @@ -0,0 +1,49 @@ +# +# SET column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values. + +eval CREATE TABLE t1 ( + a SET('') $extra_col_opts, + b SET('test1','test2','test3','test4','test5') $extra_col_opts, + c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') $extra_col_opts, + PRIMARY KEY (c) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +--sorted_result +SELECT a,b,c FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +--sorted_result +SELECT a,b,c FROM t1; + +# Non-unique values in set +# (should produce a warning) +eval ALTER TABLE t1 ADD COLUMN e SET('a','A') $extra_col_opts; + +SHOW COLUMNS IN t1; + +--error ER_TOO_BIG_SET +eval ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') $extra_col_opts; + +# Simple comparison + +--sorted_result +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test new file mode 100644 index 00000000000..bd306668e6d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# SET column type +# + +--source type_set.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test new file mode 100644 index 00000000000..79232255d25 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test @@ -0,0 +1,104 @@ +--source include/have_rocksdb.inc + +# +# SET columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c 
SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 ORDER BY c; +SELECT c FROM t1 ORDER BY c; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02'); + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44'); + +DROP TABLE t1; + +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c SET('01','22','23','33','34','39','40','44','50','63','64'), + pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + INDEX(a) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,pk) VALUES +('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +--sorted_result +SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; + +DROP TABLE t1; + +--disable_parsing +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c SET('01','22','23','33','34','39','40','44','50','63','64'), + pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + UNIQUE INDEX b_a (b,a) +) ENGINE=innodb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c) VALUES 
+('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; +--sorted_result +SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; + +DROP TABLE t1; + +--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc new file mode 100644 index 00000000000..f5c9c3b8978 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc @@ -0,0 +1,49 @@ +# +# TEXT column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + t TEXT $extra_col_opts, + t0 TEXT(0) $extra_col_opts, + t1 TEXT(1) $extra_col_opts, + t300 TEXT(300) $extra_col_opts, + tm TEXT(65535) $extra_col_opts, + t70k TEXT(70000) $extra_col_opts, + t17m TEXT(17000000) $extra_col_opts, + tt TINYTEXT $extra_col_opts, + m MEDIUMTEXT $extra_col_opts, + l LONGTEXT $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values +# (cannot get MAX for all columns due to max_allowed_packet limitations) + +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; + +# Invalid values (produce warnings, except for mediumtext and 
longtext columns for which the values are within limits) + +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test new file mode 100644 index 00000000000..c6dd24ff42e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# TEXT column types +# + +--source type_text.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt new file mode 100644 index 00000000000..6ad42e58aa2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test new file mode 100644 index 00000000000..5fdc4bff39d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test @@ -0,0 +1,171 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# +# TEXT columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + PRIMARY KEY t (t(32)) +) 
ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (t,tt,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +DROP TABLE t1; + +--error ER_BLOB_KEY_WITHOUT_LENGTH +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + pk TINYTEXT PRIMARY KEY, + UNIQUE INDEX l_tt (l(256),tt(64)) +) ENGINE=rocksdb; + +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + pk MEDIUMTEXT, + PRIMARY KEY mt (pk(1)), + INDEX (m(128)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (t,tt,m,l,pk) VALUES +('','','','','0'), +('a','b','c','d','1'), +('b','d','c','b','2'), +('test1','test2','test3','test4','3'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'), +('abc','def','ghi','jkl','5'), +('test2','test3','test4','test5','6'), +('test3','test4','test5','test6','7'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9'); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TEXT, + PRIMARY KEY b 
(b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TINYTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b MEDIUMTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "binary" COLLATE "binary", + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); + +SELECT hex(b) FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +--error ER_DUP_ENTRY +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); + +INSERT INTO t1 (b) VALUES (''), (_binary 0x0); +SELECT hex(b) FROM t1; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc new file mode 100644 index 00000000000..a9f9883198c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc @@ -0,0 
+1,75 @@ +# +# VARBINARY column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +eval CREATE TABLE t1 ( + v0 VARBINARY(0) $extra_col_opts, + v1 VARBINARY(1) $extra_col_opts, + v64 VARBINARY(64) $extra_col_opts, + v65000 VARBINARY(65000) $extra_col_opts, + PRIMARY KEY (v64) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +eval CREATE TABLE t2 (v VARBINARY(65532) $extra_col_opts, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! 
+ + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. 
'); + +--sorted_result +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; + +# Invalid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; + +--sorted_result +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; + +eval ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) $extra_col_opts; +SHOW COLUMNS IN t1; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test new file mode 100644 index 00000000000..2ef3da8a499 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# VARBINARY column types +# + +--source type_varbinary.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc new file mode 100644 index 00000000000..ce5dbb444d3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc @@ -0,0 +1,77 @@ +# +# VARCHAR column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +eval CREATE TABLE t1 ( + v0 VARCHAR(0) $extra_col_opts, + v1 VARCHAR(1) $extra_col_opts, + v64 VARCHAR(64) $extra_col_opts, + v65000 VARCHAR(65000) $extra_col_opts, + PRIMARY KEY (v64) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; + +# Valid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. 
+ + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); + +--sorted_result +SELECT v0,v1,v64,v65000 FROM t1; + +# Invalid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; + +--sorted_result +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; + +eval ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) $extra_col_opts; +SHOW COLUMNS IN t1; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test new file mode 100644 index 00000000000..6416af7308f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +# +# VARCHAR column types +# + +--echo # +--echo # A basic test whether endspace-aware variable length encoding +--echo # works when in PK +--echo # +create table t1 ( pk varchar(10) primary key, col1 int not null) ; +insert into t1 values ('a', 1); +select * from t1; +drop table t1; + +--source type_varchar.inc + + +--echo # +--echo # Endspace-comparison tests: +--echo # + +--let $character_set_collate=CHARACTER SET latin1 COLLATE latin1_bin +--source 
type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET ucs2 COLLATE ucs2_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf8mb4 COLLATE utf8mb4_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf16 COLLATE utf16_bin +--source type_varchar_endspace.inc + +create table t1 ( + pk int primary key, + col1 varchar(10) collate utf8mb4_bin not null, + col2 varchar(20), + key(col1) +) engine=rocksdb; + +insert into t1 values (1, 'ab','ab'); +insert into t1 values (2, 'ab\0','ab0'); + +select pk, hex(col1), col2 from t1 force index(col1) order by col1; +select pk, hex(col1), col2 from t1 ignore index(col1) order by col1; +drop table t1; + +# Issue #306 - Do not store trailing spaces for prefixed keys. +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc '); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; + +set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; +set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; +set global rocksdb_checksums_pct = 100; +set session rocksdb_verify_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'a'); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; +set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; +set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test new file mode 100644 index 00000000000..d61e85ed204 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test @@ -0,0 +1,137 @@ +# +# VARCHAR encoding tests that require debug support +# +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + + +set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; + +--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_general_ci +--source type_varchar_endspace.inc + +set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; + +--echo # +--echo # Check backwards compatibility: +--echo # + +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; + +--echo # Create the tables in the old format + +create table t1 ( + pk varchar(64) collate latin1_bin, + col1 varchar(64), + primary key (pk) +); +insert into t1 values ('a','a'); +--echo # The following will not produce an error: +insert into t1 values ('a ', 'a-space'); +select pk, hex(pk), col1 from t1; + +create table t2 ( + pk int not null primary key, + col1 varchar(64) collate latin1_bin, + col2 varchar(64), + unique key (col1) +); + +insert into t2 values (0, 'ab', 'a-b'); +--echo # The following will not produce an error: +insert into t2 values (1, 'a ', 'a-space'); +insert into t2 values (2, 'a', 'a'); +select pk, col1, hex(col1), col2 from t2; + +--echo # Check the format version: +select table_name,index_name,kv_format_version +from information_schema.ROCKSDB_DDL +where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); + +flush tables; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +select pk, hex(pk), col1 from t1; +select pk, col1, hex(col1), col2 from t2; + +## Check that we can still read the data when starting on the old datadir: +--source include/restart_mysqld.inc + +select pk, 
hex(pk), col1 from t1; +select pk, col1, hex(col1), col2 from t2; + +drop table t1,t2; + + +--echo # +--echo # General upgrade tests to see that they work. +--echo # +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( + id int primary key, + col1 varchar(64) collate latin1_swedish_ci, + unique key (col1) +) engine=rocksdb; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; + +insert into t2 values (1, 'a'); +insert into t2 values (2, 'b'); +insert into t2 values (3, 'c'); +# Check if this is indeed the old format +insert into t2 values (4, 'c '); +select col1 from t2; +delete from t2 where id = 4; +alter table t2 engine=rocksdb; +select col1 from t2; +# Check if this is indeed the new format +--error ER_DUP_ENTRY +insert into t2 values (4, 'c '); +drop table t2; + +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( + id int primary key, + col1 varchar(64) collate latin1_bin, + unique key (col1) +) engine=rocksdb; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; + +insert into t2 values (1, 'a'); +insert into t2 values (2, 'b'); +insert into t2 values (3, 'c'); +# Check if this is indeed the old format +insert into t2 values (4, 'c '); +select col1 from t2; +delete from t2 where id = 4; +alter table t2 engine=rocksdb; +select col1 from t2; +# Check if this is indeed the new format +--error ER_DUP_ENTRY +insert into t2 values (4, 'c '); +drop table t2; + +--echo # +--echo # Check what happens when one tries to 'upgrade' to the new data format +--echo # and causes a unique key violation: +--echo # +set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +create table t2 ( + pk int not null primary key, + col1 varchar(64) collate latin1_bin, + col2 varchar(64), + unique key (col1) +); + +insert into t2 values (1, 'a ', 'a-space'); +insert into t2 values (2, 'a', 'a'); + +select * from t2; +set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; + +--error ER_DUP_ENTRY +alter table t2 engine=rocksdb; +drop 
table t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc new file mode 100644 index 00000000000..f813b8871b1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc @@ -0,0 +1,84 @@ +--echo # +--echo # Issue 257: Sort order for varchars is different between +--echo # MyISAM/InnoDB vs MyRocks +--echo # + +eval create table t1 ( + pk varchar(64) $character_set_collate, + col1 varchar(64), + primary key (pk) +); +insert into t1 values ('a','a'); +--error ER_DUP_ENTRY +insert into t1 values ('a ', 'a-space'); + +insert into t1 values('b ', 'b-2x-space'); +--error ER_DUP_ENTRY +insert into t1 values ('b', 'b'); + +select pk, hex(pk), col1 from t1; + +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; + +--echo # Try longer values +--error ER_DUP_ENTRY +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); + +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; + +drop table t1; + +--echo # Secondary index +eval create table t1 ( + pk int not null primary key, + col1 varchar(64) $character_set_collate, + col2 varchar(64), + key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); + +--echo # Must show 'using index' for latin1_bin and utf8_bin: +--replace_column 9 # +explain +select col1, hex(col1) from t1; +select col1, hex(col1) from t1; + +--echo # Must show 'using index' for latin1_bin and utf8_bin: +--replace_column 4 # 9 # +explain +select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 where col1 < 'b'; + +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), 
'16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); + +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); + +--replace_column 4 # 9 # +explain +select pk, col1, hex(col1), length(col1) from t1; +select pk, col1, hex(col1), length(col1) from t1; +drop table t1; + +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); + +select pk,length(a) from t1 force index(a) where a < 'zz'; +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test new file mode 100644 index 00000000000..73465e7627a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test @@ -0,0 +1,145 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# For GitHub issue#167 -- Unique key check doesn't work + +connect (con1, localhost, root,,); +connect (con2, localhost, root,,); +connect (con3, localhost, root,,); +connection default; + +--disable_warnings +set debug_sync='RESET'; +drop table if exists t1; +--enable_warnings + +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; + +# 1) second insert should be blocked at GetForUpdate(), then returning duplicate key error +# after con1's commit +connection con1; +begin; +insert into t1 values (1,1); + +connection con2; +let $ID= `select connection_id()`; +set session rocksdb_lock_wait_timeout=50; +begin; +send insert into t1 values 
(1,2); + +connection con1; +let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST + where ID = $ID and STATE = "Waiting for row lock"; +--source include/wait_condition.inc +commit; + +connection con2; +--error ER_DUP_ENTRY +reap; +commit; +select * from t1; +truncate table t1; + +# 2) same as 1) but using secondary unique key constraint +connection con1; +begin; +insert into t2 values (1,1,1); + +connection con2; +begin; +send insert into t2 values (2,1,2); + +connection con1; +--source include/wait_condition.inc +commit; + +connection con2; +--error ER_DUP_ENTRY +reap; +commit; +select * from t2; +truncate table t2; + +# 3) similar to 1),2) but rolled back +connection con1; +begin; +insert into t1 values (1,1); + +connection con2; +begin; +send insert into t1 values (1,2); + +connection con1; +--source include/wait_condition.inc +rollback; + +connection con2; +reap; +commit; +select * from t1; +truncate table t1; + +connection con1; +begin; +insert into t2 values (1,1,1); + +connection con2; +begin; +send insert into t2 values (2,1,2); + +connection con1; +--source include/wait_condition.inc +rollback; + +connection con2; +reap; +commit; +select * from t2; +truncate table t2; + + +# 4) simulating T1 GetForUpdate() -> T2 GetForUpdate(). T2 should fail with lock wait timeout. 
+connection con1; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1'; +send insert into t1 values (1,1); + +connection con2; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2'; +send insert into t2 values (1,1,1); + +connection default; +set debug_sync='now WAIT_FOR parked1'; +set debug_sync='now WAIT_FOR parked2'; + +connection con3; +set session rocksdb_lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +insert into t1 values (1,2); +--error ER_LOCK_WAIT_TIMEOUT +insert into t2 values (2,1,2); + +connection default; +set debug_sync='now SIGNAL go1'; +set debug_sync='now SIGNAL go2'; + +connection con1; +reap; + +connection con2; +reap; + +connection default; +--error ER_DUP_ENTRY +insert into t1 values (1,2); +--error ER_DUP_ENTRY +insert into t2 values (2,1,2); +select * from t1; +select * from t2; +# Cleanup +connection default; +set debug_sync='RESET'; +disconnect con1; +disconnect con2; +disconnect con3; +drop table t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc new file mode 100644 index 00000000000..2f11cd3b65a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc @@ -0,0 +1,198 @@ +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; + +CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), + id4 INT, id5 VARCHAR(32), + value1 INT, value2 INT, value3 VARCHAR(32), + PRIMARY KEY (id1, id2) ##CF##, + UNIQUE INDEX (id2, id1) ##CF##, + UNIQUE INDEX (id2, id3, id4) ##CF##, + INDEX (id1) ##CF##, + INDEX (id3, id1) ##CF##, + UNIQUE INDEX(id5) ##CF##, + INDEX (id2, id5)) ENGINE=ROCKSDB; + +--disable_query_log +let $max = 10; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 
VALUES ($i, $i, $i, $i, $i, $i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +SELECT COUNT(*) FROM t1; + +--echo # Test inserting a key that returns duplicate error +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); + +--echo # Test updating a key that returns duplicate error +--error ER_DUP_ENTRY +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +--error ER_DUP_ENTRY +UPDATE t1 SET id2=1, id3=1, id4=1; + +SELECT COUNT(*) FROM t1; + +--echo # Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set value3=value1; + +--echo # Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; + +SELECT COUNT(*) FROM t1; + +--echo # Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); + +SELECT COUNT(*) FROM t1; + +--echo # Adding multiple rows where one of the rows fail the duplicate +--echo # check should fail the whole statement +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), + (24, 24, 24, 24, 24, 24, 24, 24), + (25, 10, 10, 10, 25, 25, 25, 25), + (26, 26, 26, 26, 26, 26, 26, 26); +SELECT 
COUNT(*) FROM t1; + +# Test open transactions can prevent duplicate keys +connection con1; +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); + +connection con2; +BEGIN; + +SELECT COUNT(*) FROM t1; + +--echo # Primary key should prevent duplicate on insert +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); + +--echo # Primary key should prevent duplicate on update +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; + +--echo # Unique secondary key should prevent duplicate on insert +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); + +--echo # Unique secondary key should prevent duplicate on update +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id5=34 WHERE id2=8; + +--echo # Adding multiple rows where one of the rows fail the duplicate +--echo # check should fail the whole statement +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), + (36, 36, 36, 36, 36, 36, 36, 36), + (37, 31, 32, 33, 37, 37, 37, 37), + (38, 38, 38, 38, 38, 38, 38, 38); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), + (36, 36, 36, 36, 36, 36, 36, 36), + (37, 37, 37, 37, 34, 37, 37, 37), + (38, 38, 38, 38, 38, 38, 38, 38); + +--echo # NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), + (38, 31, 32, NULL, 38, 37, 37, 37), + (39, 31, 32, NULL, 39, 37, 37, 37); + +SELECT COUNT(*) FROM t1; + +--echo # Fail on duplicate key update for row added in our transaction +--error ER_DUP_ENTRY +UPDATE t1 SET id5=37 WHERE id1=38; + +--echo # Fail on lock timeout for row modified in another transaction +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id5=34 WHERE id1=38; + +--echo # NULL values are 
unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; + +connection con1; +COMMIT; + +connection con2; +COMMIT; + +# Test transaction is reading from latest data +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; + +connection con1; +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); + +connection con2; +--echo # When transaction is pending, fail on lock acquisition +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); + +SELECT COUNT(*) FROM t1; + +connection con1; +COMMIT; + +connection con2; +--echo # When transaction is committed, fail on duplicate key +--error ER_DUP_ENTRY,ER_LOCK_DEADLOCK +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); + +ROLLBACK; + +SELECT * FROM t1; + +disconnect con1; +disconnect con2; + +connection default; +DROP TABLE t1; + +# Wait till we reached the initial number of concurrent sessions +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test new file mode 100644 index 00000000000..28b52f262cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -0,0 +1,33 @@ +--source include/have_rocksdb.inc + +let ddl= $MYSQL_TMP_DIR/unique_sec.sql; +--exec sed s/##CF##//g suite/rocksdb/t/unique_sec.inc > $ddl +--source $ddl + +--echo # +--echo # Issue #88: Creating unique index over column with duplicate values succeeds +--echo # +create table t1 (pk int primary key, a int) engine=rocksdb; + +insert into t1 values +(1, 1), +(2, 2), +(3, 3), +(4, 1), +(5, 5); + +--error ER_DUP_ENTRY +alter table t1 add unique(a); +drop table t1; + +--echo # +--echo # Issue #111 +--echo # +CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16; + +INSERT 
INTO t2 VALUES (1,1); +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (1,1); +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (2,1); +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test new file mode 100644 index 00000000000..d6a8e3d5a1b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +let ddl= $MYSQL_TMP_DIR/unique_sec_rev_cf.sql; +--exec sed s/##CF##/" COMMENT 'rev:cf'"/g suite/rocksdb/t/unique_sec.inc > $ddl +--source $ddl diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test new file mode 100644 index 00000000000..0c7df020ef5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test @@ -0,0 +1,25 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1); + +SET session transaction isolation level read uncommitted; +begin; +--error ER_UNKNOWN_ERROR +insert into t1 values (2,1); +--error ER_UNKNOWN_ERROR +select * from t1 where id=1; +rollback; + +SET session transaction isolation level serializable; +begin; +--error ER_UNKNOWN_ERROR +insert into t1 values (2,1); +--error ER_UNKNOWN_ERROR +select * from t1 where id=1; +rollback; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update.test b/storage/rocksdb/mysql-test/rocksdb/t/update.test new file mode 100644 index 00000000000..e0a49ee8ca5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update.test @@ -0,0 +1,72 @@ +--source include/have_rocksdb.inc + +# +# Basic UPDATE statements. 
+# UPDATE LOW_PRIORITY is covered in update_low_prio test +# UPDATE IGNORE is covered in update_ignore test +# Multi-table update is covered in update_multi test +# + +######################################## +# TODO: +# The results of the transactional part +# are unusual due to "can't-see-own-changes" +######################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +UPDATE t1 SET a=a+100; +--sorted_result +SELECT a,b FROM t1; + +UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100; +--sorted_result +SELECT a,b FROM t1; + +# ORDER BY and LIMIT +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +# +# Transactional UPDATE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +BEGIN; +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100; +COMMIT; +--sorted_result +SELECT * FROM t1 ORDER BY pk; + +BEGIN; +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3; +UPDATE t1 SET b = ''; +ROLLBACK; +SELECT * FROM t1 ORDER BY pk; + +BEGIN; +UPDATE t1 SET b = 'update2' WHERE a <= 100; +SAVEPOINT spt1; +UPDATE t1 SET b = ''; +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +UPDATE t1 SET b = 'upd' WHERE a = 10050; +--error ER_UNKNOWN_ERROR +COMMIT; +SELECT * FROM t1 ORDER BY pk; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt new file mode 100644 index 00000000000..ba9364e1523 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test new file mode 100644 index 00000000000..0208e1b3563 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +# +# UPDATE IGNORE +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t2 (c,d) SELECT b, a FROM t1; + +UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1 ORDER BY pk; + +--error ER_SUBQUERY_NO_1_ROW +UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b' + WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); + +UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b' + WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); + +SELECT a,b FROM t1 ORDER BY pk; +SELECT c,d FROM t2 ORDER BY pk; + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test new file mode 100644 index 00000000000..1cb2f12804d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +# +# Multi-table UPDATE statements. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +let $i= 0; +while ($i < 10) { + --source update_multi_exec.inc + inc $i; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc new file mode 100644 index 00000000000..40d736693c6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc @@ -0,0 +1,27 @@ +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; + +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test new file mode 100644 index 00000000000..c53eb9be85c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test @@ -0,0 +1,78 @@ +--source include/have_rocksdb.inc + +# +# UPDATE statements for tables with keys +# + +############################################# +# TODO: +# The test doesn't work quite as expected, +# apparently due to "can't see own changes" +############################################# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +UPDATE t1 SET a=100, b='f' 
WHERE b IN ('b','c'); +UPDATE t1 SET b='m' WHERE b = 'f'; +UPDATE t1 SET b='z' WHERE a < 2; +UPDATE t1 SET b=''; +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo # RocksDB: skip the test for secondary UNIQUE keys. +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=innodb; + + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +--error ER_DUP_ENTRY +UPDATE t1 SET a=205 WHERE a=200; +UPDATE t1 SET a=12345 ORDER BY a, b LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +UPDATE t1 SET a=80 WHERE a IN (202,203); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'a'),(6,'f'); +UPDATE t1 SET a=6 WHERE a=3; +--error ER_DUP_ENTRY +UPDATE t1 SET a=100 WHERE a=1; +--error ER_DUP_ENTRY +UPDATE t1 SET a=4, b='d' WHERE b='f'; +UPDATE t1 SET a=a+1; +--sorted_result +SELECT a,b FROM t1; +--error ER_DUP_ENTRY +UPDATE t1 SET b='z'; +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +--error ER_DUP_ENTRY +UPDATE t1 SET a=205 WHERE a=200; +UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1; + +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +UPDATE t1 SET a=80 WHERE a IN (202,203); +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test new file mode 100644 index 00000000000..b34c85eb4c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test @@ -0,0 +1,102 @@ +--source 
include/have_rocksdb.inc + +# +# Validate that the server starts when everything is okay, but detects errors +# if a table exists in the data dictionary but not as an .frm or vice versa. +# The default mode causes these failures to keep the server from starting, but +# this is problematic for the test as a server start failure is not easily +# trappable. Instead use the mode where it will detect the problem and report +# it in the log bug still start: --rocksdb_validate_tables=2 +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4; + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart the +# server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name + +# Send shutdown to the connected server and give it 10 seconds to die before +# zapping it +shutdown_server 10; + +# Write file to make mysql-test-run.pl start up the server again +--exec echo "restart" >$_expect_file_name +--sleep 5 + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back online again +--source include/wait_until_connected_again.inc + +# Turn off reconnect again +--disable_reconnect + +# Now shut down again and rename one of the .frm files +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Clear the log +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +# Rename the file +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp + +# Attempt to restart the server +--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name +--sleep 5 + +--enable_reconnect +--source 
include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message +--exec echo "Expect errors that we are missing two .frm files" +--exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +# Now shut down again and rename one the .frm file back and make a copy of it +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Clear the log +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +# Rename the file +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm +--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm +--exec cp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm + +# Attempt to restart the server +--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name +--sleep 5 + +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message for the second case +--exec echo "Expect an error that we have an extra .frm file" +--exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +# Shut down an clean up +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +--exec rm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm +--exec echo "restart" >$_expect_file_name +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +--disable_warnings +DROP TABLE t1, t2; +--enable_warnings + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test new file mode 100644 index 00000000000..84a85fab32d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -0,0 +1,42 @@ +--source include/have_rocksdb.inc + +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL 
rocksdb_write_ignore_missing_column_families=true; + +create table aaa (id int primary key, i int) engine rocksdb; + +SET LOCAL rocksdb_write_sync=off; +--exec sleep 30 +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(1,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(2,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(3,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; + +SET LOCAL rocksdb_write_sync=1; +insert aaa(id, i) values(4,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(5,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(6,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; + +SET GLOBAL rocksdb_background_sync=on; +SET LOCAL rocksdb_write_sync=off; +insert aaa(id, i) values(7,1); + +let $status_var=rocksdb_wal_synced; +let $status_var_value=`select @a+4`; +source include/wait_for_status_var.inc; + +truncate table aaa; + +# Cleanup +drop table aaa; +SET GLOBAL rocksdb_write_sync=off; +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=false; +SET GLOBAL rocksdb_background_sync=off; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf new file mode 100644 index 00000000000..101dbce2385 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf @@ -0,0 +1,25 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf +!include include/default_client.cnf + 
+[mysqld.1] +rocksdb +default-storage-engine=rocksdb +skip-innodb +default-tmp-storage-engine=MyISAM +binlog_format=row + +[mysqld.2] +rocksdb +default-storage-engine=rocksdb +skip-innodb +default-tmp-storage-engine=MyISAM +binlog_format=row + +[ENV] +MASTER_MYPORT= @mysqld.1.port +MASTER_MYSOCK= @mysqld.1.socket + +SLAVE_MYPORT= @mysqld.2.port +SLAVE_MYSOCK= @mysqld.2.socket + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc new file mode 100644 index 00000000000..947bf0270e2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc @@ -0,0 +1,3 @@ + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh new file mode 100755 index 00000000000..f3836ab75e5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh @@ -0,0 +1,43 @@ +set -e + +# Insert 100 batches of 100 records each to a table with following schema: +# create table db1.t1 ( +# `id` int(10) not null auto_increment, +# `k` int(10), +# `data` varchar(2048), +# primary key (`id`), +# key (`k`) +# ) engine=innodb; + +MAX_INSERTS=100 +MAX_ROWS_PER_INSERT=100 + +insertData() { + for ((i=1; i<=$MAX_INSERTS; i++)); + do + stmt='INSERT INTO db1.t1 values' + for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++)); + do + k=$RANDOM + data=$(head -c 2048 /dev/urandom|tr -cd 'a-zA-Z0-9') + stmt=$stmt' (NULL, '$k', "'$data'")' + if [ $j -lt $MAX_ROWS_PER_INSERT ]; then + stmt=$stmt',' + fi + done + stmt=$stmt';' + $MYSQL --defaults-group-suffix=.1 -e "$stmt" + done +} + +NUM_PARALLEL_INSERTS=25 +pids=() +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + insertData & + pids+=($!) 
+done +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + wait ${pids[k]} +done diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh new file mode 100755 index 00000000000..a4e4afab9d4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh @@ -0,0 +1,9 @@ +set -e + +# Initially loads a chunk of data. +# Then start loading another chunk of data, +# while simultaneously running a backup + +suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +suite/rocksdb_hotbackup/include/load_data.sh 2>&1 & +suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc new file mode 100644 index 00000000000..26c3f2ce7f1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc @@ -0,0 +1,16 @@ +--let $rpl_server_count= 2 +--let $rpl_topology= none +--source include/rpl_init.inc +--source include/rpl_default_connections.inc + +connection server_1; +create database db1; + +create table db1.t1 ( + `id` int(10) not null auto_increment, + `k` int(10), + `data` varchar(2048), + primary key (`id`), + key (`k`) +) engine=rocksdb; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh new file mode 100755 index 00000000000..18e1feeda96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh @@ -0,0 +1,20 @@ +set -e + +binlog_line=($(grep -o "Last binlog file position [0-9]*, file name .*\.[0-9]*" ${MYSQLTEST_VARDIR}/log/mysqld.2.err | tail -1)) +binlog_pos=${binlog_line[4]%?} +binlog_file=${binlog_line[7]} + +sql="show gtid_executed in '$binlog_file' from $binlog_pos" +result=($($MYSQL --defaults-group-suffix=.1 -e "$sql")) 
+gtid_executed=${result[1]} + +sql="reset master;" +sql="$sql reset slave;" +sql="$sql change master to master_host='127.0.0.1', master_port=${MASTER_MYPORT}, master_user='root', master_auto_position=1, master_connect_retry=1;" +sql="$sql set global gtid_purged='$gtid_executed';" +sql="$sql start slave;" +sql="$sql stop slave;" +sql="$sql change master to master_auto_position=0;" +sql="$sql start slave;" +$MYSQL --defaults-group-suffix=.2 -e "$sql" +echo "$sql" > ${MYSQL_TMP_DIR}/gtid_stmt diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc new file mode 100644 index 00000000000..75dc31964da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc @@ -0,0 +1,4 @@ +--exec suite/rocksdb_hotbackup/include/setup_replication_gtid.sh + +let $slave_sync_timeout = 1800; +source include/wait_for_slave_to_sync_with_master.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh new file mode 100755 index 00000000000..b83b957cff0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh @@ -0,0 +1,71 @@ +if [ "$STREAM_TYPE" == 'wdt' ]; then + which wdt >/dev/null 2>&1 + if [ $? 
-ne 0 ]; then + # change to tar if wdt is not installed + STREAM_TYPE='tar' + fi +fi + +set -e + +# Takes a full backup from server_1 to server_2 +# using myrocks_hotbackup streaming + +checkpoint_dir="${MYSQLTEST_VARDIR}/checkpoint" +backup_dir="${MYSQLTEST_VARDIR}/backup" +dest_data_dir="${MYSQLTEST_VARDIR}/mysqld.2/data/" + +mysql_dir=$(echo $MYSQL | awk '{print $1}' | xargs dirname) +PATH=$mysql_dir:$PATH + +mkdir -p $checkpoint_dir +rm -rf $checkpoint_dir/* + +mkdir -p $backup_dir +rm -rf $backup_dir/* +# delete and recreate the dest dir to make sure all hidden files +# and directories (such as .rocksdb) are blown away +rm -rf $dest_data_dir/ +mkdir $dest_data_dir + +COPY_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_copy_log" + +if [ "$STREAM_TYPE" == 'tar' ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ + --stream=tar --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | tar -xi -C $backup_dir" +elif [ "$STREAM_TYPE" == 'xbstream' ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | xbstream -x \ + --directory=$backup_dir" +else + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --stream=wdt \ + --port=${MASTER_MYPORT} --destination=localhost --backup_dir=$backup_dir \ + --avg_mbytes_per_sec=10 --interval=5 \ + --extra_wdt_sender_options='--block_size_mbytes=1' \ + --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG" +fi + +echo "myrocks_hotbackup copy phase" +eval "$BACKUP_CMD" +if [ $? -ne 0 ]; then + tail $COPY_LOG + exit 1 +fi + +mkdir ${backup_dir}/test # TODO: Fix skipping empty directories + +MOVEBACK_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_moveback_log" + +echo "myrocks_hotbackup move-back phase" +$MYSQL_MYROCKS_HOTBACKUP --move_back --datadir=$dest_data_dir \ + --rocksdb_datadir=$dest_data_dir/.rocksdb \ + --rocksdb_waldir=$dest_data_dir/.rocksdb \ + --backup_dir=$backup_dir > $MOVEBACK_LOG 2>&1 + +if [ $? 
-ne 0 ]; then + tail $MOVEBACK_LOG + exit 1 +fi diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf new file mode 100644 index 00000000000..bd9af04c813 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf @@ -0,0 +1,2 @@ +# Use settings from xb_base.cnf +!include base.cnf diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result new file mode 100644 index 00000000000..6cec6ca5d69 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result @@ -0,0 +1,23 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +stop slave; +start slave; +select count(*) from db1.t1; +count(*) +500000 +drop database db1; +stop slave; +reset slave; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result new file mode 100644 index 00000000000..d3f2ebc4e6f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff 
--git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result new file mode 100644 index 00000000000..d3f2ebc4e6f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result new file mode 100644 index 00000000000..d3f2ebc4e6f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt new file mode 100644 index 00000000000..9d7af67eec9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt @@ -0,0 +1 @@ +--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on diff --git 
a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt new file mode 100644 index 00000000000..9d7af67eec9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt @@ -0,0 +1 @@ +--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test new file mode 100644 index 00000000000..f9d58da093e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test @@ -0,0 +1,47 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/load_data_and_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +let $num_rows = `select count(*) from db1.t1`; +let $max_id = `select id from db1.t1 order by id desc limit 1`; + +if($num_rows != $max_id) { + echo Number of rows($num_rows) and last_id($max_id) does not match; +} +if($num_rows < 250000) { + echo Number of rows($num_rows) is less than 250000; +} +if($num_rows > 500000) { + echo Number of rows($num_rows) is more than 500000; +} + +--source suite/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +let $checksum1 = `checksum tables db1.t1`; +connection server_2; +let $checksum2 = `checksum tables db1.t1`; + +if($checksum1 != $checksum2) { + echo Checksums ($checksum1 and $checksum2) do not match; +} + +connection server_1; +drop database db1; +sync_slave_with_master; +connection server_2; +stop slave; +reset slave; + +source suite/rocksdb_hotbackup/include/cleanup.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test new file mode 100644 index 
00000000000..2b999f3fce7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=tar suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test new file mode 100644 index 00000000000..2d2ed89112b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--exec STREAM_TYPE=wdt suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test new file mode 100644 index 00000000000..9bfab4252c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=xbstream suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let 
$rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations new file mode 100644 index 00000000000..f09d338c357 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations @@ -0,0 +1,2 @@ +[row] +binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc new file mode 100644 index 00000000000..8f03c16e2f1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc @@ -0,0 +1,71 @@ +--source include/master-slave.inc +--source include/have_binlog_format_row.inc +--source include/not_embedded.inc +--source include/not_valgrind.inc + +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); + +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +sync_slave_with_master; +--let $rsbm = query_get_value(select @@global.reset_seconds_behind_master, @@global.reset_seconds_behind_master, 1) +set global reset_seconds_behind_master=1; + +connection slave; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); + +connection master; +sync_slave_with_master; +connection master; +INSERT INTO t1 VALUES(1, 1); + +connection slave; +--let $slave_sql_errno= 1062 +--let $not_switch_connection= 0 +--source include/wait_for_slave_sql_error_and_skip.inc +set global 
reset_seconds_behind_master=0; +--source include/stop_slave_io.inc + +connection master; +INSERT INTO t1 values (4,0); +--sleep 11 +INSERT INTO t1 VALUES(2, 1); + +connection slave; +--source include/start_slave_io.inc + +connection master; +sync_slave_with_master; + +connection slave; +set global reset_seconds_behind_master=1; + +connection master; +insert into t1 values (5,0); +--sleep 1 +sync_slave_with_master; + +connection master; +INSERT INTO t1 VALUES(3, 1); + +connection slave; +--let $slave_sql_errno= 1062 +--let $not_switch_connection= 0 +--source include/wait_for_slave_sql_error_and_skip.inc + +--echo # +--echo # Cleanup +--echo # + +connection master; +DROP TABLE t1; +eval set global reset_seconds_behind_master=$rsbm; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result new file mode 100644 index 00000000000..31777c45c68 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +create table i1 (id int primary key , value int) engine=innodb; +create table r1 (id int primary key , value int) engine=rocksdb; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +insert into i1 values (1,1); +insert into r1 values (1,1); +connection con1; +select * from i1; +id value +select * from r1; +id value +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1115 uuid:1-5 +connection con2; +insert into i1 values (2,2); +insert into r1 values (2,2); +connection con1; +select * from i1; +id value +1 1 +2 2 +select * from r1; +id value +1 1 +connection con2; +insert into i1 values (3,2); +insert into r1 values (3,2); +connection con1; 
+select * from i1; +id value +1 1 +2 2 +select * from r1; +id value +1 1 +START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 2015 uuid:1-9 +connection con2; +insert into r1 values (4,4); +connection con1; +select * from r1; +id value +1 1 +2 2 +3 2 +4 4 +connection con2; +insert into r1 values (5,5); +connection con1; +select * from r1; +id value +1 1 +2 2 +3 2 +4 4 +drop table i1; +drop table r1; +connection default; +disconnect con1; +disconnect con2; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result new file mode 100644 index 00000000000..71c0d6d5dbf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; +'con1' +SET SESSION debug="d,crash_commit_after_log"; +SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go'; +insert into t1 values (1, 1, "iamtheogthealphaandomega");; +'con2' +insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; +SET GLOBAL SYNC_BINLOG = 0; +SET DEBUG_SYNC='now WAIT_FOR parked'; +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_WRITE_SYNC = ON; +SET GLOBAL SYNC_BINLOG = 1; +insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush"); +SET DEBUG_SYNC='now SIGNAL go'; +**found 'prepare' log entry** +**found 'commit' log entry** +select * from t1 where a=1; +a b c +1 1 iamtheogthealphaandomega +select count(*) from t1; +count(*) +1000000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result new file mode 100644 index 00000000000..6d061e99846 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result @@ -0,0 +1,135 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +drop table if exists x; +select @@binlog_format; +@@binlog_format +ROW +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +select @@global.gtid_executed; +@@global.gtid_executed + + +--- slave state before crash --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, one transaction recovered--- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid + +--- slave state after restart, slave start --- +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +insert into x values (6,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +insert into x values (7,1,1); +insert into x values 
(8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +drop table x; +include/rpl_end.inc +Binlog Info Found diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result new file mode 100644 index 00000000000..352ceff236c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result @@ -0,0 +1,361 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Recovery from master pos"); +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:4 +SET GLOBAL debug = '+d,crash_before_update_pos'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:4 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:7 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_after_update_pos_before_apply'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from 
slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_before_writing_xid'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,half_binlogged_transaction'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_before'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use 
test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after_log'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after_prepare'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master 
to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result new file mode 100644 index 00000000000..e765e338cb5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result @@ -0,0 +1,140 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists x; +select @@binlog_format; +@@binlog_format +ROW +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 + +--- slave state before crash --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:7 +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, one transaction recovered--- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-6 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 + +--- slave state after restart, slave start --- +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:7 +insert into x values (6,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-8 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:8 +insert into x values (7,1,1); +insert into x values (8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-14 +include/rpl_start_server.inc [server_number=2] + +--- slave state after 
crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-14 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:14 +drop table x; +include/rpl_end.inc +Binlog Info Found diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result new file mode 100644 index 00000000000..b2703ee0cbb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result @@ -0,0 +1,16 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values(1); +SET GLOBAL debug = '+d,crash_before_writing_xid'; +insert into t1 values(2); +ERROR HY000: Lost connection to MySQL server during query +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +include/start_slave.inc +RocksDB: Last MySQL Gtid master_uuid:2 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result new file mode 100644 index 00000000000..905b56dacb5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result @@ -0,0 +1,34 @@ +# +# Ensure skip_unique_check is set when lag exceeds lag_threshold +# +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); +drop table if exists t1; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +set global reset_seconds_behind_master=1; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); +INSERT INTO t1 VALUES(1, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +set global reset_seconds_behind_master=0; +include/stop_slave_io.inc +INSERT INTO t1 values (4,0); +INSERT INTO t1 VALUES(2, 1); +include/start_slave_io.inc +set global reset_seconds_behind_master=1; +insert into t1 values (5,0); +INSERT INTO t1 VALUES(3, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +# +# Cleanup +# +DROP TABLE t1; +set global reset_seconds_behind_master=1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result new file mode 100644 index 00000000000..6c58cb16fed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result @@ -0,0 +1,31 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); +drop table if exists t1; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +set global reset_seconds_behind_master=1; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); +INSERT INTO t1 VALUES(1, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +set global reset_seconds_behind_master=0; +include/stop_slave_io.inc +INSERT INTO t1 values (4,0); +INSERT INTO t1 VALUES(2, 1); +include/start_slave_io.inc +set global reset_seconds_behind_master=1; +insert into t1 values (5,0); +INSERT INTO t1 VALUES(3, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +# +# Cleanup +# +DROP TABLE t1; +set global reset_seconds_behind_master=1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result new file mode 100644 index 00000000000..325df314216 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS t1; +create table t1 (a int primary key, msg varchar(255)) engine=rocksdb; +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET SESSION debug="d,crash_commit_after_prepare"; +insert into t1 values (1, 'dogz'); +select * from t1; +a msg +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET SESSION debug="d,crash_commit_after_log"; +insert into t1 values (2, 'catz'), (3, 'men'); +select * from t1; +a msg +2 catz +3 men +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET SESSION debug="d,crash_commit_after"; +insert into t1 values (4, 'cars'), (5, 'foo'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET SESSION 
debug="d,crash_commit_after_log"; +insert into t1 values (6, 'shipz'), (7, 'tankz'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET SESSION debug="d,crash_commit_after"; +insert into t1 values (8, 'space'), (9, 'time'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +8 space +9 time +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result new file mode 100644 index 00000000000..eb2c6cfcda3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result @@ -0,0 +1,222 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +DROP TABLE IF EXISTS t1; +# Establish connection con1 (user=root) +# Establish connection con2 (user=root) +# Establish connection con3 (user=root) +# Establish connection con4 (user=root) +# reset replication to guarantee that master-bin.000001 is used +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +RESET MASTER; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root"; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine. +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine. +ROLLBACK; +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 531 UUID:1-2 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +COMMIT; +SELECT * FROM t1; +a +1 +2 +3 +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +SELECT * FROM t1; +a +1 +CHANGE MASTER TO 
master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +include/start_slave.inc +SELECT * FROM t1; +a +1 +2 +3 +SELECT * FROM t1_backup; +a +1 +2 +3 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +# async queries from con2 +INSERT INTO t1 VALUES(2); +# async queries from con3 +INSERT INTO t1 VALUES(21); +# Switch to connection con1 +# Switch to connection con4 +INSERT INTO t1 VALUES(9); +# Switch to connection con1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# reap async statements +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# sync and then query slave +ShouldBeZero +0 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +SET @@global.gtid_purged='gtid_executed_from_snapshot'; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +SELECT * FROM t1; +a +1 +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +SELECT * FROM t1; +a +1 +2 +3 +SELECT * FROM t1_backup; +a +1 +2 +3 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +# async queries from con2 +INSERT INTO t1 VALUES(2); +# async queries from con3 +INSERT INTO t1 VALUES(21); +# Switch to connection con1 +# Switch to connection con4 +INSERT INTO t1 VALUES(9); +# Switch to connection con1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# reap async statements +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +SET @@global.gtid_purged='gtid_executed_from_snapshot'; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# sync and then query slave +ShouldBeZero +0 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection default + close connections con1 and con2 +include/stop_slave.inc +CHANGE MASTER to master_auto_position=0; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result new file mode 100644 index 00000000000..57c1d0822c9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result @@ -0,0 +1,15 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +create table t1(a int primary key); +FLUSH LOGS; +insert into t1 values(1); +insert into t1 values(2); +FLUSH LOGS; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000003 120 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result new file mode 100644 index 00000000000..d4920b14705 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result @@ -0,0 +1,28 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. 
Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +call mtr.add_suppression(".*"); +include/stop_slave.inc +change master to master_auto_position=1; +include/start_slave.inc +call mtr.add_suppression('Slave: Error dropping database'); +stop slave sql_thread; +insert into test0.benchmark set state='slave is processing load'; +start slave sql_thread; +use test0; +insert into benchmark set state='slave ends load'; +use test; +select * from test1.benchmark into outfile 'benchmark.out'; +select ts from test0.benchmark where state like 'master started load' into @m_0; +select ts from test0.benchmark where state like 'master ends load' into @m_1; +select ts from test0.benchmark where state like 'slave takes on load' into @s_m0; +select ts from test0.benchmark where state like 'slave is supposed to finish with load' into @s_m1; +select ts from test0.benchmark where state like 'slave ends load' into @s_1; +select ts from test0.benchmark where state like 'slave is processing load' into @s_0; +select time_to_sec(@m_1) - time_to_sec(@m_0) as 'delta.out'; +include/stop_slave.inc +change master to master_auto_position=0; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf new file mode 100644 index 00000000000..ed8c77bcc0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf @@ -0,0 +1,51 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf +!include include/default_client.cnf + +[mysqld.1] + +# Run the master.sh script before starting this process +#!run-master-sh + +log-bin= master-bin + +loose-innodb + +[mysqld.2] +# Run the slave.sh script before starting this process +#!run-slave-sh + +# Append -slave.opt file to the list of argument used when +# starting the mysqld 
+#!use-slave-opt +innodb_use_native_aio = 0 + +log-bin= slave-bin +relay-log= slave-relay-bin + +log-slave-updates +master-retry-count= 10 + +# Values reported by slave when it connect to master +# and shows up in SHOW SLAVE STATUS; +report-host= 127.0.0.1 +report-port= @mysqld.2.port +report-user= root + +skip-slave-start + +# Directory where slaves find the dumps generated by "load data" +# on the server. The path need to have constant length otherwise +# test results will vary, thus a relative path is used. +slave-load-tmpdir= ../../tmp + +loose-innodb + + +[ENV] +MASTER_MYPORT= @mysqld.1.port +MASTER_MYSOCK= @mysqld.1.socket + +SLAVE_MYPORT= @mysqld.2.port +SLAVE_MYSOCK= @mysqld.2.socket + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations new file mode 100644 index 00000000000..f09d338c357 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations @@ -0,0 +1,2 @@ +[row] +binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt new file mode 100644 index 00000000000..c747adc94d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test new file mode 100644 index 00000000000..acea1903c05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test @@ -0,0 +1,81 @@ +--source include/have_log_bin.inc +--source include/have_rocksdb.inc +--source include/have_innodb.inc +--enable_connect_log +-- let $uuid = `select @@server_uuid;` + +# Save the initial number of concurrent sessions +--source 
include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +create table i1 (id int primary key , value int) engine=innodb; +create table r1 (id int primary key , value int) engine=rocksdb; + + +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; + +# Without setting engine, this takes both InnoDB and RocksDB snapshots +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +connection con2; +insert into i1 values (1,1); +insert into r1 values (1,1); + +connection con1; +select * from i1; +select * from r1; + +# This takes RocksDB snapshot only but both InnoDB participates in transaction. +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +connection con2; +insert into i1 values (2,2); +insert into r1 values (2,2); + +connection con1; +# takes InnoDB snapshot here so changes after that not visible +select * from i1; +select * from r1; + +connection con2; +insert into i1 values (3,2); +insert into r1 values (3,2); + +connection con1; +select * from i1; +select * from r1; + +# RocksDB also partipates in transaction +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT; + +connection con2; +insert into r1 values (4,4); + +connection con1; +# takes RocksDB snapshot here so changes after that are not visible +select * from r1; + +connection con2; +insert into r1 values (5,5); + +connection con1; +select * from r1; + +drop table i1; +drop table r1; + +connection default; +disconnect con1; +disconnect con2; +reset master; +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt new file mode 100644 index 00000000000..c747adc94d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt @@ -0,0 +1 
@@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test new file mode 100644 index 00000000000..69d2e87e40e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test @@ -0,0 +1,71 @@ +--source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/big_test.inc + +--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; + +connect (con1, localhost, root,,); +connect (con2, localhost, root,,); + +# On connection one we insert a row and pause after commit marker is written to WAL. +# Connection two then inserts many rows. After connection two +# completes connection one continues only to crash before commit but after +# binlog write. 
On crash recovery we see that connection one's value +# has been recovered and commited +connection con1; +--echo 'con1' +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go'; +--error 0,2013 +--send insert into t1 values (1, 1, "iamtheogthealphaandomega"); + +connection con2; +--echo 'con2' +insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); + +# Disable 2PC and syncing for faster inserting of dummy rows +# These rows only purpose is to rotate the binlog +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; +SET GLOBAL SYNC_BINLOG = 0; + +SET DEBUG_SYNC='now WAIT_FOR parked'; +--disable_query_log +--let $pk= 3 +while ($pk < 1000000) { + eval insert into t1 values ($pk, 1, "foobardatagoesheresothatmorelogsrollwhichiswhatwewant"); + --inc $pk +} +--enable_query_log + +# re-enable 2PC an syncing then write to trigger a flush +# before we trigger the crash to simulate full-durability +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_WRITE_SYNC = ON; +SET GLOBAL SYNC_BINLOG = 1; + +insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush"); + +SET DEBUG_SYNC='now SIGNAL go'; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +--exec sleep 60 + +--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.1.err commit,prepare,rollback + +select * from t1 where a=1; +select count(*) from t1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl new file mode 100644 index 00000000000..a5e4d9d8035 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl @@ -0,0 +1,19 @@ +my $pid_file = $ARGV[0]; +my $log_file = $ARGV[1]; + +open(my $fh, '<', $pid_file) || die "Cannot open pid file $pid_file"; +my $slave_pid = 
<$fh>; +close($fh); + +$slave_pid =~ s/\s//g; +open(my $log_fh, '<', $log_file) || die "Cannot open log file $log_file"; + +my $pid_found = 0; +while (my $line = <$log_fh>) { + next unless ($pid_found || $line =~ /^[\d-]* [\d:]* $slave_pid /); + $pid_found = 1 unless ($pid_found); + if ($line =~ /^RocksDB: Last binlog file position.*slave-bin\..*\n/) { + print "Binlog Info Found\n"; + } +} +close($log_fh); diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf new file mode 100644 index 00000000000..454c9eb887a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf @@ -0,0 +1,9 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates + +[mysqld.2] +relay_log_recovery=1 +relay_log_info_repository=TABLE +log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test new file mode 100644 index 00000000000..0e40e5423a2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test @@ -0,0 +1,12 @@ +--source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc + +connection slave; +--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) + +# Verify the log file contains the Last binlog line, but only if the slave server's pid is found +--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err + +--disable_query_log +connection slave; +call mtr.add_suppression("Recovery from master pos"); +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt new file mode 100644 index 00000000000..d828b6c01f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt @@ 
-0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt new file mode 100644 index 00000000000..aac6c6caadb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--sync_binlog=1000 --relay_log_recovery=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test new file mode 100644 index 00000000000..949fbad666d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test @@ -0,0 +1,41 @@ +-- source include/have_gtid.inc +-- source include/master-slave.inc +-- source include/have_debug.inc +-- source include/not_valgrind.inc + +-- let $engine = ROCKSDB + +call mtr.add_suppression("Recovery from master pos"); + +-- let $debug_option = crash_before_update_pos +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_after_update_pos_before_apply +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_before_writing_xid +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = half_binlogged_transaction +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_before +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_after_log +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_after_prepare +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = 
crash_commit_after +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf new file mode 100644 index 00000000000..b6e8beb8fcb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf @@ -0,0 +1,14 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON + +[mysqld.2] +sync_relay_log_info=100 +relay_log_recovery=1 +relay_log_info_repository=FILE +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc new file mode 100644 index 00000000000..43ee7ec526c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc @@ -0,0 +1,153 @@ +source include/master-slave.inc; +-- let $uuid = `select @@server_uuid;` + +--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err + +connection master; +--disable_warnings +drop table if exists x; +--enable_warnings + +connection master; + +select @@binlog_format; + +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +-- replace_result $uuid uuid +select @@global.gtid_executed; + +sync_slave_with_master; +connection slave; +--let slave_data_dir= query_get_value(SELECT @@DATADIR, @@DATADIR, 1) +--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) +--disable_query_log +select "--- slave state before crash ---" as ""; +--enable_query_log +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid 
+select * from mysql.slave_gtid_info; + +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +--write_file $MYSQL_TMP_DIR/truncate_tail_wal.sh +#!/bin/bash + +F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1` +SIZE=`stat -c %s $F` +NEW_SIZE=`expr $SIZE - 10` +truncate -s $NEW_SIZE $F +rc=$? +if [[ $rc != 0 ]]; then + exit 1 +fi + +kill -9 `head -1 $slave_pid_file` + +exit 0 +EOF +--chmod 0755 $MYSQL_TMP_DIR/truncate_tail_wal.sh +--exec $MYSQL_TMP_DIR/truncate_tail_wal.sh + +--let $rpl_skip_start_slave= 1 +--source include/rpl_start_server.inc +--disable_query_log +select "--- slave state after crash recovery, slave stop, one transaction recovered---" as ""; +--enable_query_log +connection slave; +--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.2.err commit,prepare,rollback +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +--disable_query_log +select "--- slave state after restart, slave start ---" as ""; +--enable_query_log +--source include/start_slave.inc +connection master; +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +insert into x values (6,1,1); + +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +insert into x values (7,1,1); +insert into x values (8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +sync_slave_with_master; + +connection slave; + +# Corrupting WAL. MyRocks does point in time recovery with wal_recovery_mode=2. 
+# It loses some data but can resync after restarting slave. + +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +--write_file $MYSQL_TMP_DIR/corrupt_wal.sh +#!/bin/bash + +# expected to be around 950 bytes +F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1` +SIZE=`stat -c %s $F` +OFFSET=$(( $SIZE-500 )) +dd bs=1 if=/dev/zero of=$F count=100 seek=$OFFSET conv=notrunc + +kill -9 `head -1 $slave_pid_file` + +exit 0 +EOF +--chmod 0755 $MYSQL_TMP_DIR/corrupt_wal.sh +--exec $MYSQL_TMP_DIR/corrupt_wal.sh + +--let $rpl_skip_start_slave= 1 +--source include/rpl_start_server.inc +--disable_query_log +select "--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 ---" as ""; +--enable_query_log +select * from x; +--source include/start_slave.inc +connection master; +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +drop table x; + + +--remove_file $MYSQL_TMP_DIR/truncate_tail_wal.sh +--remove_file $MYSQL_TMP_DIR/corrupt_wal.sh +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test new file mode 100644 index 00000000000..3b660b2640f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test @@ -0,0 +1,12 @@ +-- source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc + +connection slave; +-- let _SLAVE_PID_FILE= query_get_value(SELECT @@pid_file, @@pid_file, 1) + +# Verify the log file contains the Last binlog line, but only if the slave server's pid is found +--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err + +--disable_query_log +connection slave; +call mtr.add_suppression("Recovery from 
master pos"); +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt new file mode 100644 index 00000000000..d828b6c01f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt new file mode 100644 index 00000000000..d828b6c01f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test new file mode 100644 index 00000000000..56c0eac2517 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test @@ -0,0 +1,39 @@ +# based on rpl/rpl_gtid_innondb_sys_header.test +source include/master-slave.inc; +source include/have_gtid.inc; +source include/have_debug.inc; +source include/not_valgrind.inc; + +--let $old_debug = `select @@global.debug;` + +connection master; +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values(1); +--eval SET GLOBAL debug = '+d,crash_before_writing_xid' +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +insert into t1 values(2); + +--source include/wait_until_disconnected.inc +--let $rpl_server_number = 1 +--source include/rpl_reconnect.inc + +--eval SET GLOBAL debug = `$old_debug` + +connection slave; +disable_warnings; +source include/start_slave.inc; +enable_warnings; +connection master; +sync_slave_with_master; + +connection master; +--let $master_uuid= 
query_get_value(select @@server_uuid, @@server_uuid, 1) +--replace_result $master_uuid master_uuid +--exec grep 'RocksDB: Last MySQL Gtid $master_uuid' $MYSQLTEST_VARDIR/log/mysqld.1.err + +drop table t1; +source include/rpl_end.inc; +-- move_file $MYSQLTEST_VARDIR/log/mysqld.1.err $MYSQLTEST_VARDIR/log/mysqld.1.err.orig +-- write_file $MYSQLTEST_VARDIR/log/mysqld.1.err +EOF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt new file mode 100644 index 00000000000..1c8dc1e62e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt @@ -0,0 +1 @@ +--unique-check-lag-threshold=5 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test new file mode 100644 index 00000000000..8c79d2afa03 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test @@ -0,0 +1,6 @@ +--echo # +--echo # Ensure skip_unique_check is set when lag exceeds lag_threshold +--echo # + +--source ../include/rpl_no_unique_check_on_lag.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt new file mode 100644 index 00000000000..1c8dc1e62e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt @@ -0,0 +1 @@ +--unique-check-lag-threshold=5 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test new file mode 100644 index 00000000000..c5cf1a8ae92 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test @@ -0,0 +1,2 @@ +--source ../include/rpl_no_unique_check_on_lag.inc + diff --git 
a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt new file mode 100644 index 00000000000..a990dc22129 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt new file mode 100644 index 00000000000..c747adc94d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test new file mode 100644 index 00000000000..5f99e1aabd1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test @@ -0,0 +1,56 @@ +--source include/have_binlog_format_row.inc +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1 (a int primary key, msg varchar(255)) engine=rocksdb; + +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_prepare"; +--error 0,2013 +insert into t1 values (1, 'dogz'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +--error 0,2013 +insert into t1 values (2, 'catz'), (3, 'men'); +--enable_reconnect 
+--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after"; +--error 0,2013 +insert into t1 values (4, 'cars'), (5, 'foo'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +--error 0,2013 +insert into t1 values (6, 'shipz'), (7, 'tankz'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after"; +--error 0,2013 +insert into t1 values (8, 'space'), (9, 'time'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt new file mode 100644 index 00000000000..c747adc94d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt new file mode 100644 index 00000000000..c747adc94d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test new file mode 100644 index 00000000000..37f80c8ace5 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test @@ -0,0 +1,373 @@ +--source include/master-slave.inc +--source include/have_binlog_format_row.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); +--echo # Establish connection con3 (user=root) +connect (con3,localhost,root,,); +--echo # Establish connection con4 (user=root) +connect (con4,localhost,root,,); + +--echo # reset replication to guarantee that master-bin.000001 is used +connection slave; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; + +connection master; +RESET MASTER; + +connection slave; +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root"; +--source include/start_slave.inc + +### Test 1: +### - While a consistent snapshot transaction is executed, +### no external inserts should be visible to the transaction. 
+ +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +--error ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--error ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +ROLLBACK; +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + + +--disable_query_log +--disable_result_log +let $x=1000; +while ($x) { + START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + dec $x; +} +--enable_query_log +--enable_result_log + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); + +--echo # Switch to connection con1 +connection con1; +SELECT * FROM t1; # should fetch one row +COMMIT; + +SELECT * FROM t1; # should fetch three rows + +DROP TABLE t1; + +### Test 2: +### - confirm result from snapshot select and replication replay matches original + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1); + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 
VALUES(3); + +--echo # Switch to connection con1 +connection con1; +SELECT * FROM t1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; +SELECT * FROM t1; + +--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos; +--source include/start_slave.inc + +connection master; +sync_slave_with_master slave; + +SELECT * FROM t1; +SELECT * FROM t1_backup; +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 3: +### - confirm result from snapshot select and replication replay matches original +### - use non-deterministic concurrency + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +--echo # async queries from con2 +connection con2; +send INSERT INTO t1 VALUES(2); + +--echo # async queries from con3 +connection con3; +send INSERT INTO t1 VALUES(21); + +--echo # Switch to connection con1 +connection con1; + +let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1); + +--echo # Switch to connection con4 +connection con4; +INSERT INTO t1 VALUES(9); + +--echo # Switch to connection con1 +connection con1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # reap async statements +connection con2; +reap; + 
+connection con3; +reap; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; + +--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos; +--source include/start_slave.inc + +--echo # sync and then query slave +connection master; +sync_slave_with_master slave; + +let $sum1 = `SELECT SUM(a) from t1`; +let $sum2 = `SELECT SUM(a) from t1_backup`; +--disable_query_log +eval select $sum2 - $sum1 ShouldBeZero; +--enable_query_log + +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 4: +### - confirm result from snapshot select and replication relay using gtid protocol matches original + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1); + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); + +--echo # Switch to connection con1 
+connection con1; +SELECT * FROM t1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +--replace_result $gtid_executed gtid_executed_from_snapshot +eval SET @@global.gtid_purged='$gtid_executed'; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; +SELECT * FROM t1; + +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1; +--source include/start_slave.inc + +connection master; +sync_slave_with_master slave; + +SELECT * FROM t1; +SELECT * FROM t1_backup; +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 5: +### - confirm result from snapshot select and replication replay using gtid_protocol matches original +### - use non-deterministic concurrency + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +--echo # async queries from con2 +connection con2; +send INSERT INTO t1 VALUES(2); + +--echo # async queries from con3 +connection con3; +send INSERT INTO t1 VALUES(21); + +--echo # Switch to connection con1 +connection con1; + +let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1); + +--echo # Switch to connection con4 +connection con4; +INSERT INTO t1 VALUES(9); + +--echo # Switch to connection con1 +connection con1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # reap async statements 
+connection con2; +reap; + +connection con3; +reap; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +--replace_result $gtid_executed gtid_executed_from_snapshot +eval SET @@global.gtid_purged='$gtid_executed'; +DELETE FROM t1; + +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; + +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1; +--source include/start_slave.inc + +--echo # sync and then query slave +connection master; +sync_slave_with_master slave; + +let $sum1 = `SELECT SUM(a) from t1`; +let $sum2 = `SELECT SUM(a) from t1_backup`; +--disable_query_log +eval select $sum2 - $sum1 ShouldBeZero; +--enable_query_log + +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +--echo # Switch to connection default + close connections con1 and con2 +connection con1; +disconnect con1; +--source include/wait_until_disconnected.inc +connection con2; +disconnect con2; +--source include/wait_until_disconnected.inc +connection con3; +disconnect con3; +--source include/wait_until_disconnected.inc +connection con4; +disconnect con4; +--source include/wait_until_disconnected.inc + +connection default; +sync_slave_with_master slave; +--source include/stop_slave.inc +CHANGE MASTER to master_auto_position=0; +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test new file mode 100644 index 00000000000..2b590f84653 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test @@ -0,0 +1,17 @@ +--source include/master-slave.inc +--source 
include/have_binlog_format_row.inc + +--connection master +create table t1(a int primary key); + +FLUSH LOGS; + +insert into t1 values(1); +insert into t1 values(2); + +FLUSH LOGS; + +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +drop table t1; +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt new file mode 100644 index 00000000000..5c5a73bf2a4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--binlog_rows_query_log_events=TRUE --rocksdb_unsafe_for_binlog=TRUE diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt new file mode 100644 index 00000000000..67f0fcf77f0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --max_binlog_size=50000 +--slave_parallel_workers=30 --relay_log_recovery=1 --rocksdb_unsafe_for_binlog=TRUE diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test new file mode 100644 index 00000000000..17b866060b7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test @@ -0,0 +1,26 @@ +-- source include/big_test.inc +-- source include/master-slave.inc +-- source include/not_valgrind.inc +-- source include/have_gtid.inc +-- source include/have_rocksdb.inc + +connection master; +call mtr.add_suppression(".*"); +sync_slave_with_master; +-- source include/stop_slave.inc +change master to master_auto_position=1; +-- source include/start_slave.inc + +-- let $iter=100 +-- let $databases=30 +-- let 
$num_crashes=100 +-- let $include_silent=1 +-- let $storage_engine='rocksdb' +-- source extra/rpl_tests/rpl_parallel_load_innodb.test +-- let $include_silent=0 + +-- source include/stop_slave.inc +change master to master_auto_position=0; +-- source include/start_slave.inc + +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc new file mode 100644 index 00000000000..a8ac90fcc3f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc @@ -0,0 +1,56 @@ +# Run the load generator to populate the table and generate concurrent +# updates. After the load generator is complete, verify the tables on the +# master and the slave are consistent + +--sync_slave_with_master + +--connection master +--let $master_host = 127.0.0.1 +let $MYSQL_BASEDIR = `SELECT @@BASEDIR`; + +let $exec = + python $MYSQL_BASEDIR/mysql-test/suite/rocksdb_stress/t/load_generator.py + -L $MYSQL_TMP_DIR/load_generator.log -H $master_host -t $table + -P $MASTER_MYPORT -n $num_records -m $max_records + -l $num_loaders -c $num_checkers -r $num_requests + -E $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + -D $reap_delay; + +exec $exec; + +enable_reconnect; +source include/wait_until_connected_again.inc; + +--let $master_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1) + +# if sync_slave_with_master had a configurable timeout this would not be needed +let $slave_sync_timeout = 7200; +--source include/wait_for_slave_to_sync_with_master.inc + +--connection slave +--let $slave_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1) + +let $not_same = `SELECT $master_checksum-$slave_checksum`; +if ($not_same) +{ + --die "The checksums of table $table for the master and slave do not match!" 
+} + +# Cleanup +--connection master +--let $cleanup = DROP TABLE $table +eval $cleanup; + +# if sync_slave_with_master had a configurable timeout this would not be needed +let $slave_sync_timeout = 7200; +--source include/wait_for_slave_to_sync_with_master.inc + +--connection slave +--source include/stop_slave.inc +# For stress tests sometimes the replication thread can not connect to master +# temporarily. This is either because the master crashed and it is recovering +# or the master is too busy and could not service the slave's requests. +# mtr's internal check requires that there be no errors in slave status. +# restarting replication clears the errors. +--source include/start_slave.inc +--source include/stop_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf new file mode 100644 index 00000000000..fb985f5d1b4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf @@ -0,0 +1,8 @@ +!include include/default_my.cnf +!include suite/rocksdb/my.cnf +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result new file mode 100644 index 00000000000..3d76e035e05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result @@ -0,0 +1,21 @@ +include/master-slave.inc +[connection master] +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(id INT PRIMARY KEY, +thread_id INT NOT NULL, +request_id BIGINT UNSIGNED NOT NULL, +update_count INT UNSIGNED NOT NULL DEFAULT 0, +zero_sum INT DEFAULT 0, +msg VARCHAR(1024), +msg_length int, +msg_checksum varchar(128), +KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +stop slave; +start slave; +DROP TABLE t1; +stop slave; +start slave; +include/stop_slave.inc +include/start_slave.inc +include/stop_slave.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result new file mode 100644 index 00000000000..3d76e035e05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result @@ -0,0 +1,21 @@ +include/master-slave.inc +[connection master] +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(id INT PRIMARY KEY, +thread_id INT NOT NULL, +request_id BIGINT UNSIGNED NOT NULL, +update_count INT UNSIGNED NOT NULL DEFAULT 0, +zero_sum INT DEFAULT 0, +msg VARCHAR(1024), +msg_length int, +msg_checksum varchar(128), +KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +stop slave; +start slave; +DROP TABLE t1; +stop slave; +start slave; +include/stop_slave.inc +include/start_slave.inc +include/stop_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py new file mode 100644 index 00000000000..20098f49b42 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py @@ -0,0 +1,1029 @@ +import cStringIO +import array +import hashlib +import MySQLdb +from MySQLdb.constants import CR +from MySQLdb.constants import ER +from collections import deque +import os +import random +import signal +import sys +import threading +import time +import string +import traceback +import logging +import argparse + +# This is a generic load_generator for mysqld which persists across server +# restarts and attempts to verify both committed and uncommitted transactions +# are persisted correctly. 
+# +# The table schema used should look something like: +# +# CREATE TABLE t1(id INT PRIMARY KEY, +# thread_id INT NOT NULL, +# request_id BIGINT UNSIGNED NOT NULL, +# update_count INT UNSIGNED NOT NULL DEFAULT 0, +# zero_sum INT DEFAULT 0, +# msg VARCHAR(1024), +# msg_length int, +# msg_checksum varchar(128), +# KEY msg_i(msg(255), zero_sum)) +# ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +# +# zero_sum should always sum up to 0 regardless of when the transaction tries +# to process the transaction. Each transaction always maintain this sum to 0. +# +# request_id should be unique across transactions. It is used during +# transaction verification and is monotonically increasing.. +# +# Several threads are spawned at the start of the test to populate the table. +# Once the table is populated, both loader and checker threads are created. +# +# The row id space is split into two sections: exclusive and shared. Each +# loader thread owns some part of the exclusive section which it maintains +# complete information on insert/updates/deletes. Since this section is only +# modified by one thread, the thread can maintain an accurate picture of all +# changes. The shared section contains rows which multiple threads can +# update/delete/insert. For checking purposes, the request_id is used to +# determine if a row is consistent with a committed transaction. +# +# Each loader thread's transaction consists of selecting some number of rows +# randomly. The thread can choose to delete the row, update the row or insert +# the row if it doesn't exist. The state of rows that are owned by the loader +# thread are tracked within the thread's id_map. This map contains the row id +# and the request_id of the latest update. For indicating deleted rows, the +# -request_id marker is used. Thus, at any point in time, the thread's id_map +# should reflect the exact state of the rows that are owned. 
+# +# The loader thread also maintains the state of older transactions that were +# successfully processed in addition to the current transaction, which may or +# may not be committed. Each transaction state consists of the row id, and the +# request_id. Again, -request_id is used to indicate a delete. For committed +# transactions, the thread can verify the request_id of the row is larger than +# what the thread has recorded. For uncommitted transactions, the thread would +# verify the request_id of the row does not match that of the transaction. To +# determine whether or not a transaction succeeded in case of a crash right at +# commit, each thread always includes a particular row in the transaction which +# it could use to check the request id against. +# +# Checker threads run continuously to verify the checksums on the rows and to +# verify the zero_sum column sums up to zero at any point in time. The checker +# threads run both point lookups and range scans for selecting the rows. 
+ +class ValidateError(Exception): + """Raised when validation fails""" + pass + +class TestError(Exception): + """Raised when the test cannot make forward progress""" + pass + +CHARS = string.letters + string.digits +OPTIONS = {} + +# max number of rows per transaction +MAX_ROWS_PER_REQ = 10 + +# global variable checked by threads to determine if the test is stopping +TEST_STOP = False +LOADERS_READY = 0 + +# global monotonically increasing request id counter +REQUEST_ID = 1 +REQUEST_ID_LOCK = threading.Lock() + +def get_next_request_id(): + global REQUEST_ID + with REQUEST_ID_LOCK: + REQUEST_ID += 1 + return REQUEST_ID + +# given a percentage value, rolls a 100-sided die and return whether the +# given value is above or equal to the die roll +# +# passing 0 should always return false and 100 should always return true +def roll_d100(p): + assert p >= 0 and p <= 100 + return p >= random.randint(1, 100) + +def sha1(x): + return hashlib.sha1(str(x)).hexdigest() + +def is_connection_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.CR.CONNECTION_ERROR or + error_code == MySQLdb.constants.CR.CONN_HOST_ERROR or + error_code == MySQLdb.constants.CR.SERVER_LOST or + error_code == MySQLdb.constants.CR.SERVER_GONE_ERROR or + error_code == MySQLdb.constants.ER.QUERY_INTERRUPTED or + error_code == MySQLdb.constants.ER.SERVER_SHUTDOWN) + +def is_deadlock_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK or + error_code == MySQLdb.constants.ER.LOCK_WAIT_TIMEOUT) + +# should be deterministic given an idx +def gen_msg(idx, thread_id, request_id): + random.seed(idx); + # field length is 1024 bytes, but 32 are reserved for the tid and req tag + blob_length = random.randint(1, 1024 - 32) + + if roll_d100(50): + # blob that cannot be compressed (well, compresses to 85% of original size) + msg = ''.join([random.choice(CHARS) for x in xrange(blob_length)]) + else: + # blob that can be compressed + msg = 
random.choice(CHARS) * blob_length + + # append the thread_id and request_id to the end of the msg + return ''.join([msg, ' tid: %d req: %d' % (thread_id, request_id)]) + +def execute(cur, stmt): + ROW_COUNT_ERROR = 18446744073709551615L + logging.debug("Executing %s" % stmt) + cur.execute(stmt) + if cur.rowcount < 0 or cur.rowcount == ROW_COUNT_ERROR: + raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR, + "Possible connection error, rowcount is %d" + % cur.rowcount) + +def wait_for_workers(workers, min_active = 0): + logging.info("Waiting for %d workers", len(workers)) + # min_active needs to include the current waiting thread + min_active += 1 + + # polling here allows this thread to be responsive to keyboard interrupt + # exceptions, otherwise a user hitting ctrl-c would see the load_generator as + # hanging and unresponsive + try: + while threading.active_count() > min_active: + time.sleep(1) + except KeyboardInterrupt, e: + os._exit(1) + + num_failures = 0 + for w in workers: + w.join() + if w.exception: + logging.error(w.exception) + num_failures += 1 + + return num_failures + +# base class for worker threads and contains logic for handling reconnecting to +# the mysqld server during connection failure +class WorkerThread(threading.Thread): + def __init__(self, name): + threading.Thread.__init__(self) + self.name = name + self.exception = None + self.con = None + self.cur = None + self.isolation_level = None + self.start_time = time.time() + self.total_time = 0 + + def run(self): + global TEST_STOP + + try: + logging.info("Started") + self.runme() + logging.info("Completed successfully") + except Exception, e: + self.exception = traceback.format_exc() + logging.error(self.exception) + TEST_STOP = True + finally: + self.total_time = time.time() - self.start_time + logging.info("Total run time: %.2f s" % self.total_time) + self.finish() + + def reconnect(self, timeout=900): + global TEST_STOP + + self.con = None + SECONDS_BETWEEN_RETRY = 10 + 
attempts = 1 + logging.info("Attempting to connect to MySQL Server") + while not self.con and timeout > 0 and not TEST_STOP: + try: + self.con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host, + port=OPTIONS.port, db=OPTIONS.db) + if self.con: + self.con.autocommit(False) + self.cur = self.con.cursor() + self.set_isolation_level(self.isolation_level) + logging.info("Connection successful after attempt %d" % attempts) + break + except MySQLdb.Error, e: + logging.debug(traceback.format_exc()) + time.sleep(SECONDS_BETWEEN_RETRY) + timeout -= SECONDS_BETWEEN_RETRY + attempts += 1 + return self.con is None + + def get_isolation_level(self): + execute(self.cur, "SELECT @@SESSION.tx_isolation") + if self.cur.rowcount != 1: + raise TestError("Unable to retrieve tx_isolation") + return self.cur.fetchone()[0] + + def set_isolation_level(self, isolation_level, persist = False): + if isolation_level is not None: + execute(self.cur, "SET @@SESSION.tx_isolation = '%s'" % isolation_level) + if self.cur.rowcount != 0: + raise TestError("Unable to set the isolation level to %s") + + if isolation_level is None or persist: + self.isolation_level = isolation_level + +# periodically kills the server +class ReaperWorker(WorkerThread): + def __init__(self): + WorkerThread.__init__(self, 'reaper') + self.start() + self.kills = 0 + + def finish(self): + logging.info('complete with %d kills' % self.kills) + if self.con: + self.con.close() + + def get_server_pid(self): + execute(self.cur, "SELECT @@pid_file") + if self.cur.rowcount != 1: + raise TestError("Unable to retrieve pid_file") + return int(open(self.cur.fetchone()[0]).read()) + + def runme(self): + global TEST_STOP + time_remain = random.randint(10, 30) + while not TEST_STOP: + if time_remain > 0: + time_remain -= 1 + time.sleep(1) + continue + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + logging.info('killing server...') + with open(OPTIONS.expect_file, 'w+') as expect_file: + 
expect_file.write('restart') + os.kill(self.get_server_pid(), signal.SIGTERM) + self.kills += 1 + time_remain = random.randint(0, 30) + OPTIONS.reap_delay; + +# runs initially to populate the table with the given number of rows +class PopulateWorker(WorkerThread): + def __init__(self, thread_id, start_id, num_to_add): + WorkerThread.__init__(self, 'populate-%d' % thread_id) + self.thread_id = thread_id + self.start_id = start_id + self.num_to_add = num_to_add + self.table = OPTIONS.table + self.start() + + def finish(self): + if self.con: + self.con.commit() + self.con.close() + + def runme(self): + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + + stmt = None + for i in xrange(self.start_id, self.start_id + self.num_to_add): + stmt = gen_insert(self.table, i, 0, 0, 0) + execute(self.cur, stmt) + if i % 101 == 0: + self.con.commit() + self.con.commit() + logging.info("Inserted %d rows starting at id %d" % + (self.num_to_add, self.start_id)) + +def populate_table(num_records): + + logging.info("Populate_table started for %d records" % num_records) + if num_records == 0: + return False + + num_workers = min(10, num_records / 100) + workers = [] + + N = num_records / num_workers + start_id = 0 + for i in xrange(num_workers): + workers.append(PopulateWorker(i, start_id, N)) + start_id += N + if num_records > start_id: + workers.append(PopulateWorker(num_workers, start_id, + num_records - start_id)) + + # Wait for the populate threads to complete + return wait_for_workers(workers) > 0 + +def gen_insert(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, " + "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s')" + % (table, idx, thread_id, request_id, + zero_sum, msg, len(msg), sha1(msg))) + +def gen_update(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + return ("UPDATE %s SET thread_id 
= %d, request_id = %d, " + "update_count = update_count + 1, zero_sum = zero_sum + (%d), " + "msg = '%s', msg_length = %d, msg_checksum = '%s' WHERE id = %d " + % (table, thread_id, request_id, zero_sum, msg, len(msg), + sha1(msg), idx)) + +def gen_delete(table, idx): + return "DELETE FROM %s WHERE id = %d" % (table, idx) + +def gen_insert_on_dup(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + msg_checksum = sha1(msg) + return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, " + "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s') " + "ON DUPLICATE KEY UPDATE " + "thread_id=%d, request_id=%d, " + "update_count=update_count+1, " + "zero_sum=zero_sum + (%d), msg='%s', msg_length=%d, " + "msg_checksum='%s'" % + (table, idx, thread_id, request_id, + zero_sum, msg, len(msg), msg_checksum, thread_id, request_id, + zero_sum, msg, len(msg), msg_checksum)) + +# Each loader thread owns a part of the id space which it maintains inventory +# for. The loader thread generates inserts, updates and deletes for the table. +# The latest successful transaction and the latest open transaction are kept to +# verify after a disconnect that the rows were recovered properly. +class LoadGenWorker(WorkerThread): + TXN_UNCOMMITTED = 0 + TXN_COMMIT_STARTED = 1 + TXN_COMMITTED = 2 + + def __init__(self, thread_id): + WorkerThread.__init__(self, 'loader-%02d' % thread_id) + self.thread_id = thread_id + self.rand = random.Random() + self.rand.seed(thread_id) + self.loop_num = 0 + + # id_map contains the array of id's owned by this worker thread. 
It needs + # to be offset by start_id for the actual id + self.id_map = array.array('l') + self.start_id = thread_id * OPTIONS.ids_per_loader + self.num_id = OPTIONS.ids_per_loader + self.start_share_id = OPTIONS.num_loaders * OPTIONS.ids_per_loader + self.max_id = OPTIONS.max_id + self.table = OPTIONS.table + self.num_requests = OPTIONS.num_requests + + # stores information about the latest series of successful transactions + # + # each transaction is simply a map of id -> request_id + # deleted rows are indicated by -request_id + self.prev_txn = deque() + self.cur_txn = None + self.cur_txn_state = None + + self.start() + + def finish(self): + if self.total_time: + req_per_sec = self.loop_num / self.total_time + else: + req_per_sec = -1 + logging.info("total txns: %d, txn/s: %.2f rps" % + (self.loop_num, req_per_sec)) + + # constructs the internal hash map of the ids owned by this thread and + # the request id of each id + def populate_id_map(self): + logging.info("Populating id map") + + REQ_ID_COL = 0 + stmt = "SELECT request_id FROM %s WHERE id = %d" + + # the start_id is used for tracking active transactions, so the row needs + # to exist + idx = self.start_id + execute(self.cur, stmt % (self.table, idx)) + if self.cur.rowcount > 0: + request_id = self.cur.fetchone()[REQ_ID_COL] + else: + request_id = get_next_request_id() + execute(self.cur, gen_insert(self.table, idx, self.thread_id, + request_id, 0)) + self.con.commit() + + self.id_map.append(request_id) + + self.cur_txn = {idx:request_id} + self.cur_txn_state = self.TXN_COMMITTED + for i in xrange(OPTIONS.committed_txns): + self.prev_txn.append(self.cur_txn) + + # fetch the rest of the row for the id space owned by this thread + for idx in xrange(self.start_id + 1, self.start_id + self.num_id): + execute(self.cur, stmt % (self.table, idx)) + if self.cur.rowcount == 0: + # Negative number is used to indicated a missing row + self.id_map.append(-1) + else: + res = self.cur.fetchone() + 
self.id_map.append(res[REQ_ID_COL]) + + self.con.commit() + + def apply_cur_txn_changes(self): + # apply the changes to the id_map + for idx in self.cur_txn: + if idx < self.start_id + self.num_id: + assert idx >= self.start_id + self.id_map[idx - self.start_id] = self.cur_txn[idx] + self.cur_txn_state = self.TXN_COMMITTED + + self.prev_txn.append(self.cur_txn) + self.prev_txn.popleft() + + def verify_txn(self, txn, committed): + request_id = txn[self.start_id] + if not committed: + # if the transaction was not committed, then there should be no rows + # in the table that have this request_id + cond = '=' + # it is possible the start_id used to track this transaction is in + # the process of being deleted + if request_id < 0: + request_id = -request_id + else: + # if the transaction was committed, then no rows modified by this + # transaction should have a request_id less than this transaction's id + cond = '<' + stmt = ("SELECT COUNT(*) FROM %s WHERE id IN (%s) AND request_id %s %d" % + (self.table, ','.join(str(x) for x in txn), cond, request_id)) + execute(self.cur, stmt) + if (self.cur.rowcount != 1): + raise TestError("Unable to retrieve results for query '%s'" % stmt) + count = self.cur.fetchone()[0] + if (count > 0): + raise TestError("Expected '%s' to return 0 rows, but %d returned " + "instead" % (stmt, count)) + self.con.commit() + + def verify_data(self): + # if the state of the current transaction is unknown (i.e. a commit was + # issued, but the connection failed before, check the start_id row to + # determine if it was committed + request_id = self.cur_txn[self.start_id] + if self.cur_txn_state == self.TXN_COMMIT_STARTED: + assert request_id >= 0 + idx = self.start_id + stmt = "SELECT id, request_id FROM %s where id = %d" % (self.table, idx) + execute(self.cur, stmt) + if (self.cur.rowcount == 0): + raise TestError("Fetching start_id %d via '%s' returned no data! " + "This row should never be deleted!" 
% (idx, stmt)) + REQUEST_ID_COL = 1 + res = self.cur.fetchone() + if res[REQUEST_ID_COL] == self.cur_txn[idx]: + self.apply_cur_txn_changes() + else: + self.cur_txn_state = self.TXN_UNCOMMITTED + self.con.commit() + + # if the transaction was not committed, verify there are no rows at this + # request id + # + # however, if the transaction was committed, then verify none of the rows + # have a request_id below the request_id recorded by the start_id row. + if self.cur_txn_state == self.TXN_UNCOMMITTED: + self.verify_txn(self.cur_txn, False) + + # verify all committed transactions + for txn in self.prev_txn: + self.verify_txn(txn, True) + + # verify the rows owned by this worker matches the request_id at which + # they were set. + idx = self.start_id + max_map_id = self.start_id + self.num_id + row_count = 0 + ID_COL = 0 + REQ_ID_COL = ID_COL + 1 + + while idx < max_map_id: + if (row_count == 0): + num_rows_to_check = random.randint(50, 100) + execute(self.cur, + "SELECT id, request_id FROM %s where id >= %d and id < %d " + "ORDER BY id LIMIT %d" + % (self.table, idx, max_map_id, num_rows_to_check)) + + # prevent future queries from being issued since we've hit the end of + # the rows that exist in the table + row_count = self.cur.rowcount if self.cur.rowcount != 0 else -1 + + # determine the id of the next available row in the table + if (row_count > 0): + res = self.cur.fetchone() + assert idx <= res[ID_COL] + next_id = res[ID_COL] + row_count -= 1 + else: + next_id = max_map_id + + # rows up to the next id don't exist within the table, verify our + # map has them as removed + while idx < next_id: + # see if the latest transaction may have modified this id. If so, use + # that value. 
+ if self.id_map[idx - self.start_id] >= 0: + raise ValidateError("Row id %d was not found in table, but " + "id_map has it at request_id %d" % + (idx, self.id_map[idx - self.start_id])) + idx += 1 + + if idx == max_map_id: + break + + if (self.id_map[idx - self.start_id] != res[REQ_ID_COL]): + raise ValidateError("Row id %d has req id %d, but %d is the " + "expected value!" % + (idx, res[REQ_ID_COL], + self.id_map[idx - self.start_id])) + idx += 1 + + self.con.commit() + logging.debug("Verified data successfully") + + def execute_one(self): + # select a number of rows; perform an insert; update or delete operation on + # them + num_rows = random.randint(1, MAX_ROWS_PER_REQ) + ids = array.array('L') + + # allocate at least one row in the id space owned by this worker + idx = random.randint(self.start_id, self.start_id + self.num_id - 1) + ids.append(idx) + + for i in xrange(1, num_rows): + # The valid ranges for ids is from start_id to start_id + num_id and from + # start_share_id to max_id. The randint() uses the range from + # start_share_id to max_id + num_id - 1. start_share_id to max_id covers + # the shared range. The exclusive range is covered by max_id to max_id + + # num_id - 1. If any number lands in this >= max_id section, it is + # remapped to start_id and used for selecting a row in the exclusive + # section. + idx = random.randint(self.start_share_id, self.max_id + self.num_id - 1) + if idx >= self.max_id: + idx -= self.max_id - self.start_id + if ids.count(idx) == 0: + ids.append(idx) + + # perform a read of these rows + ID_COL = 0 + ZERO_SUM_COL = ID_COL + 1 + + # For repeatable-read isolation levels on MyRocks, during the lock + # acquisition part of this transaction, it is possible the selected rows + # conflict with another thread's transaction. This results in a deadlock + # error that requires the whole transaction to be rolled back because the + # transaction's current snapshot will always be reading an older version of + # the row. 
MyRocks will prevent any updates to this row until the + # snapshot is released and re-acquired. + NUM_RETRIES = 100 + for i in xrange(NUM_RETRIES): + ids_found = {} + try: + for idx in ids: + stmt = ("SELECT id, zero_sum FROM %s WHERE id = %d " + "FOR UPDATE" % (self.table, idx)) + execute(self.cur, stmt) + if self.cur.rowcount > 0: + res = self.cur.fetchone() + ids_found[res[ID_COL]] = res[ZERO_SUM_COL] + break + except MySQLdb.OperationalError, e: + if not is_deadlock_error(e): + raise e + + # if a deadlock occurred, rollback the transaction and wait a short time + # before retrying. + logging.debug("%s generated deadlock, retry %d of %d" % + (stmt, i, NUM_RETRIES)) + self.con.rollback() + time.sleep(0.2) + + if i == NUM_RETRIES - 1: + raise TestError("Unable to acquire locks after a number of retries " + "for query '%s'" % stmt) + + # ensure that the zero_sum column remains summed up to zero at the + # end of this operation + current_sum = 0 + + # all row locks acquired at this point, so allocate a request_id + request_id = get_next_request_id() + self.cur_txn = {self.start_id:request_id} + self.cur_txn_state = self.TXN_UNCOMMITTED + + for idx in ids: + stmt = None + zero_sum = self.rand.randint(-1000, 1000) + action = self.rand.randint(0, 3) + is_delete = False + + if idx in ids_found: + # for each row found, determine if it should be updated or deleted + if action == 0: + stmt = gen_delete(self.table, idx) + is_delete = True + current_sum -= ids_found[idx] + else: + stmt = gen_update(self.table, idx, self.thread_id, request_id, + zero_sum) + current_sum += zero_sum + else: + # if it does not exist, then determine if an insert should happen + if action <= 1: + stmt = gen_insert(self.table, idx, self.thread_id, request_id, + zero_sum) + current_sum += zero_sum + + if stmt is not None: + # mark in self.cur_txn what these new changes will be + if is_delete: + self.cur_txn[idx] = -request_id + else: + self.cur_txn[idx] = request_id + execute(self.cur, stmt) + if 
self.cur.rowcount == 0: + raise TestError("Executing %s returned row count of 0!" % stmt) + + # the start_id row is used to determine if this transaction has been + # committed if the connect fails and it is used to adjust the zero_sum + # correctly + idx = self.start_id + ids.append(idx) + self.cur_txn[idx] = request_id + stmt = gen_insert_on_dup(self.table, idx, self.thread_id, request_id, + -current_sum) + execute(self.cur, stmt) + if self.cur.rowcount == 0: + raise TestError("Executing '%s' returned row count of 0!" % stmt) + + # 90% commit, 10% rollback + if roll_d100(90): + self.con.rollback() + logging.debug("request %s was rolled back" % request_id) + else: + self.cur_txn_state = self.TXN_COMMIT_STARTED + self.con.commit() + if not self.con.get_server_info(): + raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR, + "Possible connection error on commit") + self.apply_cur_txn_changes() + + self.loop_num += 1 + if self.loop_num % 1000 == 0: + logging.info("Processed %d transactions so far" % self.loop_num) + + def runme(self): + global TEST_STOP, LOADERS_READY + + self.start_time = time.time() + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + + self.populate_id_map() + self.verify_data() + + logging.info("Starting load generator") + reconnected = False + LOADERS_READY += 1 + + while self.loop_num < self.num_requests and not TEST_STOP: + try: + # verify our data on each reconnect and also on ocassion + if reconnected or random.randint(1, 500) == 1: + self.verify_data() + reconnected = False + + self.execute_one() + self.loop_num += 1 + except MySQLdb.OperationalError, e: + if not is_connection_error(e): + raise e + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + reconnected = True + return + +# the checker thread is running read only transactions to verify the row +# checksums match the message. 
+class CheckerWorker(WorkerThread): + def __init__(self, thread_id): + WorkerThread.__init__(self, 'checker-%02d' % thread_id) + self.thread_id = thread_id + self.rand = random.Random() + self.rand.seed(thread_id) + self.max_id = OPTIONS.max_id + self.table = OPTIONS.table + self.loop_num = 0 + self.start() + + def finish(self): + logging.info("total loops: %d" % self.loop_num) + + def check_zerosum(self): + # two methods for checking zero sum + # 1. request the server to do it (90% of the time for now) + # 2. read all rows and calculate directly + if roll_d100(90): + stmt = "SELECT SUM(zero_sum) FROM %s" % self.table + if roll_d100(50): + stmt += " FORCE INDEX(msg_i)" + execute(self.cur, stmt) + + if self.cur.rowcount != 1: + raise ValidateError("Error with query '%s'" % stmt) + res = self.cur.fetchone()[0] + if res != 0: + raise ValidateError("Expected zero_sum to be 0, but %d returned " + "instead" % res) + else: + cur_isolation_level = self.get_isolation_level() + self.set_isolation_level('REPEATABLE-READ') + num_rows_to_check = random.randint(500, 1000) + idx = 0 + sum = 0 + + stmt = "SELECT id, zero_sum FROM %s where id >= %d ORDER BY id LIMIT %d" + ID_COL = 0 + ZERO_SUM_COL = 1 + + while idx < self.max_id: + execute(self.cur, stmt % (self.table, idx, num_rows_to_check)) + if self.cur.rowcount == 0: + break + + for i in xrange(self.cur.rowcount - 1): + sum += self.cur.fetchone()[ZERO_SUM_COL] + + last_row = self.cur.fetchone() + idx = last_row[ID_COL] + 1 + sum += last_row[ZERO_SUM_COL] + + if sum != 0: + raise TestError("Zero sum column expected to total 0, but sum is %d " + "instead!" 
% sum) + self.set_isolation_level(cur_isolation_level) + + def check_rows(self): + class id_range(): + def __init__(self, min_id, min_inclusive, max_id, max_inclusive): + self.min_id = min_id if min_inclusive else min_id + 1 + self.max_id = max_id if max_inclusive else max_id - 1 + def count(self, idx): + return idx >= self.min_id and idx <= self.max_id + + stmt = ("SELECT id, msg, msg_length, msg_checksum FROM %s WHERE " % + self.table) + + # two methods for checking rows + # 1. pick a number of rows at random + # 2. range scan + if roll_d100(90): + ids = [] + for i in xrange(random.randint(1, MAX_ROWS_PER_REQ)): + ids.append(random.randint(0, self.max_id - 1)) + stmt += "id in (%s)" % ','.join(str(x) for x in ids) + else: + id1 = random.randint(0, self.max_id - 1) + id2 = random.randint(0, self.max_id - 1) + min_inclusive = random.randint(0, 1) + cond1 = '>=' if min_inclusive else '>' + max_inclusive = random.randint(0, 1) + cond2 = '<=' if max_inclusive else '<' + stmt += ("id %s %d AND id %s %d" % + (cond1, min(id1, id2), cond2, max(id1, id2))) + ids = id_range(min(id1, id2), min_inclusive, max(id1, id2), max_inclusive) + + execute(self.cur, stmt) + + ID_COL = 0 + MSG_COL = ID_COL + 1 + MSG_LENGTH_COL = MSG_COL + 1 + MSG_CHECKSUM_COL = MSG_LENGTH_COL + 1 + + for row in self.cur.fetchall(): + idx = row[ID_COL] + msg = row[MSG_COL] + msg_length = row[MSG_LENGTH_COL] + msg_checksum = row[MSG_CHECKSUM_COL] + if ids.count(idx) < 1: + raise ValidateError( + "id %d returned from database, but query was '%s'" % (idx, stmt)) + if (len(msg) != msg_length): + raise ValidateError( + "id %d contains msg_length %d, but msg '%s' is only %d " + "characters long" % (idx, msg_length, msg, len(msg))) + if (sha1(msg) != msg_checksum): + raise ValidateError("id %d has checksum '%s', but expected checksum " + "is '%s'" % (idx, msg_checksum, sha1(msg))) + + def runme(self): + global TEST_STOP + + self.start_time = time.time() + if self.reconnect(): + raise Exception("Unable to 
connect to MySQL server") + logging.info("Starting checker") + + while not TEST_STOP: + try: + # choose one of three options: + # 1. compute zero_sum across all rows is 0 + # 2. read a number of rows and verify checksums + if roll_d100(25): + self.check_zerosum() + else: + self.check_rows() + + self.con.commit() + self.loop_num += 1 + if self.loop_num % 10000 == 0: + logging.info("Processed %d transactions so far" % self.loop_num) + except MySQLdb.OperationalError, e: + if not is_connection_error(e): + raise e + if self.reconnect(): + raise Exception("Unable to reconnect to MySQL server") + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Concurrent load generator.') + + parser.add_argument('-C, --committed-txns', dest='committed_txns', + default=3, type=int, + help="number of committed txns to verify") + + parser.add_argument('-c, --num-checkers', dest='num_checkers', type=int, + default=4, + help="number of reader/checker threads to test with") + + parser.add_argument('-d, --db', dest='db', default='test', + help="mysqld server database to test with") + + parser.add_argument('-H, --host', dest='host', default='127.0.0.1', + help="mysqld server host ip address") + + parser.add_argument('-i, --ids-per-loader', dest='ids_per_loader', + type=int, default=100, + help="number of records which each loader owns " + "exclusively, up to max-id / 2 / num-loaders") + + parser.add_argument('-L, --log-file', dest='log_file', default=None, + help="log file for output") + + parser.add_argument('-l, --num-loaders', dest='num_loaders', type=int, + default=16, + help="number of loader threads to test with") + + parser.add_argument('-m, --max-id', dest='max_id', type=int, default=1000, + help="maximum number of records which the table " + "extends to, must be larger than ids_per_loader * " + "num_loaders") + + parser.add_argument('-n, --num-records', dest='num_records', type=int, + default=0, + help="number of records to populate the table with") + + 
parser.add_argument('-P, --port', dest='port', default=3307, type=int, + help='mysqld server host port') + + parser.add_argument('-r, --num-requests', dest='num_requests', type=int, + default=100000000, + help="number of requests issued per worker thread") + + parser.add_argument('-T, --truncate', dest='truncate', action='store_true', + help="truncates or creates the table before the test") + + parser.add_argument('-t, --table', dest='table', default='t1', + help="mysqld server table to test with") + + parser.add_argument('-u, --user', dest='user', default='root', + help="user to log into the mysql server") + + parser.add_argument('-v, --verbose', dest='verbose', action='store_true', + help="enable debug logging") + + parser.add_argument('-E, --expect-file', dest='expect_file', default=None, + help="expect file for server restart") + + parser.add_argument('-D, --reap-delay', dest='reap_delay', type=int, + default=0, + help="seconds to sleep after each server reap") + + OPTIONS = parser.parse_args() + + if OPTIONS.verbose: + log_level = logging.DEBUG + else: + log_level = logging.INFO + + logging.basicConfig(level=log_level, + format='%(asctime)s %(threadName)s [%(levelname)s] ' + '%(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + filename=OPTIONS.log_file) + + logging.info("Command line given: %s" % ' '.join(sys.argv)) + + if (OPTIONS.max_id < 0 or OPTIONS.ids_per_loader <= 0 or + OPTIONS.max_id < OPTIONS.ids_per_loader * OPTIONS.num_loaders): + logging.error("ids-per-loader must be larger tha 0 and max-id must be " + "larger than ids_per_loader * num_loaders") + exit(1) + + logging.info("Using table %s.%s for test" % (OPTIONS.db, OPTIONS.table)) + + if OPTIONS.truncate: + logging.info("Truncating table") + con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host, + port=OPTIONS.port, db=OPTIONS.db) + if not con: + raise TestError("Unable to connect to mysqld server to create/truncate " + "table") + cur = con.cursor() + cur.execute("SELECT COUNT(*) FROM 
INFORMATION_SCHEMA.tables WHERE " + "table_schema = '%s' AND table_name = '%s'" % + (OPTIONS.db, OPTIONS.table)) + if cur.rowcount != 1: + logging.error("Unable to retrieve information about table %s " + "from information_schema!" % OPTIONS.table) + exit(1) + + if cur.fetchone()[0] == 0: + logging.info("Table %s not found, creating a new one" % OPTIONS.table) + cur.execute("CREATE TABLE %s (id INT PRIMARY KEY, " + "thread_id INT NOT NULL, " + "request_id BIGINT UNSIGNED NOT NULL, " + "update_count INT UNSIGNED NOT NULL DEFAULT 0, " + "zero_sum INT DEFAULT 0, " + "msg VARCHAR(1024), " + "msg_length int, " + "msg_checksum varchar(128), " + "KEY msg_i(msg(255), zero_sum)) " + "ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin" % + OPTIONS.table) + else: + logging.info("Table %s found, truncating" % OPTIONS.table) + cur.execute("TRUNCATE TABLE %s" % OPTIONS.table) + con.commit() + + if populate_table(OPTIONS.num_records): + logging.error("Populate table returned an error") + exit(1) + + logging.info("Starting %d loaders" % OPTIONS.num_loaders) + loaders = [] + for i in xrange(OPTIONS.num_loaders): + loaders.append(LoadGenWorker(i)) + + logging.info("Starting %d checkers" % OPTIONS.num_checkers) + checkers = [] + for i in xrange(OPTIONS.num_checkers): + checkers.append(CheckerWorker(i)) + + while LOADERS_READY < OPTIONS.num_loaders: + time.sleep(0.5) + + if OPTIONS.expect_file and OPTIONS.reap_delay > 0: + logging.info('Starting reaper') + checkers.append(ReaperWorker()) + + workers_failed = 0 + workers_failed += wait_for_workers(loaders, len(checkers)) + + if TEST_STOP: + logging.error("Detected test failure, aborting") + os._exit(1) + + TEST_STOP = True + + workers_failed += wait_for_workers(checkers) + + if workers_failed > 0: + logging.error("Test detected %d failures, aborting" % workers_failed) + sys.exit(1) + + logging.info("Test completed successfully") + sys.exit(0) diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test 
b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test new file mode 100644 index 00000000000..7d92bb3f83a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test @@ -0,0 +1,31 @@ +# basic stress tests for myrocks, just runs the load generator without any crashes + +# Don't test this under valgrind, memory leaks will occur +--disable_warnings +--source include/not_valgrind.inc +--source include/have_rocksdb.inc +--source include/master-slave.inc +DROP TABLE IF EXISTS t1; +--enable_warnings + +# create the actual table +CREATE TABLE t1(id INT PRIMARY KEY, + thread_id INT NOT NULL, + request_id BIGINT UNSIGNED NOT NULL, + update_count INT UNSIGNED NOT NULL DEFAULT 0, + zero_sum INT DEFAULT 0, + msg VARCHAR(1024), + msg_length int, + msg_checksum varchar(128), + KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +--let $table = t1 +--let $num_loaders = 8 +--let $num_checkers = 2 +--let $num_records = 200 +--let $max_records = 100000 +--let $num_requests = 10000 +--let $reap_delay = 0 + +--source suite/rocksdb_stress/include/rocksdb_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test new file mode 100644 index 00000000000..6f6128579b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test @@ -0,0 +1,32 @@ +# basic stress tests for myrocks, runs the load generator with periodic crashes + +# Don't test this under valgrind, memory leaks will occur +--disable_warnings +--source include/not_valgrind.inc +--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_binlog_format_row.inc +DROP TABLE IF EXISTS t1; +--enable_warnings + +# create the actual table +CREATE TABLE t1(id INT PRIMARY KEY, + thread_id INT NOT NULL, + request_id BIGINT UNSIGNED NOT NULL, + update_count INT UNSIGNED NOT NULL DEFAULT 0, + zero_sum 
INT DEFAULT 0, + msg VARCHAR(1024), + msg_length int, + msg_checksum varchar(128), + KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +--let $table = t1 +--let $num_loaders = 8 +--let $num_checkers = 2 +--let $num_records = 200 +--let $max_records = 100000 +--let $num_requests = 10000 +--let $reap_delay = 180 + +--source suite/rocksdb_stress/include/rocksdb_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result new file mode 100644 index 00000000000..159d6a983c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result @@ -0,0 +1,13 @@ +create table t1 (test_name text) engine=MyISAM; +create table t2 (variable_name text) engine=MyISAM; +load data infile "MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1; +insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%"; +insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%"; +select variable_name as `There should be *no* long test name listed below:` from t2 +where length(variable_name) > 50; +There should be *no* long test name listed below: +select variable_name as `There should be *no* variables listed below:` from t2 +left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; +There should be *no* variables listed below: +drop table t1; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result new file mode 100644 index 00000000000..4398563d064 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = 
@@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START = 444; +ERROR HY000: Variable 'rocksdb_access_hint_on_compaction_start' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result new file mode 100644 index 00000000000..f7175fd91a3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN = 444; +ERROR HY000: Variable 'rocksdb_advise_random_on_open' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result new file mode 100644 index 00000000000..93ec1aec407 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid 
values in global scope#' +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 1" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 1; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 0" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 0; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to on" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = on; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 444; +ERROR HY000: Variable 'rocksdb_allow_concurrent_memtable_write' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'aaa'" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'bbb'" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = @start_global_value; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result new file mode 100644 index 00000000000..f0f1b077ae0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_READS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_READS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ALLOW_MMAP_READS = 444; +ERROR HY000: Variable 'rocksdb_allow_mmap_reads' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result new file mode 100644 index 00000000000..3fa1f14e1df --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_WRITES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_WRITES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ALLOW_MMAP_WRITES = 444; +ERROR HY000: Variable 'rocksdb_allow_mmap_writes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result new file mode 100644 index 00000000000..6099c3af344 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_OS_BUFFER; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ALLOW_OS_BUFFER to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ALLOW_OS_BUFFER = 444; +ERROR HY000: Variable 'rocksdb_allow_os_buffer' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result new file mode 100644 index 00000000000..8998bfee64d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result @@ -0,0 +1,68 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_BACKGROUND_SYNC; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 1" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 1; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 0" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 0; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to on" +SET @@global.ROCKSDB_BACKGROUND_SYNC = on; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT 
@@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to off" +SET @@global.ROCKSDB_BACKGROUND_SYNC = off; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@session.ROCKSDB_BACKGROUND_SYNC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_BACKGROUND_SYNC = 444; +ERROR HY000: Variable 'rocksdb_background_sync' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 'aaa'" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +SET @@global.ROCKSDB_BACKGROUND_SYNC = @start_global_value; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result new file mode 100644 index 00000000000..09acaada0c6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_base_background_compactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result new file mode 100644 index 00000000000..fbd9d97e994 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE; +SELECT @start_global_value; +@start_global_value +8388608 +"Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result new file mode 100644 index 00000000000..4d02e197a67 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_RESTART_INTERVAL; +SELECT @start_global_value; +@start_global_value +16 +"Trying to set variable @@global.ROCKSDB_BLOCK_RESTART_INTERVAL to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BLOCK_RESTART_INTERVAL = 444; +ERROR HY000: Variable 'rocksdb_block_restart_interval' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result new file mode 100644 index 00000000000..0382184f2a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE; +SELECT @start_global_value; +@start_global_value +4096 +"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_BLOCK_SIZE = 444; +ERROR HY000: Variable 'rocksdb_block_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result new file mode 100644 index 00000000000..83513f814ed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE_DEVIATION; +SELECT @start_global_value; +@start_global_value +10 +"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE_DEVIATION to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BLOCK_SIZE_DEVIATION = 444; +ERROR HY000: Variable 'rocksdb_block_size_deviation' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result new file mode 100644 index 00000000000..96b78cf669e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_BULK_LOAD; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_BULK_LOAD; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 1" +SET @@global.ROCKSDB_BULK_LOAD = 1; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 0" +SET @@global.ROCKSDB_BULK_LOAD = 0; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to on" +SET @@global.ROCKSDB_BULK_LOAD = on; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT 
@@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 1" +SET @@session.ROCKSDB_BULK_LOAD = 1; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 0" +SET @@session.ROCKSDB_BULK_LOAD = 0; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to on" +SET @@session.ROCKSDB_BULK_LOAD = on; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'aaa'" +SET @@global.ROCKSDB_BULK_LOAD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'bbb'" +SET @@global.ROCKSDB_BULK_LOAD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +SET @@global.ROCKSDB_BULK_LOAD = @start_global_value; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +SET @@session.ROCKSDB_BULK_LOAD = @start_session_value; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result new file mode 100644 index 00000000000..40404d2fab5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_BULK_LOAD_SIZE; +SELECT @start_global_value; +@start_global_value +1000 +SET @start_session_value = @@session.ROCKSDB_BULK_LOAD_SIZE; +SELECT @start_session_value; +@start_session_value +1000 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1024" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1024; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE to 1" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE 
to 1024" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1024; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 'aaa'" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +SET @@global.ROCKSDB_BULK_LOAD_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +SET @@session.ROCKSDB_BULK_LOAD_SIZE = @start_session_value; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result new file mode 100644 index 00000000000..ede02afcb60 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BYTES_PER_SYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_bytes_per_sync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result new file mode 100644 index 00000000000..12c25ad63dc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS = 444; +ERROR HY000: Variable 'rocksdb_cache_index_and_filter_blocks' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result new file mode 100644 index 00000000000..694c9a4f1dc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(99); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CHECKSUMS_PCT; +SELECT @start_global_value; +@start_global_value +100 +SET @start_session_value = @@session.ROCKSDB_CHECKSUMS_PCT; +SELECT @start_session_value; +@start_session_value +100 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 0" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 0; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +0 
+"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 1" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 1; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 99" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 99; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +99 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 0" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 0; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 1" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 1; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 99" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 99; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +99 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; 
+@@session.ROCKSDB_CHECKSUMS_PCT +100 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 'aaa'" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +SET @@global.ROCKSDB_CHECKSUMS_PCT = @start_global_value; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +SET @@session.ROCKSDB_CHECKSUMS_PCT = @start_session_value; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result new file mode 100644 index 00000000000..2f101987332 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_COLLECT_SST_PROPERTIES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_COLLECT_SST_PROPERTIES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_COLLECT_SST_PROPERTIES = 444; +ERROR HY000: Variable 'rocksdb_collect_sst_properties' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result new file mode 100644 index 00000000000..4664ccb2b1e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to on" 
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = on; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to on" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = on; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'aaa'" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'bbb'" +SET 
@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_global_value; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_session_value; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result new file mode 100644 index 00000000000..85517df6ce6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result @@ -0,0 +1,39 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_COMPACT_CF; +SELECT @start_global_value; +@start_global_value + +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACT_CF to abc" +SET @@global.ROCKSDB_COMPACT_CF = abc; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACT_CF = DEFAULT; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Trying to set variable @@global.ROCKSDB_COMPACT_CF to def" +SET @@global.ROCKSDB_COMPACT_CF = def; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACT_CF = DEFAULT; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Trying to set variable @@session.ROCKSDB_COMPACT_CF to 444. 
It should fail because it is not session." +SET @@session.ROCKSDB_COMPACT_CF = 444; +ERROR HY000: Variable 'rocksdb_compact_cf' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_COMPACT_CF = @start_global_value; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result new file mode 100644 index 00000000000..d971396f9e8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result @@ -0,0 +1,70 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @@global.rocksdb_compaction_readahead_size = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_compaction_readahead_siz value: '-1' +SELECT @@global.rocksdb_compaction_readahead_size; +@@global.rocksdb_compaction_readahead_size +0 +SET @start_global_value = @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 1" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 1; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set 
variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 0" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 0; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 222333" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 222333; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE to 444. It should fail because it is not session." +SET @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE = 444; +ERROR HY000: Variable 'rocksdb_compaction_readahead_size' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'bbb'" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result new file mode 100644 index 00000000000..311184a17d4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 2000000" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 2000000; +SELECT 
@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +2000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 444. It should fail because it is not session." +SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to '2000001'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = '2000001'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result new file mode 100644 index 00000000000..d4e7e28bebc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values 
VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 0" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 0; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to on" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = on; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; 
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 444. It should fail because it is not session." +SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_count_sd' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'bbb'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result new file mode 100644 index 00000000000..703e235ed18 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values 
VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_file_size' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result new file mode 100644 index 00000000000..84436b65795 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; 
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 2000000" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 2000000; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +2000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_window' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to '2000001'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = '2000001'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result new file mode 100644 index 00000000000..35e4d252e11 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result @@ -0,0 +1,15 @@ +SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT; +SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/abc'; +SELECT @@global.ROCKSDB_CREATE_CHECKPOINT; +@@global.ROCKSDB_CREATE_CHECKPOINT + +SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT; +SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/def'; +SELECT @@global.ROCKSDB_CREATE_CHECKPOINT; +@@global.ROCKSDB_CREATE_CHECKPOINT + +SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT; +SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444; +ERROR HY000: Variable 
'rocksdb_create_checkpoint' is a GLOBAL variable and should be set with SET GLOBAL +SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value; +ERROR HY000: RocksDB: Failed to create checkpoint directory. status 5 IO error: .tmp: No such file or directory diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result new file mode 100644 index 00000000000..26dd14fbb68 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CREATE_IF_MISSING; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_CREATE_IF_MISSING to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_CREATE_IF_MISSING = 444; +ERROR HY000: Variable 'rocksdb_create_if_missing' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result new file mode 100644 index 00000000000..7debadc2bb1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES = 444; +ERROR HY000: Variable 'rocksdb_create_missing_column_families' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result new file mode 100644 index 00000000000..a3f9eff6c1f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DATADIR; +SELECT @start_global_value; +@start_global_value +./.rocksdb +"Trying to set variable @@global.ROCKSDB_DATADIR to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_DATADIR = 444; +ERROR HY000: Variable 'rocksdb_datadir' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result new file mode 100644 index 00000000000..6c588b7e060 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE = 444; +ERROR HY000: Variable 'rocksdb_db_write_buffer_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result new file mode 100644 index 00000000000..5e64ccc69c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 1" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 1; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; 
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 0" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 0; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to on" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = on; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 444; +ERROR HY000: Variable 'rocksdb_debug_optimizer_no_zero_cardinality' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'aaa'" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'bbb'" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = @start_global_value; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result new file mode 100644 index 00000000000..b2b1c0e4c97 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DEFAULT_CF_OPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_DEFAULT_CF_OPTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_DEFAULT_CF_OPTIONS = 444; +ERROR HY000: Variable 'rocksdb_default_cf_options' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result new file mode 100644 index 00000000000..2dc220fbe20 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS; +SELECT @start_global_value; +@start_global_value +21600000000 +"Trying to set variable @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS = 444; +ERROR HY000: Variable 'rocksdb_delete_obsolete_files_period_micros' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result new file mode 100644 index 00000000000..708dd462dfe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_DISABLE_2PC; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 1" +SET @@global.ROCKSDB_DISABLE_2PC = 1; +SELECT @@global.ROCKSDB_DISABLE_2PC; 
+@@global.ROCKSDB_DISABLE_2PC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 0" +SET @@global.ROCKSDB_DISABLE_2PC = 0; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to on" +SET @@global.ROCKSDB_DISABLE_2PC = on; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to off" +SET @@global.ROCKSDB_DISABLE_2PC = off; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Trying to set variable @@session.ROCKSDB_DISABLE_2PC to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_DISABLE_2PC = 444; +ERROR HY000: Variable 'rocksdb_disable_2pc' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'aaa'" +SET @@global.ROCKSDB_DISABLE_2PC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'bbb'" +SET @@global.ROCKSDB_DISABLE_2PC = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +SET @@global.ROCKSDB_DISABLE_2PC = @start_global_value; +SELECT @@global.ROCKSDB_DISABLE_2PC; +@@global.ROCKSDB_DISABLE_2PC +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result new file mode 100644 index 00000000000..9b3000f8f3c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DISABLEDATASYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_DISABLEDATASYNC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_DISABLEDATASYNC = 444; +ERROR HY000: Variable 'rocksdb_disabledatasync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result new file mode 100644 index 00000000000..2c0ff289d8a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_BULK_LOAD_API; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_BULK_LOAD_API to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ENABLE_BULK_LOAD_API = 444; +ERROR HY000: Variable 'rocksdb_enable_bulk_load_api' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result new file mode 100644 index 00000000000..f12e39fff93 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ENABLE_THREAD_TRACKING; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_THREAD_TRACKING to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ENABLE_THREAD_TRACKING = 444; +ERROR HY000: Variable 'rocksdb_enable_thread_tracking' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result new file mode 100644 index 00000000000..c93152c4756 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 1" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 1; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 0" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 0; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT 
@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to on" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = on; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 444. It should fail because it is not session." +SET @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 444; +ERROR HY000: Variable 'rocksdb_enable_write_thread_adaptive_yield' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'aaa'" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'bbb'" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result new file mode 100644 index 00000000000..650e2956e23 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_ERROR_IF_EXISTS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ERROR_IF_EXISTS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ERROR_IF_EXISTS = 444; +ERROR HY000: Variable 'rocksdb_error_if_exists' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result new file mode 100644 index 00000000000..ae4b0ac05a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result @@ -0,0 +1,58 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +SELECT * FROM t1; +a b +1 1 +2 2 +3 3 +set session rocksdb_flush_memtable_on_analyze=off; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW INDEXES FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 
0 PRIMARY 1 a A 0 NULL NULL LSMTREE +set session rocksdb_flush_memtable_on_analyze=on; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW INDEXES FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +SELECT * FROM t1; +a b +1 1 +2 2 +3 3 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 0 0 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 3 8 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result new file mode 100644 index 00000000000..30444e26d98 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result @@ -0,0 +1,50 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO 
valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 1" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 1; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 0" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 0; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to on" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = on; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 444; +ERROR HY000: Variable 'rocksdb_force_flush_memtable_now' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = @start_global_value; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result new file mode 100644 index 00000000000..1a7a21c3a9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result @@ -0,0 +1,106 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @@session.rocksdb_force_index_records_in_range = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_force_index_records_in_r value: '-1' +SELECT @@session.rocksdb_force_index_records_in_range; +@@session.rocksdb_force_index_records_in_range +0 +SET @start_global_value = @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +1 +"Setting the global scope 
variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; 
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +222333 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'aaa'" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'bbb'" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_global_value; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_session_value; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result new file mode 100644 index 00000000000..34deca6ce85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result @@ -0,0 +1,7 @@ +SET 
@start_global_value = @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION = 444; +ERROR HY000: Variable 'rocksdb_hash_index_allow_collision' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result new file mode 100644 index 00000000000..97c6ed84de7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_INDEX_TYPE; +SELECT @start_global_value; +@start_global_value +kBinarySearch +"Trying to set variable @@global.ROCKSDB_INDEX_TYPE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_INDEX_TYPE = 444; +ERROR HY000: Variable 'rocksdb_index_type' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result new file mode 100644 index 00000000000..1509f9ae95d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('fatal_level'); +INSERT INTO valid_values VALUES('error_level'); +INSERT INTO valid_values VALUES('warn_level'); +INSERT INTO valid_values VALUES('info_level'); +INSERT INTO valid_values VALUES('debug_level'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES(5); +INSERT INTO invalid_values VALUES(6); +INSERT INTO invalid_values VALUES('foo'); +SET @start_global_value = @@global.ROCKSDB_INFO_LOG_LEVEL; +SELECT @start_global_value; +@start_global_value 
+error_level +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to fatal_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = fatal_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +fatal_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to error_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = error_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to warn_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = warn_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +warn_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to info_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = info_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +info_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to debug_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = debug_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +debug_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT 
@@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@session.ROCKSDB_INFO_LOG_LEVEL to 444. It should fail because it is not session." +SET @@session.ROCKSDB_INFO_LOG_LEVEL = 444; +ERROR HY000: Variable 'rocksdb_info_log_level' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 5" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = 5; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 6" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = 6; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to foo" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = foo; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +SET @@global.ROCKSDB_INFO_LOG_LEVEL = @start_global_value; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result new file mode 100644 index 00000000000..87dd0e90511 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC = 444; +ERROR HY000: Variable 'rocksdb_is_fd_close_on_exec' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result new file mode 100644 index 00000000000..3a0c5060d00 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_KEEP_LOG_FILE_NUM; +SELECT @start_global_value; +@start_global_value +1000 +"Trying to set variable @@global.ROCKSDB_KEEP_LOG_FILE_NUM to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_KEEP_LOG_FILE_NUM = 444; +ERROR HY000: Variable 'rocksdb_keep_log_file_num' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result new file mode 100644 index 00000000000..eff9e619967 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result @@ -0,0 +1,170 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES(2); +INSERT INTO invalid_values VALUES(1000); +SET @start_global_value = @@global.ROCKSDB_LOCK_SCANNED_ROWS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_LOCK_SCANNED_ROWS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable 
@@global.ROCKSDB_LOCK_SCANNED_ROWS to 1" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 0" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 0; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to on" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = on; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to off" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = off; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to true" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = true; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to false" +SET 
@@global.ROCKSDB_LOCK_SCANNED_ROWS = false; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 1" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 1; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 0" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 0; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to on" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = on; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to off" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = off; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to true" +SET 
@@session.ROCKSDB_LOCK_SCANNED_ROWS = true; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to false" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = false; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 'aaa'" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 2" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 2; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 1000" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1000; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = @start_global_value; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = @start_session_value; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result new file 
mode 100644 index 00000000000..38df5820298 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +SELECT @start_global_value; +@start_global_value +1 +SET @start_session_value = @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +SELECT @start_session_value; +@start_session_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024" +SET 
@@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 'aaa'" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_global_value; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_session_value; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result new file mode 100644 index 00000000000..24cff58426a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL = 444; +ERROR HY000: Variable 'rocksdb_log_file_time_to_roll' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result new file mode 100644 index 00000000000..dbb331d235d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE; +SELECT @start_global_value; +@start_global_value +4194304 +"Trying to set variable @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE = 444; +ERROR HY000: Variable 'rocksdb_manifest_preallocation_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result new file mode 100644 index 00000000000..903e393d5ea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_max_background_compactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result new file mode 100644 index 00000000000..ff8f2b5997b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES = 444; +ERROR HY000: Variable 'rocksdb_max_background_flushes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result new file mode 100644 index 00000000000..4359ee725d4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_LOG_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_MAX_LOG_FILE_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_LOG_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_log_file_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result new file mode 100644 index 00000000000..27cddc9f60a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +18446744073709551615 +"Trying to set variable @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_manifest_file_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result new file mode 100644 index 00000000000..b058ebf05f8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_OPEN_FILES; +SELECT @start_global_value; +@start_global_value +-1 +"Trying to set variable @@global.ROCKSDB_MAX_OPEN_FILES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_OPEN_FILES = 444; +ERROR HY000: Variable 'rocksdb_max_open_files' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result new file mode 100644 index 00000000000..e417e4d5c4e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_MAX_ROW_LOCKS; +SELECT @start_global_value; +@start_global_value +1073741824 +SET @start_session_value = @@session.ROCKSDB_MAX_ROW_LOCKS; +SELECT @start_session_value; +@start_session_value +1073741824 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1024" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1024; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1 +"Setting the session scope 
variable back to default" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1024" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1024; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 'aaa'" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +SET @@global.ROCKSDB_MAX_ROW_LOCKS = @start_global_value; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +SET @@session.ROCKSDB_MAX_ROW_LOCKS = @start_session_value; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result new file mode 100644 index 00000000000..58452f580f2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_SUBCOMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_MAX_SUBCOMPACTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_SUBCOMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_max_subcompactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result new file mode 100644 index 00000000000..22c17c24e19 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_total_wal_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result new file mode 100644 index 00000000000..e82e987bf96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result @@ -0,0 +1,43 @@ +drop table if exists t1; +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP INDEX kj on t1; +DROP INDEX kij ON t1; +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), 
+ KEY `kj` (`j`), + KEY `kij` (`i`,`j`), + KEY `kji` (`j`,`i`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`) COMMENT 'rev:cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +100 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result new file mode 100644 index 00000000000..122e2451f39 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result @@ -0,0 +1,29 @@ +drop table if exists t1; +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP INDEX kj on t1; +DROP INDEX kij ON t1; +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`), + KEY `kji` (`j`,`i`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result new file mode 100644 index 00000000000..c2daec327a2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS = 444; +ERROR HY000: Variable 'rocksdb_new_table_reader_for_compaction_inputs' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result new file mode 100644 index 00000000000..7bd32950303 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_NO_BLOCK_CACHE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_NO_BLOCK_CACHE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_NO_BLOCK_CACHE = 444; +ERROR HY000: Variable 'rocksdb_no_block_cache' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result new file mode 100644 index 00000000000..59042124dc8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_OVERRIDE_CF_OPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_OVERRIDE_CF_OPTIONS to 444. 
It should fail because it is readonly." +SET @@global.ROCKSDB_OVERRIDE_CF_OPTIONS = 444; +ERROR HY000: Variable 'rocksdb_override_cf_options' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result new file mode 100644 index 00000000000..102d4926e65 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_PARANOID_CHECKS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_PARANOID_CHECKS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_PARANOID_CHECKS = 444; +ERROR HY000: Variable 'rocksdb_paranoid_checks' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result new file mode 100644 index 00000000000..5849fe09a20 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 1" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +1 +"Setting the 
global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 0" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 0; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to on" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = on; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to off" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = off; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@session.ROCKSDB_PAUSE_BACKGROUND_WORK to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_PAUSE_BACKGROUND_WORK = 444; +ERROR HY000: Variable 'rocksdb_pause_background_work' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'aaa'" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'bbb'" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = @start_global_value; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result new file mode 100644 index 00000000000..292ba58a3a3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result @@ -0,0 +1,114 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(3); +INSERT INTO valid_values VALUES(4); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 1" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL 
= 1; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 2" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 2; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +2 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 3" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 3; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +3 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 4" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 4; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +4 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 1" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 1; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 2" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 
2; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +2 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 3" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 3; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +3 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 4" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 4; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +4 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 'aaa'" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = @start_global_value; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = @start_session_value; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result new file mode 100644 index 00000000000..c152ecf1e5a 
--- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE = 444; +ERROR HY000: Variable 'rocksdb_pin_l0_filter_and_index_blocks_in_cache' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result new file mode 100644 index 00000000000..94eb9e34057 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result @@ -0,0 +1,101 @@ +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); +SET @start_global_value = @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +SELECT @start_global_value; +@start_global_value +10000 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Trying to set variable @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 444; +ERROR HY000: Variable 'rocksdb_rate_limiter_bytes_per_sec' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 'aaa'" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 3.14" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 3.14; +Got one of the listed errors +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = @start_global_value; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +10000 +DROP TABLE valid_values; +DROP TABLE invalid_values; +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_rate_limiter_bytes_per_s value: '-1' +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result new file mode 100644 index 00000000000..b218fe034aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result @@ -0,0 +1,65 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('a'); +INSERT INTO valid_values VALUES('b'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_READ_FREE_RPL_TABLES; +SELECT @start_global_value; +@start_global_value + +SET @start_session_value = @@session.ROCKSDB_READ_FREE_RPL_TABLES; +SELECT @start_session_value; +@start_session_value + +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to a" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = a; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES +a +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to b" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = b; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES +b +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to a" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = a; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES +a +"Setting the session scope variable back to default" +SET 
@@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to b" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = b; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES +b +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = @start_global_value; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = @start_session_value; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result new file mode 100644 index 00000000000..e866787efe0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_RECORDS_IN_RANGE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_RECORDS_IN_RANGE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE 
to 1" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 1; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 0" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 0; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 222333" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 222333; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 1" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = 1; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 0" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = 0; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 222333" +SET @@session.ROCKSDB_RECORDS_IN_RANGE 
= 222333; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +222333 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'aaa'" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'bbb'" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +SET @@global.ROCKSDB_RECORDS_IN_RANGE = @start_global_value; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +SET @@session.ROCKSDB_RECORDS_IN_RANGE = @start_session_value; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test new file mode 100644 index 00000000000..5f6522e4488 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test @@ -0,0 +1,68 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_RPL_SKIP_TX_API; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable 
@@global.ROCKSDB_RPL_SKIP_TX_API to 1" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = 1; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 0" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = 0; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to on" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = on; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to off" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = off; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +"Trying to set variable @@session.ROCKSDB_RPL_SKIP_TX_API to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_RPL_SKIP_TX_API = 444; +ERROR HY000: Variable 'rocksdb_rpl_skip_tx_api' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 'aaa'" +SET @@global.ROCKSDB_RPL_SKIP_TX_API = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +SET @@global.ROCKSDB_RPL_SKIP_TX_API = @start_global_value; +SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; +@@global.ROCKSDB_RPL_SKIP_TX_API +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result new file mode 100644 index 00000000000..ea80d88f653 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +SELECT @start_global_value; +@start_global_value +3600 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 0" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 0; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1024" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1024; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 444; +ERROR HY000: Variable 'rocksdb_seconds_between_stat_computes' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'aaa'" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'bbb'" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = @start_global_value; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result new file mode 100644 index 00000000000..94a15275900 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 1" +SET 
@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 1; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 0" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 0; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to on" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = on; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 444; +ERROR HY000: Variable 'rocksdb_signal_drop_index_thread' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'aaa'" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'bbb'" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = @start_global_value; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result new file mode 100644 index 00000000000..201bc5009ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set 
variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; 
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'aaa'" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'bbb'" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result new file mode 100644 index 00000000000..a843851cf26 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SKIP_FILL_CACHE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_SKIP_FILL_CACHE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 1" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 1; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 0" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 0; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to on" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = on; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 1" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = 1; +SELECT 
@@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 0" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = 0; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to on" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = on; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'aaa'" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'bbb'" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +SET @@global.ROCKSDB_SKIP_FILL_CACHE = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +SET @@session.ROCKSDB_SKIP_FILL_CACHE = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result new file mode 100644 index 00000000000..a1244723b05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result @@ -0,0 +1,163 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 0" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 0; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 1" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 1; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to on" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = on; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the global scope variable 
back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to off" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = off; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to true" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = true; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to false" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = false; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 0" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 0; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 1" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 1; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the session scope variable back to default" 
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to on" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = on; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to off" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = off; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to true" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = true; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to false" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = false; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'aaa'" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'bbb'" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK +0 +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result new file mode 100644 index 00000000000..3e169671cc0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result @@ -0,0 +1,65 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES("aaa"); +INSERT INTO valid_values VALUES("bbb"); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +SELECT @start_global_value; +@start_global_value +.* +SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +SELECT @start_session_value; +@start_session_value +.* +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +aaa +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb" +SET 
@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +bbb +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +aaa +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +bbb +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result new file mode 100644 index 00000000000..2dbf5a55b87 --- /dev/null 
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC; +SELECT @start_global_value; +@start_global_value +600 +"Trying to set variable @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC = 444; +ERROR HY000: Variable 'rocksdb_stats_dump_period_sec' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result new file mode 100644 index 00000000000..904a0bc536e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_STORE_CHECKSUMS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_STORE_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 1" +SET @@global.ROCKSDB_STORE_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 0" +SET @@global.ROCKSDB_STORE_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 
+"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to on" +SET @@global.ROCKSDB_STORE_CHECKSUMS = on; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 1" +SET @@session.ROCKSDB_STORE_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 0" +SET @@session.ROCKSDB_STORE_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to on" +SET @@session.ROCKSDB_STORE_CHECKSUMS = on; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'aaa'" +SET @@global.ROCKSDB_STORE_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_STORE_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 +SET @@global.ROCKSDB_STORE_CHECKSUMS = @start_global_value; +SELECT @@global.ROCKSDB_STORE_CHECKSUMS; +@@global.ROCKSDB_STORE_CHECKSUMS +0 +SET @@session.ROCKSDB_STORE_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_STORE_CHECKSUMS; +@@session.ROCKSDB_STORE_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result new file mode 100644 index 00000000000..46d238d1fa3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_CHECK; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 1" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 1; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 0" +SET 
@@global.ROCKSDB_STRICT_COLLATION_CHECK = 0; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to on" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = on; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to off" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = off; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@session.ROCKSDB_STRICT_COLLATION_CHECK to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_STRICT_COLLATION_CHECK = 444; +ERROR HY000: Variable 'rocksdb_strict_collation_check' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'aaa'" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'bbb'" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result new file mode 100644 index 00000000000..5f748621d25 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result @@ -0,0 +1,36 @@ +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +mytable +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)." 
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +t.* +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +s.*,t.* +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = ""; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session." +SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444; +ERROR HY000: Variable 'rocksdb_strict_collation_exceptions' is a GLOBAL variable and should be set with SET GLOBAL +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result new file mode 100644 index 00000000000..0161a339082 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS; +SELECT @start_global_value; +@start_global_value +6 +"Trying to set variable @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS = 444; +ERROR HY000: Variable 'rocksdb_table_cache_numshardbits' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result new file mode 100644 index 00000000000..6ff47ab9569 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result @@ -0,0 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); +SET @start_global_value = @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +SELECT @start_global_value; +@start_global_value +10 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 100" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 1" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 1; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; 
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 0" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 0; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 444. It should fail because it is not session." +SET @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 444; +ERROR HY000: Variable 'rocksdb_table_stats_sampling_pct' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'aaa'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'bbb'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '-1'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '101'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '484436'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '484436'; 
+Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = @start_global_value; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result new file mode 100644 index 00000000000..c9748cc6306 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 1" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 1; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 0" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 0; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 
DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to on" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = on; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 1" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 1; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 0" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 0; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to on" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = on; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'aaa'" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable 
@@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'bbb'" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = @start_global_value; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = @start_session_value; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result new file mode 100644 index 00000000000..ef4007c7549 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_ADAPTIVE_MUTEX; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_ADAPTIVE_MUTEX to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_ADAPTIVE_MUTEX = 444; +ERROR HY000: Variable 'rocksdb_use_adaptive_mutex' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result new file mode 100644 index 00000000000..254cc2ceb5d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_FSYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_FSYNC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_USE_FSYNC = 444; +ERROR HY000: Variable 'rocksdb_use_fsync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result new file mode 100644 index 00000000000..c7b874877f8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_VALIDATE_TABLES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_VALIDATE_TABLES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_VALIDATE_TABLES = 444; +ERROR HY000: Variable 'rocksdb_validate_tables' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result new file mode 100644 index 00000000000..da4cae7a151 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_VERIFY_CHECKSUMS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_VERIFY_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 1" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +1 +"Setting the global scope variable back 
to default" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 0" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to on" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = on; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 1" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 0" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to on" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = on; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 
DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'aaa'" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +SET @@global.ROCKSDB_VERIFY_CHECKSUMS = @start_global_value; +SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; +@@global.ROCKSDB_VERIFY_CHECKSUMS +0 +SET @@session.ROCKSDB_VERIFY_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; +@@session.ROCKSDB_VERIFY_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result new file mode 100644 index 00000000000..7da628b73fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_wal_bytes_per_sync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result new file mode 100644 index 00000000000..fd76a5ec00f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_DIR; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_WAL_DIR to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_WAL_DIR = 444; +ERROR HY000: Variable 'rocksdb_wal_dir' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result new file mode 100644 index 00000000000..cf11f295c29 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE; +SELECT @start_global_value; +@start_global_value +2 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +2 +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0" +SET 
@@global.ROCKSDB_WAL_RECOVERY_MODE = 0; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +2 +"Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session." +SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444; +ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 'aaa'" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +2 +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +2 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result new file mode 100644 index 00000000000..5f03597df3a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_SIZE_LIMIT_MB; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_SIZE_LIMIT_MB to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WAL_SIZE_LIMIT_MB = 444; +ERROR HY000: Variable 'rocksdb_wal_size_limit_mb' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result new file mode 100644 index 00000000000..23f7fc81e7f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_TTL_SECONDS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_TTL_SECONDS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_WAL_TTL_SECONDS = 444; +ERROR HY000: Variable 'rocksdb_wal_ttl_seconds' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result new file mode 100644 index 00000000000..0d6f7216e9a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WHOLE_KEY_FILTERING; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_WHOLE_KEY_FILTERING to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WHOLE_KEY_FILTERING = 444; +ERROR HY000: Variable 'rocksdb_whole_key_filtering' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result new file mode 100644 index 00000000000..b71ee7f91cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result @@ -0,0 +1,114 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_WRITE_DISABLE_WAL; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_WRITE_DISABLE_WAL; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 1" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 1; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 0" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 0; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to on" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = on; +SELECT 
@@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to off" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = off; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 1" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 1; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 0" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 0; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to on" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = on; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to off" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = off; +SELECT 
@@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 'aaa'" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = @start_global_value; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = @start_session_value; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result new file mode 100644 index 00000000000..dbe46858c94 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable 
@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT 
@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'aaa'" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'bbb'" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_global_value; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; 
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_session_value; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result new file mode 100644 index 00000000000..9848e491b80 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result @@ -0,0 +1,114 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_WRITE_SYNC; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_WRITE_SYNC; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 1" +SET @@global.ROCKSDB_WRITE_SYNC = 1; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 0" +SET @@global.ROCKSDB_WRITE_SYNC = 0; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to on" +SET 
@@global.ROCKSDB_WRITE_SYNC = on; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to off" +SET @@global.ROCKSDB_WRITE_SYNC = off; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 1" +SET @@session.ROCKSDB_WRITE_SYNC = 1; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 0" +SET @@session.ROCKSDB_WRITE_SYNC = 0; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to on" +SET @@session.ROCKSDB_WRITE_SYNC = on; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to off" +SET @@session.ROCKSDB_WRITE_SYNC = off; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_SYNC; 
+@@session.ROCKSDB_WRITE_SYNC +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 'aaa'" +SET @@global.ROCKSDB_WRITE_SYNC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +SET @@global.ROCKSDB_WRITE_SYNC = @start_global_value; +SELECT @@global.ROCKSDB_WRITE_SYNC; +@@global.ROCKSDB_WRITE_SYNC +0 +SET @@session.ROCKSDB_WRITE_SYNC = @start_session_value; +SELECT @@session.ROCKSDB_WRITE_SYNC; +@@session.ROCKSDB_WRITE_SYNC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test new file mode 100644 index 00000000000..fefd9e39af2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test @@ -0,0 +1,39 @@ +--source include/not_embedded.inc +--source include/not_threadpool.inc + +# This test verifies that *all* MyRocks system variables are tested by the +# rocksdb_sys_vars suite. For every MyRocks system variable there must be a +# _basic.test file. 
+# + +# +# we can diff in perl or in sql, as it's my_SQL_test suite, do it in sql +# + +perl; + use File::Basename; + my $dirname=dirname($ENV{MYSQLTEST_FILE}); + my @all_tests=<$dirname/*_basic{,_32,_64}.test>; + open(F, '>', "$ENV{MYSQLTEST_VARDIR}/tmp/rocksdb_sys_vars.all_vars.txt") or die; + binmode F; + print F join "\n", sort map { s/_basic(_32|_64)?\.test$//; basename $_ } @all_tests; +EOF + +create table t1 (test_name text) engine=MyISAM; +create table t2 (variable_name text) engine=MyISAM; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data infile "$MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1; + +insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%"; +insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%"; + +--sorted_result +select variable_name as `There should be *no* long test name listed below:` from t2 + where length(variable_name) > 50; + +--sorted_result +select variable_name as `There should be *no* variables listed below:` from t2 + left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; + +drop table t1; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test new file mode 100644 index 00000000000..a6b753ba87a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ACCESS_HINT_ON_COMPACTION_START +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test new file mode 100644 index 00000000000..b6ccea0f882 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ADVISE_RANDOM_ON_OPEN +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test new file mode 100644 index 00000000000..b250aa5eb7f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test new file mode 100644 index 00000000000..067f5820045 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_MMAP_READS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test new file mode 100644 index 00000000000..51fbf62d5a9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_MMAP_WRITES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test new file mode 100644 index 00000000000..c38d0c7b210 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_OS_BUFFER +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test new file mode 100644 index 00000000000..e0c2bd366cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_BACKGROUND_SYNC +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test new file mode 100644 index 00000000000..8e49110513a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BASE_BACKGROUND_COMPACTIONS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test new file mode 100644 index 00000000000..68715796a04 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_CACHE_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test new file mode 100644 index 00000000000..2b14e1fb654 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_RESTART_INTERVAL +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test new file mode 100644 index 00000000000..11d18e3223f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let 
$sys_var=ROCKSDB_BLOCK_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test new file mode 100644 index 00000000000..a54700aae4d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_SIZE_DEVIATION +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test new file mode 100644 index 00000000000..6cd9e0e1560 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_BULK_LOAD +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test new file mode 100644 index 00000000000..1b57255202b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO 
valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_BULK_LOAD_SIZE +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test new file mode 100644 index 00000000000..2958273695d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BYTES_PER_SYNC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test new file mode 100644 index 00000000000..db1f5936812 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test new file mode 100644 index 00000000000..44126e35f57 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(99); + +CREATE 
TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CHECKSUMS_PCT +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test new file mode 100644 index 00000000000..c47c62e41b4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_COLLECT_SST_PROPERTIES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test new file mode 100644 index 00000000000..62c8e680aab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_COMMIT_IN_THE_MIDDLE +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test new file mode 100644 index 00000000000..c65f722fe6e --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_COMPACT_CF +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test new file mode 100644 index 00000000000..ba45defb7a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@global.rocksdb_compaction_readahead_size = -1; +SELECT @@global.rocksdb_compaction_readahead_size; + +--let $sys_var=ROCKSDB_COMPACTION_READAHEAD_SIZE +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test new file mode 100644 index 00000000000..5ec719baeb6 --- /dev/null 
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test new file mode 100644 index 00000000000..6c35ed634f7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test new file mode 100644 index 00000000000..ff132f7049c --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test new file mode 100644 index 00000000000..b38c79b5ef0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test new file mode 100644 index 00000000000..2850c7a1a38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test @@ -0,0 +1,29 @@ +--source 
include/have_rocksdb.inc + +--eval SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT + +# Test using tmp/abc +--replace_result $MYSQL_TMP_DIR TMP +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/abc' +--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT + +# Test using tmp/def +--replace_result $MYSQL_TMP_DIR TMP +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/def' +--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT + +# Should fail because it is not a session +--Error ER_GLOBAL_VARIABLE +--eval SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444 + +# Set back to original value +# validate that DEFAULT causes failure in creating checkpoint since +# DEFAULT == '' +--error ER_UNKNOWN_ERROR +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value + +# clean up +--exec rm -r $MYSQL_TMP_DIR/abc +--exec rm -r $MYSQL_TMP_DIR/def diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test new file mode 100644 index 00000000000..77422aa164c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CREATE_IF_MISSING +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test new file mode 100644 index 00000000000..b8aeb6c9b19 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test new file mode 100644 index 00000000000..20f33d6bdfd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DATADIR +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test new file mode 100644 index 00000000000..7ef5422dcd3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DB_WRITE_BUFFER_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test 
new file mode 100644 index 00000000000..52e25ab358f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test new file mode 100644 index 00000000000..f756d1eb2f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DEFAULT_CF_OPTIONS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test new file mode 100644 index 00000000000..744bd946d9a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test new file mode 100644 index 00000000000..061a4c902b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_DISABLE_2PC +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test new file mode 100644 index 00000000000..b365370f214 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DISABLEDATASYNC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test new file mode 100644 index 00000000000..407093acbea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO 
invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_ENABLE_BULK_LOAD_API +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test new file mode 100644 index 00000000000..251d7d5803d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ENABLE_THREAD_TRACKING +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test new file mode 100644 index 00000000000..9d6502598b0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test new file mode 100644 index 00000000000..495770e8efb --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_ERROR_IF_EXISTS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test new file mode 100644 index 00000000000..7fc4c3a77f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test cardinality for analyze statements after flushing table +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +--sorted_result +SELECT * FROM t1; + +set session rocksdb_flush_memtable_on_analyze=off; +ANALYZE TABLE t1; +SHOW INDEXES FROM t1; + +set session rocksdb_flush_memtable_on_analyze=on; +ANALYZE TABLE t1; +SHOW INDEXES FROM t1; +DROP TABLE t1; + +## +## test data length for show table status statements for tables with few rows +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +--sorted_result +SELECT * FROM t1; + +SHOW TABLE STATUS LIKE 't1'; +ANALYZE TABLE t1; +SHOW TABLE STATUS LIKE 't1'; + +DROP TABLE t1; 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test new file mode 100644 index 00000000000..9529fae7516 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test new file mode 100644 index 00000000000..08e8d0c16de --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@session.rocksdb_force_index_records_in_range = -1; +SELECT @@session.rocksdb_force_index_records_in_range; + +--let $sys_var=ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +--let $read_only=0 +--let $session=1 +--source 
suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test new file mode 100644 index 00000000000..5899f7b67d0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_HASH_INDEX_ALLOW_COLLISION +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test new file mode 100644 index 00000000000..711703c2148 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_INDEX_TYPE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test new file mode 100644 index 00000000000..990a9a62148 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test @@ -0,0 +1,21 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('fatal_level'); +INSERT INTO valid_values VALUES('error_level'); +INSERT INTO valid_values VALUES('warn_level'); +INSERT INTO valid_values VALUES('info_level'); +INSERT INTO valid_values VALUES('debug_level'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES(5); +INSERT INTO invalid_values VALUES(6); +INSERT INTO 
invalid_values VALUES('foo'); + +--let $sys_var=ROCKSDB_INFO_LOG_LEVEL +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test new file mode 100644 index 00000000000..741e20fac9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_IS_FD_CLOSE_ON_EXEC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test new file mode 100644 index 00000000000..511f9f8a06d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_KEEP_LOG_FILE_NUM +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test new file mode 100644 index 00000000000..52f7f502d96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); + +CREATE TABLE invalid_values (value 
varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES(2); +INSERT INTO invalid_values VALUES(1000); + +--let $sys_var=ROCKSDB_LOCK_SCANNED_ROWS +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test new file mode 100644 index 00000000000..0c524db9cbd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_LOCK_WAIT_TIMEOUT +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test new file mode 100644 index 00000000000..76aee161efc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_LOG_FILE_TIME_TO_ROLL +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test new file mode 100644 index 00000000000..48d14fbf9f6 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MANIFEST_PREALLOCATION_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test new file mode 100644 index 00000000000..441c0577c10 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test new file mode 100644 index 00000000000..de3ab148ec6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_BACKGROUND_FLUSHES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test new file mode 100644 index 00000000000..b0dca55e18b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_LOG_FILE_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test new file mode 100644 index 00000000000..9464f0aa1ad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_MANIFEST_FILE_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test new file mode 100644 index 00000000000..c82af39f7b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_OPEN_FILES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test new file mode 100644 index 00000000000..a9e440d4b98 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_MAX_ROW_LOCKS +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test new file mode 100644 index 00000000000..0ebc9c204fb --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_SUBCOMPACTIONS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test new file mode 100644 index 00000000000..0f881868ae2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_TOTAL_WAL_SIZE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test new file mode 100644 index 00000000000..8e2dda64d4a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test @@ -0,0 +1,50 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100)); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP INDEX kj on t1; +DROP INDEX kij ON t1; + +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# Reverse CF testing, needs to be added to SSTFileWriter in reverse order 
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100)); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test new file mode 100644 index 00000000000..48e89137344 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP INDEX kj on t1; +DROP INDEX kij ON t1; + +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test new file mode 100644 index 00000000000..cc84a2c60be --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let 
$sys_var=ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test new file mode 100644 index 00000000000..39c84fb2c2d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_NO_BLOCK_CACHE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test new file mode 100644 index 00000000000..bc680c0772a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_OVERRIDE_CF_OPTIONS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test new file mode 100644 index 00000000000..5b0e4798678 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_PARANOID_CHECKS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test new file mode 100644 index 00000000000..fd2f3098840 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_PAUSE_BACKGROUND_WORK +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test new file mode 100644 index 00000000000..1fd61a80955 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(3); +INSERT INTO valid_values VALUES(4); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_PERF_CONTEXT_LEVEL +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test new file mode 100644 index 00000000000..af095097909 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test new file mode 100644 index 00000000000..d683e8045da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test @@ -0,0 +1,63 @@ +--source include/have_rocksdb.inc + +# Attempt to set the value - this should generate a warning as we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000; + +# Now shut down and come back up with the rate limiter enabled and retest setting the variable + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart the +# server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name + +# Send shutdown to the connected server and give it 10 seconds to die before +# zapping it +shutdown_server 10; + +# Attempt to restart the server with the rate limiter on +--exec echo "restart:--rocksdb_rate_limiter_bytes_per_sec=10000" >$_expect_file_name +--sleep 5 + +# Wait for reconnect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# The valid_values table lists the values that we want to make sure that the system will allow +# us to set for rocksdb_rate_limiter_bytes_per_sec +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000); + +# The invalid_values table lists the values that we don't want 
to allow for the variable +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); + +# Test all the valid and invalid values +--let $sys_var=ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + +# Zero is an invalid value if the rate limiter is turned on, but it won't be rejected by the +# SET command but will generate a warning. + +# Attempt to set the value to 0 - this should generate a warning as we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0; + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1; + +# Restart the server without the rate limiter +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--exec echo "restart" >$_expect_file_name +--sleep 5 + +# Wait for reconnect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test new file mode 100644 index 00000000000..9ff20edcfb2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('a'); +INSERT INTO valid_values VALUES('b'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_READ_FREE_RPL_TABLES +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test new file mode 100644 index 00000000000..4fab0b3123c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_RECORDS_IN_RANGE +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test new file mode 100644 index 00000000000..f6c0a219a9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_RPL_SKIP_TX_API +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test new file mode 100644 index 00000000000..a71df41affc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test new file mode 100644 index 00000000000..b33f444199b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SIGNAL_DROP_INDEX_THREAD +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test new file mode 100644 index 00000000000..80a9c4b3c43 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test new file mode 100644 index 00000000000..2465e569f79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SKIP_FILL_CACHE +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test new file mode 100644 index 00000000000..fe90a49365b --- /dev/null 
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test @@ -0,0 +1,21 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test new file mode 100644 index 00000000000..c64eeedb594 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES("aaa"); +INSERT INTO valid_values VALUES("bbb"); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test new file mode 100644 index 00000000000..7854faa8ddf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test @@ -0,0 +1,6 @@ +--source 
include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_STATS_DUMP_PERIOD_SEC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test new file mode 100644 index 00000000000..023b6420b96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_STORE_CHECKSUMS +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test new file mode 100644 index 00000000000..eabc45ef6be --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_STRICT_COLLATION_CHECK +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP 
TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test new file mode 100644 index 00000000000..4eb96488840 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +# We cannot use the rocskdb_sys_var.inc script as some of the strings we set +# need to be quoted and that doesn't work with this script. Run through +# valid options by hand. + +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +SELECT @start_global_value; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = ""; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session." 
+--Error ER_GLOBAL_VARIABLE +SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444; + +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test new file mode 100644 index 00000000000..77da9df9acd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_TABLE_CACHE_NUMSHARDBITS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test new file mode 100644 index 00000000000..c3016742042 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + +--let $sys_var=ROCKSDB_TABLE_STATS_SAMPLING_PCT +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test new file mode 100644 index 00000000000..302a4173efc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_UNSAFE_FOR_BINLOG +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test new file mode 100644 index 00000000000..a0f0a212987 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_ADAPTIVE_MUTEX +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test new file mode 100644 index 00000000000..0d8e35d03cb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_FSYNC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test new file mode 100644 index 00000000000..6eb965c5863 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_VALIDATE_TABLES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test new file mode 100644 index 00000000000..d8c9c559703 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_VERIFY_CHECKSUMS +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test new file mode 100644 index 00000000000..eeeeed8f767 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test new file mode 100644 index 00000000000..0d667d8de2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_DIR +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test new file mode 100644 index 00000000000..e0d345b4268 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_WAL_RECOVERY_MODE +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test new file mode 100644 index 00000000000..d597b5d8016 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_SIZE_LIMIT_MB +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test new file mode 100644 index 00000000000..9342c861168 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_TTL_SECONDS +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test new file mode 100644 index 00000000000..ff332a6fbe4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WHOLE_KEY_FILTERING +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test new file mode 100644 index 00000000000..f7fa33e03e6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_WRITE_DISABLE_WAL +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test new file mode 100644 index 00000000000..1c9d35f4a57 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test new file mode 100644 index 00000000000..3655ddb8c69 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_WRITE_SYNC +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc new file mode 100644 index 00000000000..fc8cd97fc72 --- /dev/null +++ b/storage/rocksdb/properties_collector.cc @@ -0,0 +1,555 @@ +/* + Copyright (c) 2015, Facebook, Inc. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header file */ +#include "./properties_collector.h" + +/* Standard C++ header files */ +#include +#include +#include +#include + +/* MySQL header files */ +#include "./log.h" +#include "./my_stacktrace.h" +#include "./sql_array.h" + +/* MyRocks header files */ +#include "./rdb_datadic.h" +#include "./rdb_utils.h" + +namespace myrocks { + +std::atomic rocksdb_num_sst_entry_put(0); +std::atomic rocksdb_num_sst_entry_delete(0); +std::atomic rocksdb_num_sst_entry_singledelete(0); +std::atomic rocksdb_num_sst_entry_merge(0); +std::atomic rocksdb_num_sst_entry_other(0); +my_bool rocksdb_compaction_sequential_deletes_count_sd = false; + +Rdb_tbl_prop_coll::Rdb_tbl_prop_coll( + Rdb_ddl_manager* ddl_manager, + Rdb_compact_params params, + uint32_t cf_id, + const uint8_t table_stats_sampling_pct +) : + m_cf_id(cf_id), + m_ddl_manager(ddl_manager), + m_last_stats(nullptr), + m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l), + m_file_size(0), m_params(params), + m_table_stats_sampling_pct(table_stats_sampling_pct), + m_seed(time(nullptr)), + m_card_adj_extra(1.) +{ + // We need to adjust the index cardinality numbers based on the sampling + // rate so that the output of "SHOW INDEX" command will reflect reality + // more closely. It will still be an approximation, just a better one. 
+ if (m_table_stats_sampling_pct > 0) { + m_card_adj_extra = 100. / m_table_stats_sampling_pct; + } + + m_deleted_rows_window.resize(m_params.m_window, false); +} + +/* + This function is called by RocksDB for every key in the SST file +*/ +rocksdb::Status +Rdb_tbl_prop_coll::AddUserKey( + const rocksdb::Slice& key, const rocksdb::Slice& value, + rocksdb::EntryType type, rocksdb::SequenceNumber seq, + uint64_t file_size +) { + if (key.size() >= 4) { + AdjustDeletedRows(type); + + m_rows++; + + CollectStatsForRow(key, value, type, file_size); + } + + return rocksdb::Status::OK(); +} + +void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) +{ + if (m_params.m_window > 0) + { + // record the "is deleted" flag into the sliding window + // the sliding window is implemented as a circular buffer + // in m_deleted_rows_window vector + // the current position in the circular buffer is pointed at by + // m_rows % m_deleted_rows_window.size() + // m_deleted_rows is the current number of 1's in the vector + // --update the counter for the element which will be overridden + bool is_delete= (type == rocksdb::kEntryDelete || + (type == rocksdb::kEntrySingleDelete && + rocksdb_compaction_sequential_deletes_count_sd)); + + // Only make changes if the value at the current position needs to change + if (is_delete != m_deleted_rows_window[m_window_pos]) + { + // Set or clear the flag at the current position as appropriate + m_deleted_rows_window[m_window_pos]= is_delete; + if (!is_delete) + { + m_deleted_rows--; + } + else if (++m_deleted_rows > m_max_deleted_rows) + { + m_max_deleted_rows = m_deleted_rows; + } + } + + if (++m_window_pos == m_params.m_window) + { + m_window_pos = 0; + } + } +} + +Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( + const rocksdb::Slice& key) +{ + GL_INDEX_ID gl_index_id = { + .cf_id = m_cf_id, + .index_id = rdb_netbuf_to_uint32(reinterpret_cast(key.data())) + }; + + if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id) 
+ { + m_keydef = nullptr; + + // starting a new table + // add the new element into m_stats + m_stats.emplace_back(gl_index_id); + m_last_stats = &m_stats.back(); + + if (m_ddl_manager) + { + // safe_find() returns a std::shared_ptr with the count + // incremented (so it can't be deleted out from under us) and with + // the mutex locked (if setup has not occurred yet). We must make + // sure to free the mutex (via unblock_setup()) when we are done + // with this object. Currently this happens earlier in this function + // when we are switching to a new Rdb_key_def and when this object + // is destructed. + m_keydef = m_ddl_manager->safe_find(gl_index_id); + if (m_keydef != nullptr) + { + // resize the array to the number of columns. + // It will be initialized with zeroes + m_last_stats->m_distinct_keys_per_prefix.resize( + m_keydef->get_key_parts()); + m_last_stats->m_name = m_keydef->get_name(); + } + } + m_last_key.clear(); + } + + return m_last_stats; +} + +void Rdb_tbl_prop_coll::CollectStatsForRow( + const rocksdb::Slice& key, const rocksdb::Slice& value, + rocksdb::EntryType type, uint64_t file_size) +{ + auto stats = AccessStats(key); + + stats->m_data_size += key.size()+value.size(); + + // Incrementing per-index entry-type statistics + switch (type) { + case rocksdb::kEntryPut: + stats->m_rows++; + break; + case rocksdb::kEntryDelete: + stats->m_entry_deletes++; + break; + case rocksdb::kEntrySingleDelete: + stats->m_entry_single_deletes++; + break; + case rocksdb::kEntryMerge: + stats->m_entry_merges++; + break; + case rocksdb::kEntryOther: + stats->m_entry_others++; + break; + default: + // NO_LINT_DEBUG + sql_print_error("RocksDB: Unexpected entry type found: %u. 
" + "This should not happen so aborting the system.", type); + abort_with_stack_traces(); + break; + } + + stats->m_actual_disk_size += file_size - m_file_size; + m_file_size = file_size; + + if (m_keydef != nullptr && ShouldCollectStats()) + { + std::size_t column = 0; + bool new_key = true; + + if (!m_last_key.empty()) + { + rocksdb::Slice last(m_last_key.data(), m_last_key.size()); + new_key = (m_keydef->compare_keys(&last, &key, &column) == 0); + } + + if (new_key) + { + DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size()); + + for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) + { + stats->m_distinct_keys_per_prefix[i]++; + } + + // assign new last_key for the next call + // however, we only need to change the last key + // if one of the first n-1 columns is different + // If the n-1 prefix is the same, no sense in storing + // the new key + if (column < stats->m_distinct_keys_per_prefix.size()) + { + m_last_key.assign(key.data(), key.size()); + } + } + } +} + +const char* Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__"; + +/* + This function is called by RocksDB to compute properties to store in sst file +*/ +rocksdb::Status +Rdb_tbl_prop_coll::Finish( + rocksdb::UserCollectedProperties* properties +) { + uint64_t num_sst_entry_put = 0; + uint64_t num_sst_entry_delete = 0; + uint64_t num_sst_entry_singledelete = 0; + uint64_t num_sst_entry_merge = 0; + uint64_t num_sst_entry_other = 0; + + for (auto it = m_stats.begin(); it != m_stats.end(); it++) + { + num_sst_entry_put += it->m_rows; + num_sst_entry_delete += it->m_entry_deletes; + num_sst_entry_singledelete += it->m_entry_single_deletes; + num_sst_entry_merge += it->m_entry_merges; + num_sst_entry_other += it->m_entry_others; + } + + if (num_sst_entry_put > 0) + { + rocksdb_num_sst_entry_put += num_sst_entry_put; + } + + if (num_sst_entry_delete > 0) + { + rocksdb_num_sst_entry_delete += num_sst_entry_delete; + } + + if (num_sst_entry_singledelete > 0) + { + 
rocksdb_num_sst_entry_singledelete += num_sst_entry_singledelete; + } + + if (num_sst_entry_merge > 0) + { + rocksdb_num_sst_entry_merge += num_sst_entry_merge; + } + + if (num_sst_entry_other > 0) + { + rocksdb_num_sst_entry_other += num_sst_entry_other; + } + + properties->insert({INDEXSTATS_KEY, + Rdb_index_stats::materialize(m_stats, m_card_adj_extra)}); + return rocksdb::Status::OK(); +} + +bool Rdb_tbl_prop_coll::NeedCompact() const { + return + m_params.m_deletes && + (m_params.m_window > 0) && + (m_file_size > m_params.m_file_size) && + (m_max_deleted_rows > m_params.m_deletes); +} + +bool Rdb_tbl_prop_coll::ShouldCollectStats() { + // Zero means that we'll use all the keys to update statistics. + if (!m_table_stats_sampling_pct || + RDB_TBL_STATS_SAMPLE_PCT_MAX == m_table_stats_sampling_pct) { + return true; + } + + int val = rand_r(&m_seed) % + (RDB_TBL_STATS_SAMPLE_PCT_MAX - RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + + RDB_TBL_STATS_SAMPLE_PCT_MIN; + + DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN); + DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX); + + return val <= m_table_stats_sampling_pct; +} + +/* + Returns the same as above, but in human-readable way for logging +*/ +rocksdb::UserCollectedProperties +Rdb_tbl_prop_coll::GetReadableProperties() const { + std::string s; +#ifdef DBUG_OFF + s.append("[..."); + s.append(std::to_string(m_stats.size())); + s.append(" records...]"); +#else + bool first = true; + for (auto it : m_stats) { + if (first) { + first = false; + } else { + s.append(","); + } + s.append(GetReadableStats(it)); + } + #endif + return rocksdb::UserCollectedProperties{{INDEXSTATS_KEY, s}}; +} + +std::string +Rdb_tbl_prop_coll::GetReadableStats( + const Rdb_index_stats& it +) { + std::string s; + s.append("("); + s.append(std::to_string(it.m_gl_index_id.cf_id)); + s.append(", "); + s.append(std::to_string(it.m_gl_index_id.index_id)); + s.append("):{name:"); + s.append(it.m_name); + s.append(", size:"); + 
s.append(std::to_string(it.m_data_size)); + s.append(", m_rows:"); + s.append(std::to_string(it.m_rows)); + s.append(", m_actual_disk_size:"); + s.append(std::to_string(it.m_actual_disk_size)); + s.append(", deletes:"); + s.append(std::to_string(it.m_entry_deletes)); + s.append(", single_deletes:"); + s.append(std::to_string(it.m_entry_single_deletes)); + s.append(", merges:"); + s.append(std::to_string(it.m_entry_merges)); + s.append(", others:"); + s.append(std::to_string(it.m_entry_others)); + s.append(", distincts per prefix: ["); + for (auto num : it.m_distinct_keys_per_prefix) { + s.append(std::to_string(num)); + s.append(" "); + } + s.append("]}"); + return s; +} + +/* + Given the properties of an SST file, reads the stats from it and returns it. +*/ + +void Rdb_tbl_prop_coll::read_stats_from_tbl_props( + const std::shared_ptr& table_props, + std::vector* out_stats_vector) +{ + DBUG_ASSERT(out_stats_vector != nullptr); + const auto& user_properties = table_props->user_collected_properties; + auto it2 = user_properties.find(std::string(INDEXSTATS_KEY)); + if (it2 != user_properties.end()) + { + auto result __attribute__((__unused__)) = + Rdb_index_stats::unmaterialize(it2->second, out_stats_vector); + DBUG_ASSERT(result == 0); + } +} + + +/* + Serializes an array of Rdb_index_stats into a network string. 
+*/ +std::string Rdb_index_stats::materialize( + const std::vector& stats, + const float card_adj_extra) +{ + String ret; + rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES); + for (auto i : stats) { + rdb_netstr_append_uint32(&ret, i.m_gl_index_id.cf_id); + rdb_netstr_append_uint32(&ret, i.m_gl_index_id.index_id); + DBUG_ASSERT(sizeof i.m_data_size <= 8); + rdb_netstr_append_uint64(&ret, i.m_data_size); + rdb_netstr_append_uint64(&ret, i.m_rows); + rdb_netstr_append_uint64(&ret, i.m_actual_disk_size); + rdb_netstr_append_uint64(&ret, i.m_distinct_keys_per_prefix.size()); + rdb_netstr_append_uint64(&ret, i.m_entry_deletes); + rdb_netstr_append_uint64(&ret, i.m_entry_single_deletes); + rdb_netstr_append_uint64(&ret, i.m_entry_merges); + rdb_netstr_append_uint64(&ret, i.m_entry_others); + for (auto num_keys : i.m_distinct_keys_per_prefix) { + float upd_num_keys = num_keys * card_adj_extra; + rdb_netstr_append_uint64(&ret, static_cast(upd_num_keys)); + } + } + + return std::string((char*) ret.ptr(), ret.length()); +} + +/** + @brief + Reads an array of Rdb_index_stats from a string. + @return 1 if it detects any inconsistency in the input + @return 0 if completes successfully +*/ +int Rdb_index_stats::unmaterialize( + const std::string& s, std::vector* ret) +{ + const uchar* p= rdb_std_str_to_uchar_ptr(s); + const uchar* p2= p + s.size(); + + DBUG_ASSERT(ret != nullptr); + + if (p+2 > p2) + { + return 1; + } + + int version= rdb_netbuf_read_uint16(&p); + Rdb_index_stats stats; + // Make sure version is within supported range. + if (version < INDEX_STATS_VERSION_INITIAL || + version > INDEX_STATS_VERSION_ENTRY_TYPES) + { + // NO_LINT_DEBUG + sql_print_error("Index stats version %d was outside of supported range. 
" + "This should not happen so aborting the system.", version); + abort_with_stack_traces(); + } + + size_t needed = sizeof(stats.m_gl_index_id.cf_id)+ + sizeof(stats.m_gl_index_id.index_id)+ + sizeof(stats.m_data_size)+ + sizeof(stats.m_rows)+ + sizeof(stats.m_actual_disk_size)+ + sizeof(uint64); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) + { + needed += sizeof(stats.m_entry_deletes)+ + sizeof(stats.m_entry_single_deletes)+ + sizeof(stats.m_entry_merges)+ + sizeof(stats.m_entry_others); + } + + while (p < p2) + { + if (p+needed > p2) + { + return 1; + } + rdb_netbuf_read_gl_index(&p, &stats.m_gl_index_id); + stats.m_data_size= rdb_netbuf_read_uint64(&p); + stats.m_rows= rdb_netbuf_read_uint64(&p); + stats.m_actual_disk_size= rdb_netbuf_read_uint64(&p); + stats.m_distinct_keys_per_prefix.resize(rdb_netbuf_read_uint64(&p)); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) + { + stats.m_entry_deletes= rdb_netbuf_read_uint64(&p); + stats.m_entry_single_deletes= rdb_netbuf_read_uint64(&p); + stats.m_entry_merges= rdb_netbuf_read_uint64(&p); + stats.m_entry_others= rdb_netbuf_read_uint64(&p); + } + if (p+stats.m_distinct_keys_per_prefix.size() + *sizeof(stats.m_distinct_keys_per_prefix[0]) > p2) + { + return 1; + } + for (std::size_t i= 0; i < stats.m_distinct_keys_per_prefix.size(); i++) + { + stats.m_distinct_keys_per_prefix[i]= rdb_netbuf_read_uint64(&p); + } + ret->push_back(stats); + } + return 0; +} + +/* + Merges one Rdb_index_stats into another. 
Can be used to come up with the stats + for the index based on stats for each sst +*/ +void Rdb_index_stats::merge( + const Rdb_index_stats& s, bool increment, int64_t estimated_data_len) +{ + std::size_t i; + + m_gl_index_id = s.m_gl_index_id; + if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size()) + { + m_distinct_keys_per_prefix.resize(s.m_distinct_keys_per_prefix.size()); + } + if (increment) + { + m_rows += s.m_rows; + m_data_size += s.m_data_size; + + /* + The Data_length and Avg_row_length are trailing statistics, meaning + they don't get updated for the current SST until the next SST is + written. So, if rocksdb reports the data_length as 0, + we make a reasoned estimate for the data_file_length for the + index in the current SST. + */ + m_actual_disk_size += s.m_actual_disk_size ? s.m_actual_disk_size : + estimated_data_len * s.m_rows; + m_entry_deletes += s.m_entry_deletes; + m_entry_single_deletes += s.m_entry_single_deletes; + m_entry_merges += s.m_entry_merges; + m_entry_others += s.m_entry_others; + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) + { + m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i]; + } + } + else + { + m_rows -= s.m_rows; + m_data_size -= s.m_data_size; + m_actual_disk_size -= s.m_actual_disk_size ? s.m_actual_disk_size : + estimated_data_len * s.m_rows; + m_entry_deletes -= s.m_entry_deletes; + m_entry_single_deletes -= s.m_entry_single_deletes; + m_entry_merges -= s.m_entry_merges; + m_entry_others -= s.m_entry_others; + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) + { + m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i]; + } + } +} + +} // namespace myrocks diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h new file mode 100644 index 00000000000..1781d8a26c6 --- /dev/null +++ b/storage/rocksdb/properties_collector.h @@ -0,0 +1,190 @@ +/* + Copyright (c) 2015, Facebook, Inc. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ system header files */ +#include +#include +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" + +namespace myrocks { + +class Rdb_ddl_manager; +class Rdb_key_def; + +extern std::atomic rocksdb_num_sst_entry_put; +extern std::atomic rocksdb_num_sst_entry_delete; +extern std::atomic rocksdb_num_sst_entry_singledelete; +extern std::atomic rocksdb_num_sst_entry_merge; +extern std::atomic rocksdb_num_sst_entry_other; +extern my_bool rocksdb_compaction_sequential_deletes_count_sd; + + +struct Rdb_compact_params +{ + uint64_t m_deletes, m_window, m_file_size; +}; + + +struct Rdb_index_stats +{ + enum { + INDEX_STATS_VERSION_INITIAL= 1, + INDEX_STATS_VERSION_ENTRY_TYPES= 2, + }; + GL_INDEX_ID m_gl_index_id; + int64_t m_data_size, m_rows, m_actual_disk_size; + int64_t m_entry_deletes, m_entry_single_deletes; + int64_t m_entry_merges, m_entry_others; + std::vector m_distinct_keys_per_prefix; + std::string m_name; // name is not persisted + + static std::string materialize(const std::vector& stats, + const float card_adj_extra); + static int unmaterialize(const std::string& s, + std::vector* ret); + + Rdb_index_stats() : Rdb_index_stats({0, 0}) {} + explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) : + 
m_gl_index_id(gl_index_id), + m_data_size(0), + m_rows(0), + m_actual_disk_size(0), + m_entry_deletes(0), + m_entry_single_deletes(0), + m_entry_merges(0), + m_entry_others(0) {} + + void merge(const Rdb_index_stats& s, bool increment = true, + int64_t estimated_data_len = 0); +}; + + +class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector +{ + public: + Rdb_tbl_prop_coll( + Rdb_ddl_manager* ddl_manager, + Rdb_compact_params params, + uint32_t cf_id, + const uint8_t table_stats_sampling_pct + ); + + /* + Override parent class's virtual methods of interest. + */ + + virtual rocksdb::Status AddUserKey( + const rocksdb::Slice& key, const rocksdb::Slice& value, + rocksdb::EntryType type, rocksdb::SequenceNumber seq, + uint64_t file_size); + + virtual rocksdb::Status Finish(rocksdb::UserCollectedProperties* properties) override; + + virtual const char* Name() const override { + return "Rdb_tbl_prop_coll"; + } + + rocksdb::UserCollectedProperties GetReadableProperties() const override; + + bool NeedCompact() const override; + + public: + uint64_t GetMaxDeletedRows() const { + return m_max_deleted_rows; + } + + static void read_stats_from_tbl_props( + const std::shared_ptr& table_props, + std::vector* out_stats_vector); + + private: + static std::string GetReadableStats(const Rdb_index_stats& it); + + bool ShouldCollectStats(); + void CollectStatsForRow(const rocksdb::Slice& key, + const rocksdb::Slice& value, rocksdb::EntryType type, uint64_t file_size); + Rdb_index_stats* AccessStats(const rocksdb::Slice& key); + void AdjustDeletedRows(rocksdb::EntryType type); + + private: + uint32_t m_cf_id; + std::shared_ptr m_keydef; + Rdb_ddl_manager* m_ddl_manager; + std::vector m_stats; + Rdb_index_stats* m_last_stats; + static const char* INDEXSTATS_KEY; + + // last added key + std::string m_last_key; + + // floating window to count deleted rows + std::vector m_deleted_rows_window; + uint64_t m_rows, m_window_pos, m_deleted_rows, m_max_deleted_rows; + uint64_t 
m_file_size; + Rdb_compact_params m_params; + uint8_t m_table_stats_sampling_pct; + unsigned int m_seed; + float m_card_adj_extra; +}; + + +class Rdb_tbl_prop_coll_factory + : public rocksdb::TablePropertiesCollectorFactory { + public: + explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager* ddl_manager) + : m_ddl_manager(ddl_manager) { + } + + /* + Override parent class's virtual methods of interest. + */ + + virtual rocksdb::TablePropertiesCollector* CreateTablePropertiesCollector( + rocksdb::TablePropertiesCollectorFactory::Context context) override { + return new Rdb_tbl_prop_coll( + m_ddl_manager, m_params, context.column_family_id, + m_table_stats_sampling_pct); + } + + virtual const char* Name() const override { + return "Rdb_tbl_prop_coll_factory"; + } + + public: + void SetCompactionParams(const Rdb_compact_params& params) { + m_params = params; + } + + void SetTableStatsSamplingPct(const uint8_t table_stats_sampling_pct) { + m_table_stats_sampling_pct = table_stats_sampling_pct; + } + + private: + Rdb_ddl_manager* m_ddl_manager; + Rdb_compact_params m_params; + uint8_t m_table_stats_sampling_pct; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h new file mode 100644 index 00000000000..d29f365ba31 --- /dev/null +++ b/storage/rocksdb/rdb_buff.h @@ -0,0 +1,452 @@ +/* + Portions Copyright (c) 2016-Present, Facebook, Inc. + Portions Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include +#include +#include + +namespace myrocks { + +/* + Basic composition functions for a network buffer presented as a MySQL String + ("netstr") which stores data in Network Byte Order (Big Endian). +*/ + +inline void rdb_netstr_append_uint64(my_core::String *out_netstr, uint64 val) +{ + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint64 net_val= htobe64(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + +inline void rdb_netstr_append_uint32(my_core::String *out_netstr, uint32 val) +{ + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint32 net_val= htobe32(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + +inline void rdb_netstr_append_uint16(my_core::String *out_netstr, uint16 val) +{ + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint16 net_val= htobe16(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + + +/* + Basic network buffer ("netbuf") write helper functions. +*/ + +inline void rdb_netbuf_store_uint64(uchar *dst_netbuf, uint64 n) +{ + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). + uint64 net_val= htobe64(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_uint32(uchar *dst_netbuf, uint32 n) +{ + DBUG_ASSERT(dst_netbuf != nullptr); + + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). 
+ uint32 net_val= htobe32(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_uint16(uchar *dst_netbuf, uint16 n) +{ + DBUG_ASSERT(dst_netbuf != nullptr); + + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). + uint16 net_val= htobe16(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_byte(uchar *dst_netbuf, uchar c) +{ + DBUG_ASSERT(dst_netbuf != nullptr); + + *dst_netbuf= c; +} + +inline void rdb_netbuf_store_index(uchar *dst_netbuf, uint32 number) +{ + DBUG_ASSERT(dst_netbuf != nullptr); + + rdb_netbuf_store_uint32(dst_netbuf, number); +} + + +/* + Basic conversion helper functions from network byte order (Big Endian) to host + machine byte order (usually Little Endian). +*/ + +inline uint64 rdb_netbuf_to_uint64(const uchar *netbuf) +{ + DBUG_ASSERT(netbuf != nullptr); + + uint64 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + return be64toh(net_val); +} + +inline uint32 rdb_netbuf_to_uint32(const uchar *netbuf) +{ + DBUG_ASSERT(netbuf != nullptr); + + uint32 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + return be32toh(net_val); +} + +inline uint16 rdb_netbuf_to_uint16(const uchar *netbuf) +{ + DBUG_ASSERT(netbuf != nullptr); + + uint16 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + return be16toh(net_val); +} + +inline uchar rdb_netbuf_to_byte(const uchar* netbuf) +{ + DBUG_ASSERT(netbuf != nullptr); + + return(uchar)netbuf[0]; +} + + +/* + Basic network buffer ("netbuf") read helper functions. + Network buffer stores data in Network Byte Order (Big Endian). 
+ NB: The netbuf is passed as an input/output param, hence after reading, + the netbuf pointer gets advanced to the following byte. +*/ + +inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) +{ + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + uint64 host_val= rdb_netbuf_to_uint64(*netbuf_ptr); + + // Advance pointer. + *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) +{ + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + uint32 host_val= rdb_netbuf_to_uint32(*netbuf_ptr); + + // Advance pointer. + *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) +{ + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + uint16 host_val= rdb_netbuf_to_uint16(*netbuf_ptr); + + // Advance pointer. + *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr, + GL_INDEX_ID *gl_index_id) +{ + DBUG_ASSERT(gl_index_id != nullptr); + DBUG_ASSERT(netbuf_ptr != nullptr); + + gl_index_id->cf_id= rdb_netbuf_read_uint32(netbuf_ptr); + gl_index_id->index_id= rdb_netbuf_read_uint32(netbuf_ptr); +} + +/* + A simple string reader: + - it keeps position within the string that we read from + - it prevents one from reading beyond the end of the string. +*/ + +class Rdb_string_reader +{ + const char* m_ptr; + uint m_len; + public: + explicit Rdb_string_reader(const std::string &str) + { + m_len= str.length(); + if (m_len) + { + m_ptr= &str.at(0); + } + else + { + /* + One can a create a Rdb_string_reader for reading from an empty string + (although attempts to read anything will fail). 
+ We must not access str.at(0), since len==0, we can set ptr to any + value. + */ + m_ptr= nullptr; + } + } + + explicit Rdb_string_reader(const rocksdb::Slice *slice) + { + m_ptr= slice->data(); + m_len= slice->size(); + } + + /* + Read the next @param size bytes. Returns pointer to the bytes read, or + nullptr if the remaining string doesn't have that many bytes. + */ + const char *read(uint size) + { + const char *res; + if (m_len < size) + { + res= nullptr; + } + else + { + res= m_ptr; + m_ptr += size; + m_len -= size; + } + return res; + } + + bool read_uint8(uint* res) + { + const uchar *p; + if (!(p= reinterpret_cast(read(1)))) + return true; // error + else + { + *res= *p; + return false; // Ok + } + } + + bool read_uint16(uint* res) + { + const uchar *p; + if (!(p= reinterpret_cast(read(2)))) + return true; // error + else + { + *res= rdb_netbuf_to_uint16(p); + return false; // Ok + } + } + + uint remaining_bytes() const { return m_len; } + + /* + Return pointer to data that will be read by next read() call (if there is + nothing left to read, returns pointer to beyond the end of previous read() + call) + */ + const char *get_current_ptr() const { return m_ptr; } +}; + + +/* + @brief + A buffer one can write the data to. + + @detail + Suggested usage pattern: + + writer->clear(); + writer->write_XXX(...); + ... 
+ // Ok, writer->ptr() points to the data written so far, + // and writer->get_current_pos() is the length of the data + +*/ + +class Rdb_string_writer +{ + std::vector m_data; + public: + void clear() { m_data.clear(); } + void write_uint8(uint val) + { + m_data.push_back(static_cast(val)); + } + + void write_uint16(uint val) + { + auto size= m_data.size(); + m_data.resize(size + 2); + rdb_netbuf_store_uint16(m_data.data() + size, val); + } + + void write_uint32(uint val) + { + auto size= m_data.size(); + m_data.resize(size + 4); + rdb_netbuf_store_uint32(m_data.data() + size, val); + } + + void write(uchar *new_data, size_t len) + { + m_data.insert(m_data.end(), new_data, new_data + len); + } + + uchar* ptr() { return m_data.data(); } + size_t get_current_pos() const { return m_data.size(); } + + void write_uint8_at(size_t pos, uint new_val) + { + // This function will only overwrite what was written + DBUG_ASSERT(pos < get_current_pos()); + m_data.data()[pos]= new_val; + } + + void write_uint16_at(size_t pos, uint new_val) + { + // This function will only overwrite what was written + DBUG_ASSERT(pos < get_current_pos() && (pos + 1) < get_current_pos()); + rdb_netbuf_store_uint16(m_data.data() + pos, new_val); + } +}; + + +/* + A helper class for writing bits into Rdb_string_writer. + + The class assumes (but doesn't check) that nobody tries to write + anything to the Rdb_string_writer that it is writing to. 
+*/ +class Rdb_bit_writer +{ + Rdb_string_writer *m_writer; + uchar m_offset; + public: + explicit Rdb_bit_writer(Rdb_string_writer* writer_arg) + : m_writer(writer_arg), + m_offset(0) + { + } + + void write(uint size, uint value) + { + DBUG_ASSERT((value & ((1 << size) - 1)) == value); + + while (size > 0) + { + if (m_offset == 0) + { + m_writer->write_uint8(0); + } + // number of bits to put in this byte + uint bits = std::min(size, (uint)(8 - m_offset)); + uchar *last_byte= m_writer->ptr() + m_writer->get_current_pos() - 1; + *last_byte |= + (uchar) ((value >> (size - bits)) & ((1 << bits) - 1)) << m_offset; + size -= bits; + m_offset = (m_offset + bits) & 0x7; + } + } +}; + +class Rdb_bit_reader +{ + const uchar *m_cur; + uchar m_offset; + uint m_ret; + Rdb_string_reader *m_reader; + public: + explicit Rdb_bit_reader(Rdb_string_reader *reader) + : m_cur(nullptr), + m_offset(0), + m_reader(reader) + { + } + + // Returns a pointer to an uint containing the bits read. On subsequent + // reads, the value being pointed to will be overwritten. Returns nullptr + // on failure. + uint *read(uint size) + { + m_ret= 0; + DBUG_ASSERT(size <= 32); + + while (size > 0) + { + if (m_offset == 0) + { + m_cur= (const uchar *)m_reader->read(1); + if (m_cur == nullptr) + { + return nullptr; + } + } + // how many bits from the current byte? 
+ uint bits = std::min((uint)(8 - m_offset), size); + m_ret <<= bits; + m_ret |= (*m_cur >> m_offset) & ((1 << bits) - 1); + size -= bits; + m_offset = (m_offset + bits) & 0x7; + } + + return &m_ret; + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc new file mode 100644 index 00000000000..3ee28b49cb6 --- /dev/null +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -0,0 +1,236 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* This C++ files header file */ +#include "./rdb_cf_manager.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" + +namespace myrocks { + +/* Check if ColumnFamily name says it's a reverse-ordered CF */ +bool Rdb_cf_manager::is_cf_name_reverse(const char *name) +{ + /* nullptr means the default CF is used.. (TODO: can the default CF be + * reverse?) 
*/ + if (name && !strncmp(name, "rev:", 4)) + return true; + else + return false; +} + +#ifdef HAVE_PSI_INTERFACE +static PSI_mutex_key ex_key_cfm; +#endif + +void Rdb_cf_manager::init( + Rdb_cf_options* cf_options, + std::vector *handles) +{ + mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST); + + DBUG_ASSERT(cf_options != nullptr); + DBUG_ASSERT(handles != nullptr); + DBUG_ASSERT(handles->size() > 0); + + m_cf_options = cf_options; + + for (auto cfh : *handles) { + DBUG_ASSERT(cfh != nullptr); + m_cf_name_map[cfh->GetName()] = cfh; + m_cf_id_map[cfh->GetID()] = cfh; + } +} + + +void Rdb_cf_manager::cleanup() +{ + for (auto it : m_cf_name_map) { + delete it.second; + } + mysql_mutex_destroy(&m_mutex); +} + + +/** + Generate Column Family name for per-index column families + + @param res OUT Column Family name +*/ + +void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name, + const char *index_name, + std::string *res) +{ + DBUG_ASSERT(index_name != nullptr); + DBUG_ASSERT(res != nullptr); + + *res = db_table_name + "." + index_name; +} + + +/* + @brief + Find column family by name. If it doesn't exist, create it + + @detail + See Rdb_cf_manager::get_cf +*/ +rocksdb::ColumnFamilyHandle* +Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb, + const char *cf_name, + const std::string& db_table_name, + const char *index_name, + bool *is_automatic) +{ + DBUG_ASSERT(rdb != nullptr); + DBUG_ASSERT(is_automatic != nullptr); + + rocksdb::ColumnFamilyHandle* cf_handle; + + mysql_mutex_lock(&m_mutex); + *is_automatic= false; + if (cf_name == nullptr) + cf_name= DEFAULT_CF_NAME; + + std::string per_index_name; + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) + { + get_per_index_cf_name(db_table_name, index_name, &per_index_name); + cf_name= per_index_name.c_str(); + *is_automatic= true; + } + + auto it = m_cf_name_map.find(cf_name); + if (it != m_cf_name_map.end()) + cf_handle= it->second; + else + { + /* Create a Column Family. 
*/ + std::string cf_name_str(cf_name); + rocksdb::ColumnFamilyOptions opts; + m_cf_options->get_cf_options(cf_name_str, &opts); + + sql_print_information("RocksDB: creating column family %s", cf_name_str.c_str()); + sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); + sql_print_information(" target_file_size_base=%" PRIu64, + opts.target_file_size_base); + + rocksdb::Status s= rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + if (s.ok()) { + m_cf_name_map[cf_handle->GetName()] = cf_handle; + m_cf_id_map[cf_handle->GetID()] = cf_handle; + } else { + cf_handle= nullptr; + } + } + mysql_mutex_unlock(&m_mutex); + + return cf_handle; +} + + +/* + Find column family by its cf_name. + + @detail + dbname.tablename and index_name are also parameters, because + cf_name=PER_INDEX_CF_NAME means that column family name is a function + of table/index name. + + @param out is_automatic TRUE<=> column family name is auto-assigned based on + db_table_name and index_name. +*/ + +rocksdb::ColumnFamilyHandle* +Rdb_cf_manager::get_cf(const char *cf_name, + const std::string& db_table_name, + const char *index_name, + bool *is_automatic) const +{ + DBUG_ASSERT(cf_name != nullptr); + DBUG_ASSERT(is_automatic != nullptr); + + rocksdb::ColumnFamilyHandle* cf_handle; + + *is_automatic= false; + mysql_mutex_lock(&m_mutex); + if (cf_name == nullptr) + cf_name= DEFAULT_CF_NAME; + + std::string per_index_name; + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) + { + get_per_index_cf_name(db_table_name, index_name, &per_index_name); + cf_name= per_index_name.c_str(); + *is_automatic= true; + } + + auto it = m_cf_name_map.find(cf_name); + cf_handle = (it != m_cf_name_map.end()) ? 
it->second : nullptr; + + mysql_mutex_unlock(&m_mutex); + + return cf_handle; +} + +rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t id) const +{ + rocksdb::ColumnFamilyHandle* cf_handle = nullptr; + + mysql_mutex_lock(&m_mutex); + auto it = m_cf_id_map.find(id); + if (it != m_cf_id_map.end()) + cf_handle = it->second; + mysql_mutex_unlock(&m_mutex); + + return cf_handle; +} + +std::vector +Rdb_cf_manager::get_cf_names(void) const +{ + std::vector names; + + mysql_mutex_lock(&m_mutex); + for (auto it : m_cf_name_map) { + names.push_back(it.first); + } + mysql_mutex_unlock(&m_mutex); + return names; +} + +std::vector +Rdb_cf_manager::get_all_cf(void) const +{ + std::vector list; + + mysql_mutex_lock(&m_mutex); + for (auto it : m_cf_id_map) { + list.push_back(it.second); + } + mysql_mutex_unlock(&m_mutex); + + return list; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h new file mode 100644 index 00000000000..5a43b533c6d --- /dev/null +++ b/storage/rocksdb/rdb_cf_manager.h @@ -0,0 +1,106 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ system header files */ +#include +#include +#include + +/* MySQL header files */ +#include "./sql_class.h" + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./rdb_cf_options.h" + +namespace myrocks { + +/* + We need a Column Family (CF) manager. Its functions: + - create column families (synchronized, don't create the same twice) + - keep count in each column family. + = the count is kept on-disk. + = there are no empty CFs. initially count=1. + = then, when doing DDL, we increase or decrease it. + (atomicity is maintained by being in the same WriteBatch with DDLs) + = if DROP discovers that now count=0, it removes the CF. + + Current state is: + - CFs are created in a synchronized way. We can't remove them, yet. +*/ + +class Rdb_cf_manager +{ + std::map m_cf_name_map; + std::map m_cf_id_map; + + mutable mysql_mutex_t m_mutex; + + static + void get_per_index_cf_name(const std::string& db_table_name, + const char *index_name, std::string *res); + + Rdb_cf_options* m_cf_options= nullptr; + +public: + static bool is_cf_name_reverse(const char *name); + + /* + This is called right after the DB::Open() call. The parameters describe column + families that are present in the database. The first CF is the default CF. + */ + void init(Rdb_cf_options* cf_options, + std::vector *handles); + void cleanup(); + + /* + Used by CREATE TABLE. 
+ - cf_name=nullptr means use default column family + - cf_name=_auto_ means use 'dbname.tablename.indexname' + */ + rocksdb::ColumnFamilyHandle* get_or_create_cf( + rocksdb::DB *rdb, const char *cf_name, const std::string& db_table_name, + const char *index_name, bool *is_automatic); + + /* Used by table open */ + rocksdb::ColumnFamilyHandle* get_cf(const char *cf_name, + const std::string& db_table_name, + const char *index_name, + bool *is_automatic) const; + + /* Look up cf by id; used by datadic */ + rocksdb::ColumnFamilyHandle* get_cf(const uint32_t id) const; + + /* Used to iterate over column families for show status */ + std::vector get_cf_names(void) const; + + /* Used to iterate over column families */ + std::vector get_all_cf(void) const; + + // void drop_cf(); -- not implemented so far. + + void get_cf_options( + const std::string &cf_name, + rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__)) { + m_cf_options->get_cf_options(cf_name, opts); + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc new file mode 100644 index 00000000000..ccdb46a654d --- /dev/null +++ b/storage/rocksdb/rdb_cf_options.cc @@ -0,0 +1,340 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* This C++ files header file */ +#include "./rdb_cf_options.h" + +/* C++ system header files */ +#include + +/* MySQL header files */ +#include "./log.h" + +/* RocksDB header files */ +#include "rocksdb/utilities/convenience.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_cf_manager.h" +#include "./rdb_compact_filter.h" + +namespace myrocks { + +Rdb_pk_comparator Rdb_cf_options::s_pk_comparator; +Rdb_rev_comparator Rdb_cf_options::s_rev_pk_comparator; + +bool Rdb_cf_options::init( + size_t default_write_buffer_size, + const rocksdb::BlockBasedTableOptions& table_options, + std::shared_ptr prop_coll_factory, + const char * default_cf_options, + const char * override_cf_options) +{ + m_default_cf_opts.comparator = &s_pk_comparator; + m_default_cf_opts.compaction_filter_factory.reset( + new Rdb_compact_filter_factory); + m_default_cf_opts.write_buffer_size = default_write_buffer_size; + + m_default_cf_opts.table_factory.reset( + rocksdb::NewBlockBasedTableFactory(table_options)); + + if (prop_coll_factory) { + m_default_cf_opts.table_properties_collector_factories.push_back( + prop_coll_factory); + } + + if (!set_default(std::string(default_cf_options)) || + !set_override(std::string(override_cf_options))) { + return false; + } + + return true; +} + +void Rdb_cf_options::get(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *opts) +{ + DBUG_ASSERT(opts != nullptr); + + // set defaults + rocksdb::GetColumnFamilyOptionsFromString(*opts, + m_default_config, + opts); + + // set per-cf config if we have one + Name_to_config_t::iterator it = m_name_map.find(cf_name); + if (it != m_name_map.end()) { + 
rocksdb::GetColumnFamilyOptionsFromString(*opts, + it->second, + opts); + } +} + +bool Rdb_cf_options::set_default(const std::string &default_config) +{ + rocksdb::ColumnFamilyOptions options; + + if (!default_config.empty() && + !rocksdb::GetColumnFamilyOptionsFromString(options, + default_config, + &options).ok()) { + fprintf(stderr, + "Invalid default column family config: %s\n", + default_config.c_str()); + return false; + } + + m_default_config = default_config; + return true; +} + +// Skip over any spaces in the input string. +void Rdb_cf_options::skip_spaces(const std::string& input, size_t* pos) +{ + DBUG_ASSERT(pos != nullptr); + + while (*pos < input.size() && isspace(input[*pos])) + ++(*pos); +} + +// Find a valid column family name. Note that all characters except a +// semicolon are valid (should this change?) and all spaces are trimmed from +// the beginning and end but are not removed between other characters. +bool Rdb_cf_options::find_column_family(const std::string& input, size_t* pos, + std::string* key) +{ + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(key != nullptr); + + size_t beg_pos = *pos; + size_t end_pos = *pos - 1; + + // Loop through the characters in the string until we see a '='. + for ( ; *pos < input.size() && input[*pos] != '='; ++(*pos)) + { + // If this is not a space, move the end position to the current position. + if (input[*pos] != ' ') + end_pos = *pos; + } + + if (end_pos == beg_pos - 1) + { + // NO_LINT_DEBUG + sql_print_warning("No column family found (options: %s)", input.c_str()); + return false; + } + + *key = input.substr(beg_pos, end_pos - beg_pos + 1); + return true; +} + +// Find a valid options portion. Everything is deemed valid within the options +// portion until we hit as many close curly braces as we have seen open curly +// braces. 
+bool Rdb_cf_options::find_options(const std::string& input, size_t* pos, + std::string* options) +{ + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(options != nullptr); + + // Make sure we have an open curly brace at the current position. + if (*pos < input.size() && input[*pos] != '{') + { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, '{' expected (options: %s)", + input.c_str()); + return false; + } + + // Skip the open curly brace and any spaces. + ++(*pos); + skip_spaces(input, pos); + + // Set up our brace_count, the begin position and current end position. + size_t brace_count = 1; + size_t beg_pos = *pos; + + // Loop through the characters in the string until we find the appropriate + // number of closing curly braces. + while (*pos < input.size()) + { + switch (input[*pos]) + { + case '}': + // If this is a closing curly brace and we bring the count down to zero + // we can exit the loop with a valid options string. + if (--brace_count == 0) + { + *options = input.substr(beg_pos, *pos - beg_pos); + ++(*pos); // Move past the last closing curly brace + return true; + } + + break; + + case '{': + // If this is an open curly brace increment the count. + ++brace_count; + break; + + default: + break; + } + + // Move to the next character. + ++(*pos); + } + + // We never found the correct number of closing curly braces. + // Generate an error. + // NO_LINT_DEBUG + sql_print_warning("Mismatched cf options, '}' expected (options: %s)", + input.c_str()); + return false; +} + +bool Rdb_cf_options::find_cf_options_pair(const std::string& input, + size_t* pos, + std::string* cf, + std::string* opt_str) +{ + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(cf != nullptr); + DBUG_ASSERT(opt_str != nullptr); + + // Skip any spaces. + skip_spaces(input, pos); + + // We should now have a column family name. + if (!find_column_family(input, pos, cf)) + return false; + + // If we are at the end of the input then we generate an error. 
+ if (*pos == input.size()) + { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, '=' expected (options: %s)", + input.c_str()); + return false; + } + + // Skip equal sign and any spaces after it + ++(*pos); + skip_spaces(input, pos); + + // Find the options for this column family. This should be in the format + // {} where may contain embedded pairs of curly braces. + if (!find_options(input, pos, opt_str)) + return false; + + // Skip any trailing spaces after the option string. + skip_spaces(input, pos); + + // We should either be at the end of the input string or at a semicolon. + if (*pos < input.size()) + { + if (input[*pos] != ';') + { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, ';' expected (options: %s)", + input.c_str()); + return false; + } + + ++(*pos); + } + + return true; +} + +bool Rdb_cf_options::set_override(const std::string &override_config) +{ + // TODO(???): support updates? + + std::string cf; + std::string opt_str; + rocksdb::ColumnFamilyOptions options; + Name_to_config_t configs; + + // Loop through the characters of the string until we reach the end. + size_t pos = 0; + while (pos < override_config.size()) + { + // Attempt to find ={}. + if (!find_cf_options_pair(override_config, &pos, &cf, &opt_str)) + return false; + + // Generate an error if we have already seen this column family. + if (configs.find(cf) != configs.end()) + { + // NO_LINT_DEBUG + sql_print_warning( + "Duplicate entry for %s in override options (options: %s)", + cf.c_str(), override_config.c_str()); + return false; + } + + // Generate an error if the is not valid according to RocksDB. + if (!rocksdb::GetColumnFamilyOptionsFromString( + options, opt_str, &options).ok()) + { + // NO_LINT_DEBUG + sql_print_warning( + "Invalid cf config for %s in override options (options: %s)", + cf.c_str(), override_config.c_str()); + return false; + } + + // If everything is good, add this cf/opt_str pair to the map. 
+ configs[cf] = opt_str; + } + + // Everything checked out - make the map live + m_name_map = configs; + + return true; +} + +const rocksdb::Comparator* Rdb_cf_options::get_cf_comparator( + const std::string& cf_name) +{ + if (Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str())) + { + return &s_rev_pk_comparator; + } + else + { + return &s_pk_comparator; + } +} + +void Rdb_cf_options::get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *opts) +{ + DBUG_ASSERT(opts != nullptr); + + *opts = m_default_cf_opts; + get(cf_name, opts); + + // Set the comparator according to 'rev:' + opts->comparator= get_cf_comparator(cf_name); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h new file mode 100644 index 00000000000..e709e42e8b5 --- /dev/null +++ b/storage/rocksdb/rdb_cf_options.h @@ -0,0 +1,92 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ system header files */ +#include +#include + +/* RocksDB header files */ +#include "rocksdb/table.h" +#include "rocksdb/utilities/options_util.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" + +namespace myrocks { + +/* + Per-column family options configs. 
+ + Per-column family option can be set + - Globally (the same value applies to all column families) + - Per column family: there is a {cf_name -> value} map, + and also there is a default value which applies to column + families not found in the map. +*/ +class Rdb_cf_options +{ + public: + void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions *opts); + + bool init( + size_t default_write_buffer_size, + const rocksdb::BlockBasedTableOptions& table_options, + std::shared_ptr prop_coll_factory, + const char * default_cf_options, + const char * override_cf_options); + + const rocksdb::ColumnFamilyOptions& get_defaults() const { + return m_default_cf_opts; + } + + static const rocksdb::Comparator* get_cf_comparator( + const std::string& cf_name); + + void get_cf_options( + const std::string &cf_name, + rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__)); + + private: + bool set_default(const std::string &default_config); + bool set_override(const std::string &overide_config); + + /* Helper string manipulation functions */ + static void skip_spaces(const std::string& input, size_t* pos); + static bool find_column_family(const std::string& input, size_t* pos, + std::string* key); + static bool find_options(const std::string& input, size_t* pos, + std::string* options); + static bool find_cf_options_pair(const std::string& input, size_t* pos, + std::string* cf, std::string* opt_str); + + private: + static Rdb_pk_comparator s_pk_comparator; + static Rdb_rev_comparator s_rev_pk_comparator; + + typedef std::unordered_map Name_to_config_t; + + /* CF name -> value map */ + Name_to_config_t m_name_map; + + /* The default value (if there is only one value, it is stored here) */ + std::string m_default_config; + + rocksdb::ColumnFamilyOptions m_default_cf_opts; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h new file mode 100644 index 00000000000..db2011721b7 --- /dev/null +++ 
b/storage/rocksdb/rdb_compact_filter.h @@ -0,0 +1,115 @@ +/* + Portions Copyright (c) 2016-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* C++ system header files */ +#include + +/* RocksDB includes */ +#include "rocksdb/compaction_filter.h" + +/* MyRocks includes */ +#include "./ha_rocksdb_proto.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +class Rdb_compact_filter : public rocksdb::CompactionFilter +{ + public: + explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {} + ~Rdb_compact_filter() {} + + // keys are passed in sorted order within the same sst. + // V1 Filter is thread safe on our usage (creating from Factory). + // Make sure to protect instance variables when switching to thread + // unsafe in the future. 
+ virtual bool Filter(int level, + const rocksdb::Slice& key, + const rocksdb::Slice& existing_value, + std::string* new_value, + bool* value_changed) const override + { + DBUG_ASSERT(key.size() >= sizeof(uint32)); + + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id= m_cf_id; + gl_index_id.index_id= rdb_netbuf_to_uint32((const uchar*)key.data()); + DBUG_ASSERT(gl_index_id.index_id >= 1); + + if (gl_index_id != m_prev_index) // processing new index id + { + if (m_num_deleted > 0) + { + m_num_deleted= 0; + } + m_should_delete= + rdb_get_dict_manager()->is_drop_index_ongoing(gl_index_id); + m_prev_index= gl_index_id; + } + + if (m_should_delete) + { + m_num_deleted++; + } + + return m_should_delete; + } + + virtual bool IgnoreSnapshots() const override { return true; } + + virtual const char* Name() const override + { + return "Rdb_compact_filter"; + } + + private: + // Column family for this compaction filter + const uint32_t m_cf_id; + // Index id of the previous record + mutable GL_INDEX_ID m_prev_index= {0, 0}; + // Number of rows deleted for the same index id + mutable uint64 m_num_deleted= 0; + // Current index id should be deleted or not (should be deleted if true) + mutable bool m_should_delete= false; +}; + +class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory +{ + public: + Rdb_compact_filter_factory() {} + + ~Rdb_compact_filter_factory() {} + + const char* Name() const override + { + return "Rdb_compact_filter_factory"; + } + + std::unique_ptr CreateCompactionFilter( + const rocksdb::CompactionFilter::Context& context) override + { + return std::unique_ptr( + new Rdb_compact_filter(context.column_family_id)); + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_comparator.h b/storage/rocksdb/rdb_comparator.h new file mode 100644 index 00000000000..7e56c5ab00a --- /dev/null +++ b/storage/rocksdb/rdb_comparator.h @@ -0,0 +1,93 @@ +/* + Copyright (c) 2012,2015 Monty Program Ab + + This program is free software; you can 
redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ system header files */ +#include + +/* MySQL includes */ +#include "./m_ctype.h" + +/* RocksDB header files */ +#include "rocksdb/comparator.h" + +namespace myrocks { + +/* + The keys are in form: {index_number} {mem-comparable-key} + + (todo: knowledge about this format is shared between this class and + Rdb_key_def) +*/ +class Rdb_pk_comparator : public rocksdb::Comparator +{ + public: + static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) + { + size_t a_size= a.size(); + size_t b_size= b.size(); + size_t len= (a_size < b_size) ? a_size : b_size; + int res; + + if ((res= memcmp(a.data(), b.data(), len))) + return res; + + /* Ok, res== 0 */ + if (a_size != b_size) + { + return a_size < b_size? 
-1 : 1; + } + return 0; + } + + /* Override virtual methods of interest */ + + int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override + { + return bytewise_compare(a,b); + } + + const char* Name() const override { return "RocksDB_SE_v3.10"; } + + //TODO: advanced funcs: + // - FindShortestSeparator + // - FindShortSuccessor + + // for now, do-nothing implementations: + void FindShortestSeparator(std::string* start, + const rocksdb::Slice& limit) const override {} + void FindShortSuccessor(std::string* key) const override {} +}; + +class Rdb_rev_comparator : public rocksdb::Comparator +{ + public: + static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) + { + return -Rdb_pk_comparator::bytewise_compare(a, b); + } + + int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override + { + return -Rdb_pk_comparator::bytewise_compare(a, b); + } + const char* Name() const override { return "rev:RocksDB_SE_v3.10"; } + void FindShortestSeparator(std::string* start, + const rocksdb::Slice& limit) const override {} + void FindShortSuccessor(std::string* key) const override {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc new file mode 100644 index 00000000000..d80c81a3d9a --- /dev/null +++ b/storage/rocksdb/rdb_datadic.cc @@ -0,0 +1,4584 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* This C++ file's header file */ +#include "./rdb_datadic.h" + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include + +/* MySQL header files */ +#include "./key.h" +#include "./m_ctype.h" +#include "./my_bit.h" + +/* MyRocks header files */ +#include "./ha_rocksdb_proto.h" +#include "./my_stacktrace.h" +#include "./rdb_cf_manager.h" +#include "./rdb_utils.h" + +namespace myrocks { + +void get_mem_comparable_space(const CHARSET_INFO *cs, + const std::vector **xfrm, + size_t *xfrm_len, + size_t *mb_len); + +/* + Rdb_key_def class implementation +*/ + +Rdb_key_def::Rdb_key_def( + uint indexnr_arg, uint keyno_arg, + rocksdb::ColumnFamilyHandle* cf_handle_arg, + uint16_t index_dict_version_arg, + uchar index_type_arg, + uint16_t kv_format_version_arg, + bool is_reverse_cf_arg, bool is_auto_cf_arg, + const char* _name, + Rdb_index_stats _stats +) : + m_index_number(indexnr_arg), + m_cf_handle(cf_handle_arg), + m_index_dict_version(index_dict_version_arg), + m_index_type(index_type_arg), + m_kv_format_version(kv_format_version_arg), + m_is_reverse_cf(is_reverse_cf_arg), + m_is_auto_cf(is_auto_cf_arg), + m_name(_name), + m_stats(_stats), + m_pk_part_no(nullptr), + m_pack_info(nullptr), + m_keyno(keyno_arg), + m_key_parts(0), + m_maxlength(0) // means 'not intialized' +{ + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); + DBUG_ASSERT(m_cf_handle != nullptr); +} + +Rdb_key_def::Rdb_key_def(const Rdb_key_def& k) : + m_index_number(k.m_index_number), + m_cf_handle(k.m_cf_handle), + m_is_reverse_cf(k.m_is_reverse_cf), + m_is_auto_cf(k.m_is_auto_cf), + 
m_name(k.m_name), + m_stats(k.m_stats), + m_pk_part_no(k.m_pk_part_no), + m_pack_info(k.m_pack_info), + m_keyno(k.m_keyno), + m_key_parts(k.m_key_parts), + m_maxlength(k.m_maxlength) +{ + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); + if (k.m_pack_info) + { + size_t size= sizeof(Rdb_field_packing) * k.m_key_parts; + m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); + memcpy(m_pack_info, k.m_pack_info, size); + } + + if (k.m_pk_part_no) + { + size_t size = sizeof(uint)*m_key_parts; + m_pk_part_no= reinterpret_cast(my_malloc(size, MYF(0))); + memcpy(m_pk_part_no, k.m_pk_part_no, size); + } +} + +Rdb_key_def::~Rdb_key_def() +{ + mysql_mutex_destroy(&m_mutex); + + my_free(m_pk_part_no); + m_pk_part_no = nullptr; + + my_free(m_pack_info); + m_pack_info = nullptr; +} + +void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) +{ + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(tbl_def != nullptr); + + /* + Set max_length based on the table. This can be called concurrently from + multiple threads, so there is a mutex to protect this code. + */ + const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); + const bool hidden_pk_exists= table_has_hidden_pk(tbl); + const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY); + if (!m_maxlength) + { + mysql_mutex_lock(&m_mutex); + if (m_maxlength != 0) + { + mysql_mutex_unlock(&m_mutex); + return; + } + + KEY *key_info= nullptr; + KEY *pk_info= nullptr; + if (!is_hidden_pk) + { + key_info= &tbl->key_info[m_keyno]; + if (!hidden_pk_exists) + pk_info= &tbl->key_info[tbl->s->primary_key]; + m_name= std::string(key_info->name); + } + else + { + m_name= HIDDEN_PK_NAME; + } + + if (secondary_key) + m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->actual_key_parts; + else + { + pk_info= nullptr; + m_pk_key_parts= 0; + } + + // "unique" secondary keys support: + m_key_parts= is_hidden_pk ? 
1 : key_info->actual_key_parts; + + if (secondary_key) + { + /* + In most cases, SQL layer puts PK columns as invisible suffix at the + end of secondary key. There are cases where this doesn't happen: + - unique secondary indexes. + - partitioned tables. + + Internally, we always need PK columns as suffix (and InnoDB does, + too, if you were wondering). + + The loop below will attempt to put all PK columns at the end of key + definition. Columns that are already included in the index (either + by the user or by "extended keys" feature) are not included for the + second time. + */ + m_key_parts += m_pk_key_parts; + } + + if (secondary_key) + m_pk_part_no= reinterpret_cast(my_malloc(sizeof(uint)*m_key_parts, + MYF(0))); + else + m_pk_part_no= nullptr; + + size_t size= sizeof(Rdb_field_packing) * m_key_parts; + m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); + + size_t max_len= INDEX_NUMBER_SIZE; + int unpack_len= 0; + int max_part_len= 0; + bool simulating_extkey= false; + uint dst_i= 0; + + uint keyno_to_set= m_keyno; + uint keypart_to_set= 0; + + if (is_hidden_pk) + { + Field *field= nullptr; + m_pack_info[dst_i].setup(this, field, keyno_to_set, 0, 0); + m_pack_info[dst_i].m_unpack_data_offset= unpack_len; + max_len += m_pack_info[dst_i].m_max_image_len; + max_part_len= std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); + dst_i++; + } + else + { + KEY_PART_INFO *key_part= key_info->key_part; + + /* this loop also loops over the 'extended key' tail */ + for (uint src_i= 0; src_i < m_key_parts; src_i++, keypart_to_set++) + { + Field *field= key_part ? 
key_part->field : nullptr; + + if (simulating_extkey && !hidden_pk_exists) + { + /* Check if this field is already present in the key definition */ + bool found= false; + for (uint j= 0; j < key_info->actual_key_parts; j++) + { + if (field->field_index == key_info->key_part[j].field->field_index) + { + found= true; + break; + } + } + + if (found) + { + key_part++; + continue; + } + } + + if (field && field->real_maybe_null()) + max_len +=1; // NULL-byte + + m_pack_info[dst_i].setup(this, field, keyno_to_set, keypart_to_set, + key_part ? key_part->length : 0); + m_pack_info[dst_i].m_unpack_data_offset= unpack_len; + + if (pk_info) + { + m_pk_part_no[dst_i]= -1; + for (uint j= 0; j < m_pk_key_parts; j++) + { + if (field->field_index == pk_info->key_part[j].field->field_index) + { + m_pk_part_no[dst_i]= j; + break; + } + } + } + else if (secondary_key && hidden_pk_exists) + { + /* + The hidden pk can never be part of the sk. So it is always + appended to the end of the sk. + */ + m_pk_part_no[dst_i]= -1; + if (simulating_extkey) + m_pk_part_no[dst_i]= 0; + } + + max_len += m_pack_info[dst_i].m_max_image_len; + + max_part_len= std::max(max_part_len, + m_pack_info[dst_i].m_max_image_len); + + key_part++; + /* + For "unique" secondary indexes, pretend they have + "index extensions" + */ + if (secondary_key && src_i+1 == key_info->actual_key_parts) + { + simulating_extkey= true; + if (!hidden_pk_exists) + { + keyno_to_set= tbl->s->primary_key; + key_part= pk_info->key_part; + keypart_to_set= (uint)-1; + } + else + { + keyno_to_set= tbl_def->m_key_count - 1; + key_part= nullptr; + keypart_to_set= 0; + } + } + + dst_i++; + } + } + + m_key_parts= dst_i; + + /* Initialize the memory needed by the stats structure */ + m_stats.m_distinct_keys_per_prefix.resize(get_key_parts()); + + /* + This should be the last member variable set before releasing the mutex + so that other threads can't see the object partially set up. 
+ */ + m_maxlength= max_len; + + mysql_mutex_unlock(&m_mutex); + } +} + + +/** + Get a mem-comparable form of Primary Key from mem-comparable form of this key + + @param + pk_descr Primary Key descriptor + key Index tuple from this key in mem-comparable form + pk_buffer OUT Put here mem-comparable form of the Primary Key. + + @note + It may or may not be possible to restore primary key columns to their + mem-comparable form. To handle all cases, this function copies mem- + comparable forms directly. + + RocksDB SE supports "Extended keys". This means that PK columns are present + at the end of every key. If the key already includes PK columns, then + these columns are not present at the end of the key. + + Because of the above, we copy each primary key column. + + @todo + If we checked crc32 checksums in this function, we would catch some CRC + violations that we currently don't. On the other hand, there is a broader + set of queries for which we would check the checksum twice. +*/ + +uint Rdb_key_def::get_primary_key_tuple(TABLE *table, + const std::shared_ptr& pk_descr, + const rocksdb::Slice *key, + uchar *pk_buffer) const +{ + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(pk_descr != nullptr); + DBUG_ASSERT(key != nullptr); + DBUG_ASSERT(pk_buffer); + + uint size= 0; + uchar *buf= pk_buffer; + DBUG_ASSERT(m_pk_key_parts); + + /* Put the PK number */ + rdb_netbuf_store_index(buf, pk_descr->m_index_number); + buf += INDEX_NUMBER_SIZE; + size += INDEX_NUMBER_SIZE; + + const char* start_offs[MAX_REF_PARTS]; + const char* end_offs[MAX_REF_PARTS]; + int pk_key_part; + uint i; + Rdb_string_reader reader(key); + + // Skip the index number + if ((!reader.read(INDEX_NUMBER_SIZE))) + return RDB_INVALID_KEY_LEN; + + for (i= 0; i < m_key_parts; i++) + { + if ((pk_key_part= m_pk_part_no[i]) != -1) + { + start_offs[pk_key_part]= reader.get_current_ptr(); + } + + bool have_value= true; + /* It is impossible to unpack the column. Skip it. 
*/ + if (m_pack_info[i].m_maybe_null) + { + const char* nullp; + if (!(nullp= reader.read(1))) + return RDB_INVALID_KEY_LEN; + if (*nullp == 0) + { + /* This is a NULL value */ + have_value= false; + } + else + { + /* If NULL marker is not '0', it can be only '1' */ + if (*nullp != 1) + return RDB_INVALID_KEY_LEN; + } + } + + if (have_value) + { + Rdb_field_packing *fpi= &m_pack_info[i]; + + DBUG_ASSERT(table->s != nullptr); + bool is_hidden_pk_part= (i + 1 == m_key_parts) && + (table->s->primary_key == MAX_INDEXES); + Field *field= nullptr; + if (!is_hidden_pk_part) + field= fpi->get_field_in_table(table); + if (fpi->m_skip_func(fpi, field, &reader)) + return RDB_INVALID_KEY_LEN; + } + + if (pk_key_part != -1) + { + end_offs[pk_key_part]= reader.get_current_ptr(); + } + } + + for (i= 0; i < m_pk_key_parts; i++) + { + uint part_size= end_offs[i] - start_offs[i]; + memcpy(buf, start_offs[i], end_offs[i] - start_offs[i]); + buf += part_size; + size += part_size; + } + + return size; +} + + +/** + Convert index tuple into storage (i.e. mem-comparable) format + + @detail + Currently this is done by unpacking into table->record[0] and then + packing index columns into storage format. + + @param pack_buffer Temporary area for packing varchar columns. Its + size is at least max_storage_fmt_length() bytes. +*/ + +uint Rdb_key_def::pack_index_tuple(TABLE *tbl, uchar *pack_buffer, + uchar *packed_tuple, const uchar *key_tuple, + key_part_map keypart_map) const +{ + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(pack_buffer != nullptr); + DBUG_ASSERT(packed_tuple != nullptr); + DBUG_ASSERT(key_tuple != nullptr); + + /* We were given a record in KeyTupleFormat. 
First, save it to record */ + uint key_len= calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); + key_restore(tbl->record[0], key_tuple, &tbl->key_info[m_keyno], key_len); + + uint n_used_parts= my_count_bits(keypart_map); + if (keypart_map == HA_WHOLE_KEY) + n_used_parts= 0; // Full key is used + + /* Then, convert the record into a mem-comparable form */ + return pack_record(tbl, pack_buffer, tbl->record[0], packed_tuple, nullptr, + false, 0, n_used_parts); +} + + +/** + @brief + Check if "unpack info" data includes checksum. + + @detail + This is used only by CHECK TABLE to count the number of rows that have + checksums. +*/ + +bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) +{ + const uchar* ptr= (const uchar*)unpack_info.data(); + size_t size= unpack_info.size(); + + // Skip unpack info if present. + if (size >= RDB_UNPACK_HEADER_SIZE && + ptr[0] == RDB_UNPACK_DATA_TAG) + { + uint16 skip_len= rdb_netbuf_to_uint16(ptr + 1); + SHIP_ASSERT(size >= skip_len); + + size -= skip_len; + ptr += skip_len; + } + + return (size == RDB_CHECKSUM_CHUNK_SIZE && ptr[0] == RDB_CHECKSUM_DATA_TAG); +} + +/* + @return Number of bytes that were changed +*/ +int Rdb_key_def::successor(uchar *packed_tuple, uint len) +{ + DBUG_ASSERT(packed_tuple != nullptr); + + int changed= 0; + uchar *p= packed_tuple + len - 1; + for (; p > packed_tuple; p--) + { + changed++; + if (*p != uchar(0xFF)) + { + *p= *p + 1; + break; + } + *p='\0'; + } + return changed; +} + + +/** + Get index columns from the record and pack them into mem-comparable form. + + @param + tbl Table we're working on + record IN Record buffer with fields in table->record format + pack_buffer IN Temporary area for packing varchars. The size is + at least max_storage_fmt_length() bytes. + packed_tuple OUT Key in the mem-comparable form + unpack_info OUT Unpack data + unpack_info_len OUT Unpack data length + n_key_parts Number of keyparts to process. 0 means all of them. 
+ n_null_fields OUT Number of key fields with NULL value. + + @detail + Some callers do not need the unpack information, they can pass + unpack_info=nullptr, unpack_info_len=nullptr. + + @return + Length of the packed tuple +*/ + +uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, + const uchar *record, uchar *packed_tuple, + Rdb_string_writer *unpack_info, + bool should_store_checksums, + longlong hidden_pk_id, uint n_key_parts, + uint *n_null_fields) const +{ + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(pack_buffer != nullptr); + DBUG_ASSERT(record != nullptr); + DBUG_ASSERT(packed_tuple != nullptr); + // Checksums for PKs are made when record is packed. + // We should never attempt to make checksum just from PK values + DBUG_ASSERT_IMP(should_store_checksums, + (m_index_type == INDEX_TYPE_SECONDARY)); + + uchar *tuple= packed_tuple; + size_t unpack_len_pos= size_t(-1); + const bool hidden_pk_exists= table_has_hidden_pk(tbl); + + rdb_netbuf_store_index(tuple, m_index_number); + tuple += INDEX_NUMBER_SIZE; + + // If n_key_parts is 0, it means all columns. + // The following includes the 'extended key' tail. + // The 'extended key' includes primary key. This is done to 'uniqify' + // non-unique indexes + bool use_all_columns = n_key_parts == 0 || n_key_parts == MAX_REF_PARTS; + + // If hidden pk exists, but hidden pk wasnt passed in, we can't pack the + // hidden key part. So we skip it (its always 1 part). 
+ if (hidden_pk_exists && !hidden_pk_id && use_all_columns) + n_key_parts= m_key_parts - 1; + else if (use_all_columns) + n_key_parts= m_key_parts; + + if (n_null_fields) + *n_null_fields = 0; + + if (unpack_info) + { + unpack_info->clear(); + unpack_info->write_uint8(RDB_UNPACK_DATA_TAG); + unpack_len_pos= unpack_info->get_current_pos(); + // we don't know the total length yet, so write a zero + unpack_info->write_uint16(0); + } + + for (uint i=0; i < n_key_parts; i++) + { + // Fill hidden pk id into the last key part for secondary keys for tables + // with no pk + if (hidden_pk_exists && hidden_pk_id && i + 1 == n_key_parts) + { + m_pack_info[i].fill_hidden_pk_val(&tuple, hidden_pk_id); + break; + } + + Field *field= m_pack_info[i].get_field_in_table(tbl); + DBUG_ASSERT(field != nullptr); + + // Old Field methods expected the record pointer to be at tbl->record[0]. + // The quick and easy way to fix this was to pass along the offset + // for the pointer. + my_ptrdiff_t ptr_diff= record - tbl->record[0]; + + if (field->real_maybe_null()) + { + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); + if (field->is_real_null(ptr_diff)) + { + /* NULL value. store '\0' so that it sorts before non-NULL values */ + *tuple++ = 0; + /* That's it, don't store anything else */ + if (n_null_fields) + (*n_null_fields)++; + continue; + } + else + { + /* Not a NULL value. 
Store '1' */ + *tuple++ = 1; + } + } + + bool create_unpack_info= + (unpack_info && // we were requested to generate unpack_info + m_pack_info[i].uses_unpack_info() && // and this keypart uses it + index_format_min_check(PRIMARY_FORMAT_VERSION_UPDATE1, + SECONDARY_FORMAT_VERSION_UPDATE1)); + Rdb_pack_field_context pack_ctx(unpack_info); + + // Set the offset for methods which do not take an offset as an argument + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, + m_pack_info[i].m_max_image_len)); + field->move_field_offset(ptr_diff); + + m_pack_info[i].m_pack_func(&m_pack_info[i], field, + pack_buffer, &tuple, &pack_ctx); + + /* Make "unpack info" to be stored in the value */ + if (create_unpack_info) + { + m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec, + field, &pack_ctx); + } + field->move_field_offset(-ptr_diff); + } + + if (unpack_info) + { + size_t len= unpack_info->get_current_pos(); + DBUG_ASSERT(len <= std::numeric_limits::max()); + + // Don't store the unpack_info if it has only the header (that is, there's + // no meaningful content). 
+ // Primary Keys are special: for them, store the unpack_info even if it's + // empty (provided m_maybe_unpack_info==true, see + // ha_rocksdb::convert_record_to_storage_format) + if (len == RDB_UNPACK_HEADER_SIZE && + m_index_type != Rdb_key_def::INDEX_TYPE_PRIMARY) + { + unpack_info->clear(); + } + else + { + unpack_info->write_uint16_at(unpack_len_pos, len); + } + + // + // Secondary keys have key and value checksums in the value part + // Primary key is a special case (the value part has non-indexed columns), + // so the checksums are computed and stored by + // ha_rocksdb::convert_record_to_storage_format + // + if (should_store_checksums) + { + uint32_t key_crc32= crc32(0, packed_tuple, tuple - packed_tuple); + uint32_t val_crc32= crc32(0, unpack_info->ptr(), + unpack_info->get_current_pos()); + + unpack_info->write_uint8(RDB_CHECKSUM_DATA_TAG); + unpack_info->write_uint32(key_crc32); + unpack_info->write_uint32(val_crc32); + } + } + + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0)); + + return tuple - packed_tuple; +} + +/** + Pack the hidden primary key into mem-comparable form. 
+ + @param + tbl Table we're working on + hidden_pk_id IN New value to be packed into key + packed_tuple OUT Key in the mem-comparable form + + @return + Length of the packed tuple +*/ + +uint Rdb_key_def::pack_hidden_pk(longlong hidden_pk_id, + uchar *packed_tuple) const +{ + DBUG_ASSERT(packed_tuple != nullptr); + + uchar *tuple= packed_tuple; + rdb_netbuf_store_index(tuple, m_index_number); + tuple += INDEX_NUMBER_SIZE; + DBUG_ASSERT(m_key_parts == 1); + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, + m_pack_info[0].m_max_image_len)); + + m_pack_info[0].fill_hidden_pk_val(&tuple, hidden_pk_id); + + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0)); + return tuple - packed_tuple; +} + + +/* + Function of type rdb_index_field_pack_t +*/ + +void rdb_pack_with_make_sort_key(Rdb_field_packing *fpi, Field *field, + uchar *buf __attribute__((__unused__)), + uchar **dst, + Rdb_pack_field_context *pack_ctx + __attribute__((__unused__))) +{ + DBUG_ASSERT(fpi != nullptr); + DBUG_ASSERT(field != nullptr); + DBUG_ASSERT(dst != nullptr); + DBUG_ASSERT(*dst != nullptr); + + const int max_len= fpi->m_max_image_len; + field->make_sort_key(*dst, max_len); + *dst += max_len; +} + +/* + Compares two keys without unpacking + + @detail + @return + 0 - Ok. column_index is the index of the first column which is different. + -1 if two kes are equal + 1 - Data format error. 
+*/ +int Rdb_key_def::compare_keys( + const rocksdb::Slice *key1, + const rocksdb::Slice *key2, + std::size_t* column_index +) const +{ + DBUG_ASSERT(key1 != nullptr); + DBUG_ASSERT(key2 != nullptr); + DBUG_ASSERT(column_index != nullptr); + + // the caller should check the return value and + // not rely on column_index being valid + *column_index = 0xbadf00d; + + Rdb_string_reader reader1(key1); + Rdb_string_reader reader2(key2); + + // Skip the index number + if ((!reader1.read(INDEX_NUMBER_SIZE))) + return 1; + + if ((!reader2.read(INDEX_NUMBER_SIZE))) + return 1; + + for (uint i= 0; i < m_key_parts ; i++) + { + Rdb_field_packing *fpi= &m_pack_info[i]; + if (fpi->m_maybe_null) + { + auto nullp1= reader1.read(1); + auto nullp2= reader2.read(1); + if (nullp1 == nullptr || nullp2 == nullptr) + return 1; //error + + if (*nullp1 != *nullp2) + { + *column_index = i; + return 0; + } + + if (*nullp1 == 0) + { + /* This is a NULL value */ + continue; + } + } + + auto before_skip1 = reader1.get_current_ptr(); + auto before_skip2 = reader2.get_current_ptr(); + DBUG_ASSERT(fpi->m_skip_func); + if (fpi->m_skip_func(fpi, nullptr, &reader1)) + return 1; + if (fpi->m_skip_func(fpi, nullptr, &reader2)) + return 1; + auto size1 = reader1.get_current_ptr() - before_skip1; + auto size2 = reader2.get_current_ptr() - before_skip2; + if (size1 != size2) + { + *column_index = i; + return 0; + } + + if (memcmp(before_skip1, before_skip2, size1) != 0) { + *column_index = i; + return 0; + } + } + + *column_index = m_key_parts; + return 0; + +} + + +/* + @brief + Given a zero-padded key, determine its real key length + + @detail + Fixed-size skip functions just read. 
/*
  @brief
    Given a zero-padded key, determine its real key length

  @detail
    Skips over each keypart using its skip function; the number of bytes
    consumed is the real key length.
*/

size_t Rdb_key_def::key_length(TABLE *table, const rocksdb::Slice &key) const
{
  DBUG_ASSERT(table != nullptr);

  Rdb_string_reader reader(&key);

  // Skip the index number
  if ((!reader.read(INDEX_NUMBER_SIZE)))
    return size_t(-1);

  for (uint i= 0; i < m_key_parts ; i++)
  {
    Rdb_field_packing *fpi= &m_pack_info[i];
    Field *field= nullptr;
    if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY)
      field= fpi->get_field_in_table(table);
    if (fpi->m_skip_func(fpi, field, &reader))
      return size_t(-1);
  }
  // Bytes consumed == real key length (the rest is zero padding)
  return key.size() - reader.remaining_bytes();
}


/*
  Take mem-comparable form and unpack_info and unpack it to Table->record

  @detail
    not all indexes support this

  @return
    UNPACK_SUCCESS      - Ok
    UNPACK_FAILURE      - Data format error.
    UNPACK_INFO_MISSING - Unpack info was unavailable and was required for
                          unpacking.
*/

int Rdb_key_def::unpack_record(TABLE *table, uchar *buf,
                               const rocksdb::Slice *packed_key,
                               const rocksdb::Slice *unpack_info,
                               bool verify_checksums) const
{
  Rdb_string_reader reader(packed_key);
  Rdb_string_reader unp_reader("");
  const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY);
  const bool hidden_pk_exists= table_has_hidden_pk(table);
  const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY);
  // There is no checksuming data after unpack_info for primary keys, because
  // the layout there is different. The checksum is verified in
  // ha_rocksdb::convert_record_from_storage_format instead.
  DBUG_ASSERT_IMP(!secondary_key, !verify_checksums);

  if (unpack_info)
  {
    unp_reader= Rdb_string_reader(unpack_info);
  }

  // Old Field methods expected the record pointer to be at tbl->record[0].
  // The quick and easy way to fix this was to pass along the offset
  // for the pointer.
  my_ptrdiff_t ptr_diff= buf - table->record[0];

  // Skip the index number
  if ((!reader.read(INDEX_NUMBER_SIZE)))
  {
    return 1;
  }

  // For secondary keys, we expect the value field to contain unpack data and
  // checksum data in that order. One or both can be missing, but they cannot
  // be reordered.
  bool has_unpack_info= unp_reader.remaining_bytes() &&
    *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG;
  if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE))
  {
    return 1;
  }

  for (uint i= 0; i < m_key_parts ; i++)
  {
    Rdb_field_packing *fpi= &m_pack_info[i];

    /*
      Hidden pk field is packed at the end of the secondary keys, but the SQL
      layer does not know about it. Skip retrieving field if hidden pk.
    */
    if ((secondary_key && hidden_pk_exists && i + 1 == m_key_parts) ||
        is_hidden_pk)
    {
      DBUG_ASSERT(fpi->m_unpack_func);
      if (fpi->m_skip_func(fpi, nullptr, &reader))
      {
        return 1;
      }
      continue;
    }

    Field *field= fpi->get_field_in_table(table);

    // Secondary keys are always unpackable; for primary keys, keyparts that
    // need unpack_info are only unpackable from format UPDATE1 onwards.
    bool do_unpack= secondary_key ||
      !fpi->uses_unpack_info() ||
      (m_kv_format_version >= Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1);
    if (fpi->m_unpack_func && do_unpack)
    {
      /* It is possible to unpack this column. Do it. */

      if (fpi->m_maybe_null)
      {
        const char* nullp;
        if (!(nullp= reader.read(1)))
          return 1;
        if (*nullp == 0)
        {
          /* Set the NULL-bit of this field */
          field->set_null(ptr_diff);
          /* Also set the field to its default value */
          uint field_offset= field->ptr - table->record[0];
          memcpy(buf + field_offset,
                 table->s->default_values + field_offset,
                 field->pack_length());
          continue;
        }
        else if (*nullp == 1)
          field->set_notnull(ptr_diff);
        else
          return 1;
      }

      // If we need unpack info, but there is none, tell the unpack function
      // this by passing unp_reader as nullptr. If we never read unpack_info
      // during unpacking anyway, then there won't an error.
      int res;
      bool maybe_missing_unpack= !has_unpack_info && fpi->uses_unpack_info();
      res= fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff,
                              &reader,
                              maybe_missing_unpack ? nullptr : &unp_reader);

      if (res)
        return res;
    }
    else
    {
      /* It is impossible to unpack the column. Skip it. */
      if (fpi->m_maybe_null)
      {
        const char* nullp;
        if (!(nullp= reader.read(1)))
          return 1;
        if (*nullp == 0)
        {
          /* This is a NULL value */
          continue;
        }
        /* If NULL marker is not '0', it can be only '1' */
        if (*nullp != 1)
          return 1;
      }
      if (fpi->m_skip_func(fpi, field, &reader))
        return 1;
    }
  }

  /*
    Check checksum values if present
  */
  const char* ptr;
  if ((ptr= unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG)
  {
    if (verify_checksums)
    {
      uint32_t stored_key_chksum= rdb_netbuf_to_uint32(
        (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE));
      uint32_t stored_val_chksum= rdb_netbuf_to_uint32(
        (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE));

      // Key checksum covers the whole packed key; value checksum covers the
      // unpack_info minus the trailing checksum chunk itself.
      uint32_t computed_key_chksum=
        crc32(0, (const uchar*)packed_key->data(), packed_key->size());
      uint32_t computed_val_chksum=
        crc32(0, (const uchar*) unpack_info->data(),
              unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE);

      DBUG_EXECUTE_IF("myrocks_simulate_bad_key_checksum1",
                      stored_key_chksum++;);

      if (stored_key_chksum != computed_key_chksum)
      {
        report_checksum_mismatch(true, packed_key->data(),
                                 packed_key->size());
        return 1;
      }

      if (stored_val_chksum != computed_val_chksum)
      {
        report_checksum_mismatch(
          false, unpack_info->data(),
          unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE);
        return 1;
      }
    }
    else
    {
      /* The checksums are present but we are not checking checksums */
    }
  }

  // Trailing bytes after all keyparts indicate a malformed key
  if (reader.remaining_bytes())
    return 1;

  return 0;
}

/* True when the table has no user-defined primary key (MyRocks then
   maintains a hidden one). */
bool Rdb_key_def::table_has_hidden_pk(const TABLE* table)
{
  return table->s->primary_key == MAX_INDEXES;
}

/* Log a checksum mismatch (with a hexdump of the offending data) and raise
   an internal error for the client. */
void Rdb_key_def::report_checksum_mismatch(bool is_key, const char *data,
                                           size_t data_size) const
{
  std::string buf;
  // NO_LINT_DEBUG
  sql_print_error("Checksum mismatch in %s of key-value pair for index 0x%x",
                  is_key? "key" : "value", get_index_number());

  buf = rdb_hexdump(data, data_size, 1000);
  // NO_LINT_DEBUG
  sql_print_error("Data with incorrect checksum (%" PRIu64 " bytes): %s",
                  (uint64_t)data_size, buf.c_str());

  my_error(ER_INTERNAL_ERROR, MYF(0), "Record checksum mismatch");
}

/* Check that this index's key-value format version is at least the given
   minimum (separate minimums for primary and secondary indexes). */
bool Rdb_key_def::index_format_min_check(int pk_min, int sk_min) const
{
  switch (m_index_type)
  {
  case INDEX_TYPE_PRIMARY:
  case INDEX_TYPE_HIDDEN_PRIMARY:
    return (m_kv_format_version >= pk_min);
  case INDEX_TYPE_SECONDARY:
    return (m_kv_format_version >= sk_min);
  default:
    DBUG_ASSERT(0);
    return false;
  }
}

///////////////////////////////////////////////////////////////////////////////////////////
// Rdb_field_packing
///////////////////////////////////////////////////////////////////////////////////////////

/*
  Function of type rdb_index_field_skip_t: skip a fixed-size keypart by
  consuming exactly m_max_image_len bytes.
*/

int rdb_skip_max_length(const Rdb_field_packing *fpi,
                        const Field *field __attribute__((__unused__)),
                        Rdb_string_reader *reader)
{
  if (!reader->read(fpi->m_max_image_len))
    return 1;
  return 0;
}
+*/ + +const uint RDB_ESCAPE_LENGTH= 9; +static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0, + "RDB_ESCAPE_LENGTH-1 must be even."); + +/* + Function of type rdb_index_field_skip_t +*/ + +static int rdb_skip_variable_length( + const Rdb_field_packing *fpi __attribute__((__unused__)), + const Field *field, Rdb_string_reader *reader) +{ + const uchar *ptr; + bool finished= false; + + size_t dst_len; /* How much data can be there */ + if (field) + { + const Field_varstring* field_var= + static_cast(field); + dst_len= field_var->pack_length() - field_var->length_bytes; + } + else + { + dst_len= UINT_MAX; + } + + /* Decode the length-emitted encoding here */ + while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) + { + /* See rdb_pack_with_varchar_encoding. */ + uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; + + if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) + { + return 1; /* cannot store that much, invalid data */ + } + + if (used_bytes < RDB_ESCAPE_LENGTH - 1) + { + finished= true; + break; + } + dst_len -= used_bytes; + } + + if (!finished) + { + return 1; + } + + return 0; +} + +const int VARCHAR_CMP_LESS_THAN_SPACES = 1; +const int VARCHAR_CMP_EQUAL_TO_SPACES = 2; +const int VARCHAR_CMP_GREATER_THAN_SPACES = 3; + +/* + Skip a keypart that uses Variable-Length Space-Padded encoding +*/ + +static int rdb_skip_variable_space_pad( + const Rdb_field_packing *fpi, + const Field *field, Rdb_string_reader *reader) +{ + const uchar *ptr; + bool finished= false; + + size_t dst_len= UINT_MAX; /* How much data can be there */ + + if (field) + { + const Field_varstring* field_var= + static_cast(field); + dst_len= field_var->pack_length() - field_var->length_bytes; + } + + /* Decode the length-emitted encoding here */ + while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) + { + // See rdb_pack_with_varchar_space_pad + uchar c= ptr[fpi->m_segment_size-1]; + if (c == 
VARCHAR_CMP_EQUAL_TO_SPACES) + { + // This is the last segment + finished= true; + break; + } + else if (c == VARCHAR_CMP_LESS_THAN_SPACES || + c == VARCHAR_CMP_GREATER_THAN_SPACES) + { + // This is not the last segment + if ((fpi->m_segment_size-1) > dst_len) + { + // The segment is full of data but the table field can't hold that + // much! This must be data corruption. + return 1; + } + dst_len -= (fpi->m_segment_size-1); + } + else + { + // Encountered a value that's none of the VARCHAR_CMP* constants + // It's data corruption. + return 1; + } + } + return finished? 0: 1; +} + + +/* + Function of type rdb_index_field_unpack_t +*/ + +int rdb_unpack_integer( + Rdb_field_packing *fpi, Field *field, uchar *to, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + const int length= fpi->m_max_image_len; + + const uchar *from; + if (!(from= (const uchar*)reader->read(length))) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + +#ifdef WORDS_BIGENDIAN + { + if (((Field_num*)field)->unsigned_flag) + to[0]= from[0]; + else + to[0]= (char)(from[0] ^ 128); // Reverse the sign bit. + memcpy(to + 1, from + 1, length - 1); + } +#else + { + const int sign_byte= from[0]; + if (((Field_num*)field)->unsigned_flag) + to[length - 1]= sign_byte; + else + to[length - 1]= static_cast(sign_byte ^ 128); // Reverse the sign bit. 
+ for (int i= 0, j= length - 1; i < length-1; ++i, --j) + to[i]= from[j]; + } +#endif + return UNPACK_SUCCESS; +} + +#if !defined(WORDS_BIGENDIAN) +static void rdb_swap_double_bytes(uchar *dst, const uchar *src) +{ +#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) + // A few systems store the most-significant _word_ first on little-endian + dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; + dst[4] = src[7]; dst[5] = src[6]; dst[6] = src[5]; dst[7] = src[4]; +#else + dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; + dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; +#endif +} + +static void rdb_swap_float_bytes(uchar *dst, const uchar *src) +{ + dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; +} +#else +#define rdb_swap_double_bytes nullptr +#define rdb_swap_float_bytes nullptr +#endif + +static int rdb_unpack_floating_point( + uchar *dst, Rdb_string_reader *reader, + size_t size, int exp_digit, + const uchar *zero_pattern, + const uchar *zero_val, + void (*swap_func)(uchar *, const uchar *)) +{ + const uchar* from; + + from= (const uchar*) reader->read(size); + if (from == nullptr) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + + /* Check to see if the value is zero */ + if (memcmp(from, zero_pattern, size) == 0) + { + memcpy(dst, zero_val, size); + return UNPACK_SUCCESS; + } + +#if defined(WORDS_BIGENDIAN) + // On big-endian, output can go directly into result + uchar *tmp = dst; +#else + // Otherwise use a temporary buffer to make byte-swapping easier later + uchar tmp[8]; +#endif + + memcpy(tmp, from, size); + + if (tmp[0] & 0x80) + { + // If the high bit is set the original value was positive so + // remove the high bit and subtract one from the exponent. 
+ ushort exp_part= ((ushort) tmp[0] << 8) | (ushort) tmp[1]; + exp_part &= 0x7FFF; // clear high bit; + exp_part -= (ushort) 1 << (16 - 1 - exp_digit); // subtract from exponent + tmp[0] = (uchar) (exp_part >> 8); + tmp[1] = (uchar) exp_part; + } + else + { + // Otherwise the original value was negative and all bytes have been + // negated. + for (size_t ii = 0; ii < size; ii++) + tmp[ii] ^= 0xFF; + } + +#if !defined(WORDS_BIGENDIAN) + // On little-endian, swap the bytes around + swap_func(dst, tmp); +#else + static_assert(swap_func == nullptr, "Assuming that no swapping is needed."); +#endif + + return UNPACK_SUCCESS; +} + +#if !defined(DBL_EXP_DIG) +#define DBL_EXP_DIG (sizeof(double) * 8 - DBL_MANT_DIG) +#endif + + +/* + Function of type rdb_index_field_unpack_t + + Unpack a double by doing the reverse action of change_double_for_sort + (sql/filesort.cc). Note that this only works on IEEE values. + Note also that this code assumes that NaN and +/-Infinity are never + allowed in the database. +*/ +static int rdb_unpack_double( + Rdb_field_packing *fpi __attribute__((__unused__)), + Field *field __attribute__((__unused__)), + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + static double zero_val = 0.0; + static const uchar zero_pattern[8] = { 128, 0, 0, 0, 0, 0, 0, 0 }; + + return rdb_unpack_floating_point(field_ptr, reader, + sizeof(double), + DBL_EXP_DIG, zero_pattern, (const uchar *) &zero_val, + rdb_swap_double_bytes); +} + +#if !defined(FLT_EXP_DIG) +#define FLT_EXP_DIG (sizeof(float) * 8 - FLT_MANT_DIG) +#endif + +/* + Function of type rdb_index_field_unpack_t + + Unpack a float by doing the reverse action of Field_float::make_sort_key + (sql/field.cc). Note that this only works on IEEE values. + Note also that this code assumes that NaN and +/-Infinity are never + allowed in the database. 
+*/ +static int rdb_unpack_float( + Rdb_field_packing *, Field *field __attribute__((__unused__)), + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + static float zero_val = 0.0; + static const uchar zero_pattern[4] = { 128, 0, 0, 0 }; + + return rdb_unpack_floating_point(field_ptr, reader, + sizeof(float), + FLT_EXP_DIG, zero_pattern, (const uchar *) &zero_val, + rdb_swap_float_bytes); +} + +/* + Function of type rdb_index_field_unpack_t used to + Unpack by doing the reverse action to Field_newdate::make_sort_key. +*/ + +int rdb_unpack_newdate( + Rdb_field_packing *fpi, Field *field, + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + const char* from; + DBUG_ASSERT(fpi->m_max_image_len == 3); + + if (!(from= reader->read(3))) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + + field_ptr[0]= from[2]; + field_ptr[1]= from[1]; + field_ptr[2]= from[0]; + return UNPACK_SUCCESS; +} + + +/* + Function of type rdb_index_field_unpack_t, used to + Unpack the string by copying it over. + This is for BINARY(n) where the value occupies the whole length. +*/ + +static int rdb_unpack_binary_str( + Rdb_field_packing *fpi, Field *field, + uchar *to, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + const char* from; + if (!(from= reader->read(fpi->m_max_image_len))) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + + memcpy(to, from, fpi->m_max_image_len); + return UNPACK_SUCCESS; +} + + +/* + Function of type rdb_index_field_unpack_t. + For UTF-8, we need to convert 2-byte wide-character entities back into + UTF8 sequences. 
+*/ + +static int rdb_unpack_utf8_str( + Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + my_core::CHARSET_INFO *cset= (my_core::CHARSET_INFO*)field->charset(); + const uchar *src; + if (!(src= (const uchar*)reader->read(fpi->m_max_image_len))) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + + const uchar *src_end= src + fpi->m_max_image_len; + uchar *dst_end= dst + field->pack_length(); + + while (src < src_end) + { + my_wc_t wc= (src[0] <<8) | src[1]; + src += 2; + int res= cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res > 0 && res <=3); + if (res < 0) + return UNPACK_FAILURE; + dst += res; + } + + cset->cset->fill(cset, reinterpret_cast(dst), + dst_end - dst, cset->pad_char); + return UNPACK_SUCCESS; +} + + +/* + Function of type rdb_index_field_pack_t +*/ + +static void rdb_pack_with_varchar_encoding( + Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst, + Rdb_pack_field_context *pack_ctx __attribute__((__unused__))) +{ + /* + Use a flag byte every Nth byte. Set it to (255 - #pad) where #pad is 0 + when the var length field filled all N-1 previous bytes and #pad is + otherwise the number of padding bytes used. + + If N=8 and the field is: + * 3 bytes (1, 2, 3) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 251 + * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252 + And the 4 byte string compares as greater than the 3 byte string + */ + const CHARSET_INFO *charset= field->charset(); + Field_varstring *field_var= (Field_varstring*)field; + + size_t value_length= (field_var->length_bytes == 1) ? + (uint) *field->ptr : + uint2korr(field->ptr); + size_t xfrm_len; + xfrm_len= charset->coll->strnxfrm(charset, + buf, fpi->m_max_image_len, + field_var->char_length(), + field_var->ptr + field_var->length_bytes, + value_length, + 0); + + /* Got a mem-comparable image in 'buf'. 
Now, produce varlength encoding */ + + size_t encoded_size= 0; + uchar *ptr= *dst; + while (1) + { + size_t copy_len= std::min((size_t)RDB_ESCAPE_LENGTH-1, xfrm_len); + size_t padding_bytes= RDB_ESCAPE_LENGTH - 1 - copy_len; + memcpy(ptr, buf, copy_len); + ptr += copy_len; + buf += copy_len; + // pad with zeros if necessary; + for (size_t idx= 0; idx < padding_bytes; idx++) + *(ptr++)= 0; + *(ptr++) = 255 - padding_bytes; + + xfrm_len -= copy_len; + encoded_size += RDB_ESCAPE_LENGTH; + if (padding_bytes !=0) + break; + } + *dst += encoded_size; +} + + +/* + Compare the string in [buf..buf_end) with a string that is an infinite + sequence of strings in space_xfrm +*/ + +static +int rdb_compare_string_with_spaces(const uchar *buf, const uchar *buf_end, + const std::vector *space_xfrm) +{ + int cmp= 0; + while (buf < buf_end) + { + size_t bytes = std::min((size_t) (buf_end - buf), space_xfrm->size()); + if ((cmp= memcmp(buf, space_xfrm->data(), bytes)) != 0) + break; + buf += bytes; + } + return cmp; +} + +static const int RDB_TRIMMED_CHARS_OFFSET= 8; +/* + Pack the data with Variable-Length Space-Padded Encoding. + + The encoding is there to meet two goals: + + Goal#1. Comparison. The SQL standard says + + " If the collation for the comparison has the PAD SPACE characteristic, + for the purposes of the comparison, the shorter value is effectively + extended to the length of the longer by concatenation of s on the + right. + + At the moment, all MySQL collations except one have the PAD SPACE + characteristic. The exception is the "binary" collation that is used by + [VAR]BINARY columns. (Note that binary collations for specific charsets, + like utf8_bin or latin1_bin are not the same as "binary" collation, they have + the PAD SPACE characteristic). + + Goal#2 is to preserve the number of trailing spaces in the original value. 
+ + This is achieved by using the following encoding: + The key part: + - Stores mem-comparable image of the column + - It is stored in chunks of fpi->m_segment_size bytes (*) + = If the remainder of the chunk is not occupied, it is padded with mem- + comparable image of the space character (cs->pad_char to be precise). + - The last byte of the chunk shows how the rest of column's mem-comparable + image would compare to mem-comparable image of the column extended with + spaces. There are three possible values. + - VARCHAR_CMP_LESS_THAN_SPACES, + - VARCHAR_CMP_EQUAL_TO_SPACES + - VARCHAR_CMP_GREATER_THAN_SPACES + + VARCHAR_CMP_EQUAL_TO_SPACES means that this chunk is the last one (the rest + is spaces, or something that sorts as spaces, so there is no reason to store + it). + + Example: if fpi->m_segment_size=5, and the collation is latin1_bin: + + 'abcd\0' => [ 'abcd' ]['\0 ' ] + 'abcd' => [ 'abcd' ] + 'abcd ' => [ 'abcd' ] + 'abcdZZZZ' => [ 'abcd' ][ 'ZZZZ' ] + + As mentioned above, the last chunk is padded with mem-comparable images of + cs->pad_char. It can be 1-byte long (latin1), 2 (utf8_bin), 3 (utf8mb4), etc. + + fpi->m_segment_size depends on the used collation. It is chosen to be such + that no mem-comparable image of space will ever stretch across the segments + (see get_segment_size_from_collation). + + == The value part (aka unpack_info) == + The value part stores the number of space characters that one needs to add + when unpacking the string. + - If the number is positive, it means add this many spaces at the end + - If the number is negative, it means padding has added extra spaces which + must be removed. + + Storage considerations + - depending on column's max size, the number may occupy 1 or 2 bytes + - the number of spaces that need to be removed is not more than + RDB_TRIMMED_CHARS_OFFSET=8, so we offset the number by that value and + then store it as unsigned. 
+ + @seealso + rdb_unpack_binary_or_utf8_varchar_space_pad + rdb_unpack_simple_varchar_space_pad + rdb_dummy_make_unpack_info + rdb_skip_variable_space_pad +*/ + +static void rdb_pack_with_varchar_space_pad( + Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst, + Rdb_pack_field_context *pack_ctx) +{ + Rdb_string_writer *unpack_info= pack_ctx->writer; + const CHARSET_INFO *charset= field->charset(); + auto field_var= static_cast(field); + + size_t value_length= (field_var->length_bytes == 1) ? + (uint) *field->ptr : + uint2korr(field->ptr); + + size_t trimmed_len= + charset->cset->lengthsp(charset, + (const char*)field_var->ptr + + field_var->length_bytes, + value_length); + size_t xfrm_len; + xfrm_len= charset->coll->strnxfrm(charset, + buf, fpi->m_max_image_len, + field_var->char_length(), + field_var->ptr + field_var->length_bytes, + trimmed_len, + 0); + + /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */ + uchar *buf_end= buf + xfrm_len; + + size_t encoded_size= 0; + uchar *ptr= *dst; + size_t padding_bytes; + while (true) + { + size_t copy_len= std::min(fpi->m_segment_size-1, buf_end - buf); + padding_bytes= fpi->m_segment_size - 1 - copy_len; + memcpy(ptr, buf, copy_len); + ptr += copy_len; + buf += copy_len; + + if (padding_bytes) + { + memcpy(ptr, fpi->space_xfrm->data(), padding_bytes); + ptr+= padding_bytes; + *ptr= VARCHAR_CMP_EQUAL_TO_SPACES; // last segment + } + else + { + // Compare the string suffix with a hypothetical infinite string of + // spaces. It could be that the first difference is beyond the end of + // current chunk. + int cmp= rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); + + if (cmp < 0) + *ptr= VARCHAR_CMP_LESS_THAN_SPACES; + else if (cmp > 0) + *ptr= VARCHAR_CMP_GREATER_THAN_SPACES; + else + { + // It turns out all the rest are spaces. 
+ *ptr= VARCHAR_CMP_EQUAL_TO_SPACES; + } + } + encoded_size += fpi->m_segment_size; + + if (*(ptr++) == VARCHAR_CMP_EQUAL_TO_SPACES) + break; + } + + // m_unpack_info_stores_value means unpack_info stores the whole original + // value. There is no need to store the number of trimmed/padded endspaces + // in that case. + if (unpack_info && !fpi->m_unpack_info_stores_value) + { + // (value_length - trimmed_len) is the number of trimmed space *characters* + // then, padding_bytes is the number of *bytes* added as padding + // then, we add 8, because we don't store negative values. + DBUG_ASSERT(padding_bytes % fpi->space_xfrm_len == 0); + DBUG_ASSERT((value_length - trimmed_len)% fpi->space_mb_len == 0); + size_t removed_chars= RDB_TRIMMED_CHARS_OFFSET + + (value_length - trimmed_len) / fpi->space_mb_len - + padding_bytes/fpi->space_xfrm_len; + + if (fpi->m_unpack_info_uses_two_bytes) + { + unpack_info->write_uint16(removed_chars); + } + else + { + DBUG_ASSERT(removed_chars < 0x100); + unpack_info->write_uint8(removed_chars); + } + } + + *dst += encoded_size; +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +static int rdb_unpack_binary_or_utf8_varchar( + Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader __attribute__((__unused__))) +{ + const uchar *ptr; + size_t len= 0; + bool finished= false; + uchar *d0= dst; + Field_varstring* field_var= (Field_varstring*)field; + dst += field_var->length_bytes; + // How much we can unpack + size_t dst_len= field_var->pack_length() - field_var->length_bytes; + uchar *dst_end= dst + dst_len; + + /* Decode the length-emitted encoding here */ + while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) + { + /* See rdb_pack_with_varchar_encoding. 
*/ + uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; + + if (used_bytes > RDB_ESCAPE_LENGTH - 1) + { + return UNPACK_FAILURE; /* cannot store that much, invalid data */ + } + + if (dst_len < used_bytes) + { + /* Encoded index tuple is longer than the size in the record buffer? */ + return UNPACK_FAILURE; + } + + /* + Now, we need to decode used_bytes of data and append them to the value. + */ + if (fpi->m_varchar_charset == &my_charset_utf8_bin) + { + if (used_bytes & 1) + { + /* + UTF-8 characters are encoded into two-byte entities. There is no way + we can have an odd number of bytes after encoding. + */ + return UNPACK_FAILURE; + } + + const uchar *src= ptr; + const uchar *src_end= ptr + used_bytes; + while (src < src_end) + { + my_wc_t wc= (src[0] <<8) | src[1]; + src += 2; + const CHARSET_INFO *cset= fpi->m_varchar_charset; + int res= cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res > 0 && res <=3); + if (res < 0) + return UNPACK_FAILURE; + dst += res; + len += res; + dst_len -= res; + } + } + else + { + memcpy(dst, ptr, used_bytes); + dst += used_bytes; + dst_len -= used_bytes; + len += used_bytes; + } + + if (used_bytes < RDB_ESCAPE_LENGTH - 1) + { + finished= true; + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) + { + d0[0]= len; + } + else + { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + +/* + @seealso + rdb_pack_with_varchar_space_pad - packing function + rdb_unpack_simple_varchar_space_pad - unpacking function for 'simple' + charsets. 
+ rdb_skip_variable_space_pad - skip function +*/ +static int rdb_unpack_binary_or_utf8_varchar_space_pad( + Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader) +{ + const uchar *ptr; + size_t len= 0; + bool finished= false; + Field_varstring* field_var= static_cast(field); + uchar *d0= dst; + uchar *dst_end= dst + field_var->pack_length(); + dst += field_var->length_bytes; + + uint space_padding_bytes= 0; + uint extra_spaces; + if ((fpi->m_unpack_info_uses_two_bytes? + unp_reader->read_uint16(&extra_spaces): + unp_reader->read_uint8(&extra_spaces))) + { + return UNPACK_FAILURE; + } + + if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET) + { + space_padding_bytes= -(static_cast(extra_spaces) - + RDB_TRIMMED_CHARS_OFFSET); + extra_spaces= 0; + } + else + extra_spaces -= RDB_TRIMMED_CHARS_OFFSET; + + space_padding_bytes *= fpi->space_xfrm_len; + + /* Decode the length-emitted encoding here */ + while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) + { + char last_byte= ptr[fpi->m_segment_size - 1]; + size_t used_bytes; + if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment + { + if (space_padding_bytes > (fpi->m_segment_size-1)) + return UNPACK_FAILURE; // Cannot happen, corrupted data + used_bytes= (fpi->m_segment_size-1) - space_padding_bytes; + finished= true; + } + else + { + if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES && + last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) + { + return UNPACK_FAILURE; // Invalid value + } + used_bytes= fpi->m_segment_size-1; + } + + // Now, need to decode used_bytes of data and append them to the value. + if (fpi->m_varchar_charset == &my_charset_utf8_bin) + { + if (used_bytes & 1) + { + /* + UTF-8 characters are encoded into two-byte entities. There is no way + we can have an odd number of bytes after encoding. 
+ */ + return UNPACK_FAILURE; + } + + const uchar *src= ptr; + const uchar *src_end= ptr + used_bytes; + while (src < src_end) + { + my_wc_t wc= (src[0] <<8) | src[1]; + src += 2; + const CHARSET_INFO *cset= fpi->m_varchar_charset; + int res= cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res <=3); + if (res <= 0) + return UNPACK_FAILURE; + dst += res; + len += res; + } + } + else + { + if (dst + used_bytes > dst_end) + return UNPACK_FAILURE; + memcpy(dst, ptr, used_bytes); + dst += used_bytes; + len += used_bytes; + } + + if (finished) + { + if (extra_spaces) + { + // Both binary and UTF-8 charset store space as ' ', + // so the following is ok: + if (dst + extra_spaces > dst_end) + return UNPACK_FAILURE; + memset(dst, fpi->m_varchar_charset->pad_char, extra_spaces); + len += extra_spaces; + } + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) + { + d0[0]= len; + } + else + { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + +///////////////////////////////////////////////////////////////////////// + +/* + Function of type rdb_make_unpack_info_t +*/ + +static void rdb_make_unpack_unknown( + const Rdb_collation_codec *codec __attribute__((__unused__)), + const Field *field, Rdb_pack_field_context *pack_ctx) +{ + pack_ctx->writer->write(field->ptr, field->pack_length()); +} + + +/* + This point of this function is only to indicate that unpack_info is + available. + + The actual unpack_info data is produced by the function that packs the key, + that is, rdb_pack_with_varchar_space_pad. 
+*/ + +static void rdb_dummy_make_unpack_info( + const Rdb_collation_codec *codec __attribute__((__unused__)), + const Field *field __attribute__((__unused__)), + Rdb_pack_field_context *pack_ctx __attribute__((__unused__))) +{ +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +static int rdb_unpack_unknown(Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader) +{ + const uchar *ptr; + uint len = fpi->m_unpack_data_len; + // We don't use anything from the key, so skip over it. + if (rdb_skip_max_length(fpi, field, reader)) + { + return UNPACK_FAILURE; + } + // Unpack info is needed but none available. + if (len > 0 && unp_reader == nullptr) + { + return UNPACK_INFO_MISSING; + } + if ((ptr= (const uchar*)unp_reader->read(len))) + { + memcpy(dst, ptr, len); + return UNPACK_SUCCESS; + } + return UNPACK_FAILURE; +} + +/* + Function of type rdb_make_unpack_info_t +*/ + +static void rdb_make_unpack_unknown_varchar( + const Rdb_collation_codec *codec __attribute__((__unused__)), + const Field *field, Rdb_pack_field_context *pack_ctx) +{ + auto f= static_cast(field); + uint len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); + len+= f->length_bytes; + pack_ctx->writer->write(field->ptr, len); +} + + +/* + Function of type rdb_index_field_unpack_t + + @detail + Unpack a key part in an "unknown" collation from its + (mem_comparable_form, unpack_info) form. + + "Unknown" means we have no clue about how mem_comparable_form is made from + the original string, so we keep the whole original string in the unpack_info. 
+ + @seealso + rdb_make_unpack_unknown, rdb_unpack_unknown +*/ + +static int rdb_unpack_unknown_varchar(Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader) +{ + const uchar *ptr; + uchar *d0= dst; + auto f= static_cast(field); + dst += f->length_bytes; + uint len_bytes= f->length_bytes; + // We don't use anything from the key, so skip over it. + if (fpi->m_skip_func(fpi, field, reader)) + { + return UNPACK_FAILURE; + } + // Unpack info is needed but none available. + DBUG_ASSERT(len_bytes > 0); + if (unp_reader == nullptr) + { + return UNPACK_INFO_MISSING; + } + if ((ptr= (const uchar*)unp_reader->read(len_bytes))) + { + memcpy(d0, ptr, len_bytes); + uint len= len_bytes == 1 ? (uint) *ptr : uint2korr(ptr); + if ((ptr= (const uchar*)unp_reader->read(len))) + { + memcpy(dst, ptr, len); + return UNPACK_SUCCESS; + } + } + return UNPACK_FAILURE; +} + + +/* + Write unpack_data for a "simple" collation +*/ +static void rdb_write_unpack_simple(Rdb_bit_writer *writer, + const Rdb_collation_codec *codec, + const uchar *src, size_t src_len) +{ + for (uint i= 0; i < src_len; i++) + { + writer->write(codec->m_enc_size[src[i]], codec->m_enc_idx[src[i]]); + } +} + + +static uint rdb_read_unpack_simple(Rdb_bit_reader *reader, + const Rdb_collation_codec *codec, + const uchar *src, size_t src_len, + uchar *dst) +{ + for (uint i= 0; i < src_len; i++) + { + if (codec->m_dec_size[src[i]] > 0) + { + uint *ret; + // Unpack info is needed but none available. + if (reader == nullptr) + { + return UNPACK_INFO_MISSING; + } + + if ((ret= reader->read(codec->m_dec_size[src[i]])) == nullptr) + { + return UNPACK_FAILURE; + } + dst[i]= codec->m_dec_idx[*ret][src[i]]; + } + else + { + dst[i]= codec->m_dec_idx[0][src[i]]; + } + } + + return UNPACK_SUCCESS; +} + +/* + Function of type rdb_make_unpack_info_t + + @detail + Make unpack_data for VARCHAR(n) in a "simple" charset. 
+*/ + +static void +rdb_make_unpack_simple_varchar(const Rdb_collation_codec* codec, + const Field *field, + Rdb_pack_field_context *pack_ctx) +{ + auto f= static_cast(field); + uchar *src= f->ptr + f->length_bytes; + size_t src_len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); + Rdb_bit_writer bit_writer(pack_ctx->writer); + // The std::min compares characters with bytes, but for simple collations, + // mbmaxlen = 1. + rdb_write_unpack_simple(&bit_writer, codec, src, + std::min((size_t)f->char_length(), src_len)); +} + +/* + Function of type rdb_index_field_unpack_t + + @seealso + rdb_pack_with_varchar_space_pad - packing function + rdb_unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function +*/ + +int +rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *fpi, Field *field, + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader) +{ + const uchar *ptr; + size_t len= 0; + bool finished= false; + uchar *d0= dst; + Field_varstring* field_var= static_cast(field); + // For simple collations, char_length is also number of bytes. + DBUG_ASSERT((size_t)fpi->m_max_image_len >= field_var->char_length()); + uchar *dst_end= dst + field_var->pack_length(); + dst += field_var->length_bytes; + Rdb_bit_reader bit_reader(unp_reader); + + uint space_padding_bytes= 0; + uint extra_spaces; + if (!unp_reader) + { + return UNPACK_INFO_MISSING; + } + + if ((fpi->m_unpack_info_uses_two_bytes? 
+ unp_reader->read_uint16(&extra_spaces): + unp_reader->read_uint8(&extra_spaces))) + { + return UNPACK_FAILURE; + } + + if (extra_spaces <= 8) + { + space_padding_bytes= -(static_cast(extra_spaces) - 8); + extra_spaces= 0; + } + else + extra_spaces -= 8; + + space_padding_bytes *= fpi->space_xfrm_len; + + /* Decode the length-emitted encoding here */ + while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) + { + char last_byte= ptr[fpi->m_segment_size - 1]; // number of padding bytes + size_t used_bytes; + if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) + { + // this is the last one + if (space_padding_bytes > (fpi->m_segment_size-1)) + return UNPACK_FAILURE; // Cannot happen, corrupted data + used_bytes= (fpi->m_segment_size-1) - space_padding_bytes; + finished= true; + } + else + { + if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES && + last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) + { + return UNPACK_FAILURE; + } + used_bytes= fpi->m_segment_size-1; + } + + if (dst + used_bytes > dst_end) + { + // The value on disk is longer than the field definition allows? + return UNPACK_FAILURE; + } + + uint ret; + if ((ret= rdb_read_unpack_simple(&bit_reader, + fpi->m_charset_codec, ptr, used_bytes, + dst)) != UNPACK_SUCCESS) + { + return ret; + } + + dst += used_bytes; + len += used_bytes; + + if (finished) + { + if (extra_spaces) + { + if (dst + extra_spaces > dst_end) + return UNPACK_FAILURE; + // pad_char has a 1-byte form in all charsets that + // are handled by rdb_init_collation_mapping. + memset(dst, field_var->charset()->pad_char, extra_spaces); + len += extra_spaces; + } + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) + { + d0[0]= len; + } + else + { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + + +/* + Function of type rdb_make_unpack_info_t + + @detail + Make unpack_data for CHAR(n) value in a "simple" charset. 
+ It is CHAR(N), so SQL layer has padded the value with spaces up to N chars. + + @seealso + The VARCHAR variant is in rdb_make_unpack_simple_varchar +*/ + +static void rdb_make_unpack_simple(const Rdb_collation_codec *codec, + const Field *field, + Rdb_pack_field_context *pack_ctx) +{ + uchar *src= field->ptr; + Rdb_bit_writer bit_writer(pack_ctx->writer); + rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length()); +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +static int rdb_unpack_simple(Rdb_field_packing *fpi, + Field *field __attribute__((__unused__)), + uchar *dst, + Rdb_string_reader *reader, + Rdb_string_reader *unp_reader) +{ + const uchar *ptr; + uint len = fpi->m_max_image_len; + Rdb_bit_reader bit_reader(unp_reader); + + if (!(ptr= (const uchar*)reader->read(len))) + { + return UNPACK_FAILURE; + } + + return rdb_read_unpack_simple(unp_reader ? &bit_reader : nullptr, + fpi->m_charset_codec, ptr, len, dst); +} + + +// See Rdb_charset_space_info::spaces_xfrm +const int RDB_SPACE_XFRM_SIZE= 32; + +// A class holding information about how space character is represented in a +// charset. +class Rdb_charset_space_info +{ + public: + // A few strxfrm'ed space characters, at least RDB_SPACE_XFRM_SIZE bytes + std::vector spaces_xfrm; + + // length(strxfrm(' ')) + size_t space_xfrm_len; + + // length of the space character itself + // Typically space is just 0x20 (length=1) but in ucs2 it is 0x00 0x20 + // (length=2) + size_t space_mb_len; +}; + +static std::array, MY_ALL_CHARSETS_SIZE> +rdb_mem_comparable_space; + + +/* + @brief + For a given charset, get + - strxfrm(' '), a sample that is at least RDB_SPACE_XFRM_SIZE bytes long. 
+ - length of strxfrm(charset, ' ') + - length of the space character in the charset + + @param cs IN Charset to get the space for + @param ptr OUT A few space characters + @param len OUT Return length of the space (in bytes) + + @detail + It is tempting to pre-generate mem-comparable form of space character for + every charset on server startup. + One can't do that: some charsets are not initialized until somebody + attempts to use them (e.g. create or open a table that has a field that + uses the charset). +*/ + +static +void rdb_get_mem_comparable_space(const CHARSET_INFO *cs, + const std::vector **xfrm, + size_t *xfrm_len, + size_t *mb_len) +{ + DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE); + if (!rdb_mem_comparable_space[cs->number].get()) + { + mysql_mutex_lock(&rdb_mem_cmp_space_mutex); + if (!rdb_mem_comparable_space[cs->number].get()) + { + // Upper bound of how many bytes can be occupied by multi-byte form of a + // character in any charset. + const int MAX_MULTI_BYTE_CHAR_SIZE= 4; + DBUG_ASSERT(cs->mbmaxlen <= MAX_MULTI_BYTE_CHAR_SIZE); + + // multi-byte form of the ' ' (space) character + uchar space_mb[MAX_MULTI_BYTE_CHAR_SIZE]; + + size_t space_mb_len= cs->cset->wc_mb(cs, (my_wc_t) cs->pad_char, + space_mb, + space_mb + sizeof(space_mb)); + + uchar space[20]; // mem-comparable image of the space character + + size_t space_len= cs->coll->strnxfrm(cs, + space, sizeof(space), + 1, + space_mb, + space_mb_len, + 0); + Rdb_charset_space_info *info= new Rdb_charset_space_info; + info->space_xfrm_len= space_len; + info->space_mb_len= space_mb_len; + while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE) + { + info->spaces_xfrm.insert(info->spaces_xfrm.end(), space, + space + space_len); + } + rdb_mem_comparable_space[cs->number].reset(info); + } + mysql_mutex_unlock(&rdb_mem_cmp_space_mutex); + } + + *xfrm= &rdb_mem_comparable_space[cs->number]->spaces_xfrm; + *xfrm_len= rdb_mem_comparable_space[cs->number]->space_xfrm_len; + *mb_len= 
rdb_mem_comparable_space[cs->number]->space_mb_len; +} + +mysql_mutex_t rdb_mem_cmp_space_mutex; + +std::array + rdb_collation_data; +mysql_mutex_t rdb_collation_data_mutex; + +static bool rdb_is_collation_supported(const my_core::CHARSET_INFO * cs) +{ + return (cs->coll == &my_collation_8bit_simple_ci_handler); +} + +static const Rdb_collation_codec *rdb_init_collation_mapping( + const my_core::CHARSET_INFO *cs) +{ + DBUG_ASSERT(cs && cs->state & MY_CS_AVAILABLE); + const Rdb_collation_codec *codec= rdb_collation_data[cs->number]; + + if (codec == nullptr && rdb_is_collation_supported(cs)) + { + mysql_mutex_lock(&rdb_collation_data_mutex); + codec= rdb_collation_data[cs->number]; + if (codec == nullptr) + { + Rdb_collation_codec *cur= nullptr; + + // Compute reverse mapping for simple collations. + if (cs->coll == &my_collation_8bit_simple_ci_handler) + { + cur= new Rdb_collation_codec; + std::map> rev_map; + size_t max_conflict_size= 0; + for (int src = 0; src < 256; src++) + { + uchar dst= cs->sort_order[src]; + rev_map[dst].push_back(src); + max_conflict_size= std::max(max_conflict_size, rev_map[dst].size()); + } + cur->m_dec_idx.resize(max_conflict_size); + + for (auto const &p : rev_map) + { + uchar dst= p.first; + for (uint idx = 0; idx < p.second.size(); idx++) + { + uchar src= p.second[idx]; + uchar bits= my_bit_log2(my_round_up_to_next_power(p.second.size())); + cur->m_enc_idx[src]= idx; + cur->m_enc_size[src]= bits; + cur->m_dec_size[dst]= bits; + cur->m_dec_idx[idx][dst]= src; + } + } + + cur->m_make_unpack_info_func= + {{ rdb_make_unpack_simple_varchar, rdb_make_unpack_simple }}; + cur->m_unpack_func= + {{ rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple }}; + } + else + { + // Out of luck for now. 
+ } + + if (cur != nullptr) + { + codec= cur; + cur->m_cs= cs; + rdb_collation_data[cs->number]= cur; + } + } + mysql_mutex_unlock(&rdb_collation_data_mutex); + } + + return codec; +} + + +static int get_segment_size_from_collation(const CHARSET_INFO* cs) +{ + int ret; + if (cs == &my_charset_utf8mb4_bin || + cs == &my_charset_utf16_bin || + cs == &my_charset_utf16le_bin || + cs == &my_charset_utf32_bin) + { + /* + In these collations, a character produces one weight, which is 3 bytes. + Segment has 3 characters, add one byte for VARCHAR_CMP_* marker, and we + get 3*3+1=10 + */ + ret= 10; + } + else + { + /* + All other collations. There are two classes: + - Unicode-based, except for collations mentioned in the if-condition. + For these all weights are 2 bytes long, a character may produce 0..8 + weights. + in any case, 8 bytes of payload in the segment guarantee that the last + space character won't span across segments. + + - Collations not based on unicode. These have length(strxfrm(' '))=1, + there nothing to worry about. + + In both cases, take 8 bytes payload + 1 byte for VARCHAR_CMP* marker. + */ + ret= 9; + } + DBUG_ASSERT(ret < RDB_SPACE_XFRM_SIZE); + return ret; +} + + +/* + @brief + Setup packing of index field into its mem-comparable form + + @detail + - It is possible produce mem-comparable form for any datatype. + - Some datatypes also allow to unpack the original value from its + mem-comparable form. + = Some of these require extra information to be stored in "unpack_info". + unpack_info is not a part of mem-comparable form, it is only used to + restore the original value + + @param + field IN field to be packed/un-packed + + @return + TRUE - Field can be read with index-only reads + FALSE - Otherwise +*/ + +bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, + uint keynr_arg, uint key_part_arg, + uint16 key_length) +{ + int res= false; + enum_field_types type= field ? 
field->real_type() : MYSQL_TYPE_LONGLONG; + + m_keynr= keynr_arg; + m_key_part= key_part_arg; + + m_maybe_null= field ? field->real_maybe_null() : false; + m_unpack_func= nullptr; + m_make_unpack_info_func= nullptr; + m_unpack_data_len= 0; + space_xfrm= nullptr; // safety + + /* Calculate image length. By default, is is pack_length() */ + m_max_image_len= field ? field->pack_length() : + ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN; + m_skip_func= rdb_skip_max_length; + m_pack_func= rdb_pack_with_make_sort_key; + + switch (type) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_TINY: + m_unpack_func= rdb_unpack_integer; + return true; + + case MYSQL_TYPE_DOUBLE: + m_unpack_func= rdb_unpack_double; + return true; + + case MYSQL_TYPE_FLOAT: + m_unpack_func= rdb_unpack_float; + return true; + + case MYSQL_TYPE_NEWDECIMAL: + /* + Decimal is packed with Field_new_decimal::make_sort_key, which just + does memcpy. + Unpacking decimal values was supported only after fix for issue#253, + because of that ha_rocksdb::get_storage_type() handles decimal values + in a special way. + */ + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP2: + /* These are packed with Field_temporal_with_date_and_timef::make_sort_key */ + case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */ + case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */ + /* Everything that comes here is packed with just a memcpy(). */ + m_unpack_func= rdb_unpack_binary_str; + return true; + + case MYSQL_TYPE_NEWDATE: + /* + This is packed by Field_newdate::make_sort_key. 
It assumes the data is + 3 bytes, and packing is done by swapping the byte order (for both big- + and little-endian) + */ + m_unpack_func= rdb_unpack_newdate; + return true; + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + { + if (key_descr && + key_descr->index_format_min_check( + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, + Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) + { + // The my_charset_bin collation is special in that it will consider + // shorter strings sorting as less than longer strings. + // + // See Field_blob::make_sort_key for details. + m_max_image_len= key_length + + (field->charset() == &my_charset_bin + ? reinterpret_cast(field)->pack_length_no_ptr() + : 0); + // Return false because indexes on text/blob will always require + // a prefix. With a prefix, the optimizer will not be able to do an + // index-only scan since there may be content occuring after the prefix + // length. + return false; + } + } + default: + break; + } + + m_unpack_info_stores_value= false; + /* Handle [VAR](CHAR|BINARY) */ + + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) + { + /* + For CHAR-based columns, check how strxfrm image will take. + field->field_length = field->char_length() * cs->mbmaxlen. 
+ */ + const CHARSET_INFO *cs= field->charset(); + m_max_image_len= cs->coll->strnxfrmlen(cs, field->field_length); + } + const bool is_varchar= (type == MYSQL_TYPE_VARCHAR); + const CHARSET_INFO *cs= field->charset(); + // max_image_len before chunking is taken into account + int max_image_len_before_chunks= m_max_image_len; + + if (is_varchar) + { + // The default for varchar is variable-length, without space-padding for + // comparisons + m_varchar_charset= cs; + m_skip_func= rdb_skip_variable_length; + m_pack_func= rdb_pack_with_varchar_encoding; + m_max_image_len= + (m_max_image_len/(RDB_ESCAPE_LENGTH-1) + 1) * RDB_ESCAPE_LENGTH; + + auto field_var= static_cast(field); + m_unpack_info_uses_two_bytes= (field_var->field_length + 8 >= 0x100); + } + + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) + { + // See http://dev.mysql.com/doc/refman/5.7/en/string-types.html for + // information about character-based datatypes are compared. + bool use_unknown_collation= false; + DBUG_EXECUTE_IF("myrocks_enable_unknown_collation_index_only_scans", + use_unknown_collation= true;); + + if (cs == &my_charset_bin) + { + // - SQL layer pads BINARY(N) so that it always is N bytes long. + // - For VARBINARY(N), values may have different lengths, so we're using + // variable-length encoding. This is also the only charset where the + // values are not space-padded for comparison. + m_unpack_func= is_varchar? rdb_unpack_binary_or_utf8_varchar : + rdb_unpack_binary_str; + res= true; + } + else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin) + { + // For _bin collations, mem-comparable form of the string is the string + // itself. 
+ + if (is_varchar) + { + if (!key_descr || + key_descr->index_format_min_check( + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, + Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) + { + // VARCHARs + // - are compared as if they were space-padded + // - but are not actually space-padded (reading the value back + // produces the original value, without the padding) + m_unpack_func= rdb_unpack_binary_or_utf8_varchar_space_pad; + m_skip_func= rdb_skip_variable_space_pad; + m_pack_func= rdb_pack_with_varchar_space_pad; + m_make_unpack_info_func= rdb_dummy_make_unpack_info; + m_segment_size= get_segment_size_from_collation(cs); + m_max_image_len= + (max_image_len_before_chunks/(m_segment_size-1) + 1) * + m_segment_size; + rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, + &space_mb_len); + } + else + { + // Older variant where VARCHARs were not compared as space-padded: + m_unpack_func= rdb_unpack_binary_or_utf8_varchar; + m_skip_func= rdb_skip_variable_length; + m_pack_func= rdb_pack_with_varchar_encoding; + } + } + else + { + // SQL layer pads CHAR(N) values to their maximum length. + // We just store that and restore it back. + m_unpack_func= (cs == &my_charset_latin1_bin)? rdb_unpack_binary_str: + rdb_unpack_utf8_str; + } + res= true; + } + else + { + // This is [VAR]CHAR(n) and the collation is not $(charset_name)_bin + + res= true; // index-only scans are possible + m_unpack_data_len= is_varchar ? 0 : field->field_length; + uint idx= is_varchar ? 
0 : 1; + const Rdb_collation_codec *codec= nullptr; + + if (is_varchar) + { + if (cs->levels_for_order != 1) + { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: you're trying to create an index " + "with a multi-level collation %s", cs->name); + // NO_LINT_DEBUG + sql_print_warning("MyRocks will handle this collation internally " + " as if it had a NO_PAD attribute."); + } + // VARCHAR requires space-padding for doing comparisons + // + // The check for cs->levels_for_order is to catch + // latin2_czech_cs and cp1250_czech_cs - multi-level collations + // that Variable-Length Space Padded Encoding can't handle. + // It is not expected to work for any other multi-level collations, + // either. + // Currently we handle these collations as NO_PAD, even if they have + // PAD_SPACE attribute. + if ((!key_descr || + key_descr->index_format_min_check( + Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, + Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) && + cs->levels_for_order == 1) + { + m_pack_func= rdb_pack_with_varchar_space_pad; + m_skip_func= rdb_skip_variable_space_pad; + m_segment_size= get_segment_size_from_collation(cs); + m_max_image_len= + (max_image_len_before_chunks/(m_segment_size-1) + 1) * + m_segment_size; + rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, + &space_mb_len); + } + else + { + m_pack_func= rdb_pack_with_varchar_encoding; + m_skip_func= rdb_skip_variable_length; + } + } + + if ((codec= rdb_init_collation_mapping(cs)) != nullptr) + { + // The collation allows to store extra information in the unpack_info + // which can be used to restore the original value from the + // mem-comparable form. + m_make_unpack_info_func= codec->m_make_unpack_info_func[idx]; + m_unpack_func= codec->m_unpack_func[idx]; + m_charset_codec= codec; + } + else if (use_unknown_collation) + { + // We have no clue about how this collation produces mem-comparable + // form. 
Our way of restoring the original value is to keep a copy of + // the original value in unpack_info. + m_unpack_info_stores_value= true; + m_make_unpack_info_func= is_varchar ? rdb_make_unpack_unknown_varchar + : rdb_make_unpack_unknown; + m_unpack_func= is_varchar ? rdb_unpack_unknown_varchar + : rdb_unpack_unknown; + } + else + { + // Same as above: we don't know how to restore the value from its + // mem-comparable form. + // Here, we just indicate to the SQL layer we can't do it. + DBUG_ASSERT(m_unpack_func == nullptr); + m_unpack_info_stores_value= false; + res= false; // Indicate that index-only reads are not possible + } + } + + // Make an adjustment: unpacking partially covered columns is not + // possible. field->table is populated when called through + // Rdb_key_def::setup, but not during ha_rocksdb::index_flags. + if (field->table) + { + // Get the original Field object and compare lengths. If this key part is + // a prefix of a column, then we can't do index-only scans. + if (field->table->field[field->field_index]->field_length != key_length) + { + m_unpack_func= nullptr; + m_make_unpack_info_func= nullptr; + m_unpack_info_stores_value= true; + res= false; + } + } + else + { + if (field->field_length != key_length) + { + m_unpack_func= nullptr; + m_make_unpack_info_func= nullptr; + m_unpack_info_stores_value= true; + res= false; + } + } + } + return res; +} + + +Field *Rdb_field_packing::get_field_in_table(const TABLE *tbl) const +{ + return tbl->key_info[m_keynr].key_part[m_key_part].field; +} + + +void Rdb_field_packing::fill_hidden_pk_val(uchar **dst, + longlong hidden_pk_id) const +{ + DBUG_ASSERT(m_max_image_len == 8); + + String to; + rdb_netstr_append_uint64(&to, hidden_pk_id); + memcpy(*dst, to.ptr(), m_max_image_len); + + *dst += m_max_image_len; +} + + +/////////////////////////////////////////////////////////////////////////////////////////// +// Rdb_ddl_manager 
+/////////////////////////////////////////////////////////////////////////////////////////// + +Rdb_tbl_def::~Rdb_tbl_def() +{ + auto ddl_manager= rdb_get_ddl_manager(); + /* Don't free key definitions */ + if (m_key_descr_arr) + { + for (uint i= 0; i < m_key_count; i++) { + if (ddl_manager && m_key_descr_arr[i]) { + ddl_manager->erase_index_num(m_key_descr_arr[i]->get_gl_index_id()); + } + + m_key_descr_arr[i]= nullptr; + } + + delete[] m_key_descr_arr; + m_key_descr_arr= nullptr; + } +} + +/* + Put table definition DDL entry. Actual write is done at + Rdb_dict_manager::commit. + + We write + dbname.tablename -> version + {key_entry, key_entry, key_entry, ... } + + Where key entries are a tuple of + ( cf_id, index_nr ) +*/ + +bool Rdb_tbl_def::put_dict(Rdb_dict_manager* dict, rocksdb::WriteBatch *batch, + uchar *key, size_t keylen) +{ + StringBuffer<8 * Rdb_key_def::PACKED_SIZE> indexes; + indexes.alloc(Rdb_key_def::VERSION_SIZE + + m_key_count * Rdb_key_def::PACKED_SIZE * 2); + rdb_netstr_append_uint16(&indexes, Rdb_key_def::DDL_ENTRY_INDEX_VERSION); + + for (uint i = 0; i < m_key_count; i++) + { + const std::shared_ptr& kd= m_key_descr_arr[i]; + + uchar flags = + (kd->m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | + (kd->m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0); + + uint cf_id= kd->get_cf()->GetID(); + /* + If cf_id already exists, cf_flags must be the same. + To prevent race condition, reading/modifying/committing CF flags + need to be protected by mutex (dict_manager->lock()). + When RocksDB supports transaction with pessimistic concurrency + control, we can switch to use it and removing mutex. + */ + uint existing_cf_flags; + if (dict->get_cf_flags(cf_id, &existing_cf_flags)) + { + if (existing_cf_flags != flags) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Column Family Flag is different from existing flag. 
" + "Assign a new CF flag, or do not change existing " + "CF flag.", MYF(0)); + return true; + } + } + else + { + dict->add_cf_flags(batch, cf_id, flags); + } + + rdb_netstr_append_uint32(&indexes, cf_id); + rdb_netstr_append_uint32(&indexes, kd->m_index_number); + dict->add_or_update_index_cf_mapping(batch, kd->m_index_type, + kd->m_kv_format_version, + kd->m_index_number, cf_id); + } + + rocksdb::Slice skey((char*)key, keylen); + rocksdb::Slice svalue(indexes.c_ptr(), indexes.length()); + + dict->put_key(batch, skey, svalue); + return false; +} + +void Rdb_tbl_def::check_if_is_mysql_system_table() +{ + static const char *const system_dbs[] = { + "mysql", + "performance_schema", + "information_schema", + }; + + m_is_mysql_system_table= false; + for (uint ii = 0; ii < array_elements(system_dbs); ii++) { + if (strcmp(m_dbname.c_str(), system_dbs[ii]) == 0) { + m_is_mysql_system_table= true; + break; + } + } +} + +void Rdb_tbl_def::set_name(const std::string& name) +{ + int err __attribute__((__unused__)); + + m_dbname_tablename= name; + err= rdb_split_normalized_tablename(name, &m_dbname, &m_tablename, + &m_partition); + DBUG_ASSERT(err == 0); + + check_if_is_mysql_system_table(); +} + +/* + Static function of type my_hash_get_key that gets invoked by + the m_ddl_hash object of type my_core::HASH. + It manufactures a key (db+table name in our case) from a record + (Rdb_tbl_def in our case). +*/ +const uchar* Rdb_ddl_manager::get_hash_key( + Rdb_tbl_def *rec, size_t *length, + my_bool not_used __attribute__((__unused__))) +{ + const std::string& dbname_tablename= rec->full_tablename(); + *length= dbname_tablename.size(); + return reinterpret_cast(dbname_tablename.c_str()); +} + + +/* + Static function of type void (*my_hash_free_element_func_t)(void*) that gets + invoked by the m_ddl_hash object of type my_core::HASH. + It deletes a record (Rdb_tbl_def in our case). 
+*/
+void Rdb_ddl_manager::free_hash_elem(void* data)
+{
+  delete reinterpret_cast<Rdb_tbl_def*>(data);
+}
+
+/* Drop the (index number -> key def position) mapping for one index */
+void Rdb_ddl_manager::erase_index_num(GL_INDEX_ID gl_index_id)
+{
+  m_index_num_to_keydef.erase(gl_index_id);
+}
+
+
+namespace // anonymous namespace = not visible outside this source file
+{
+/*
+  Cross-checks the tables registered in the RocksDB data dictionary
+  against the .frm files present in the server's datadir.
+*/
+struct Rdb_validate_tbls : public Rdb_tables_scanner
+{
+  /* (table name, is_partition) */
+  using tbl_info_t= std::pair<std::string, bool>;
+  using tbl_list_t= std::map<std::string, std::set<tbl_info_t>>;
+
+  /* Tables the data dictionary says should exist, grouped by database */
+  tbl_list_t m_list;
+
+  int add_table(Rdb_tbl_def* tdef) override;
+
+  bool compare_to_actual_tables(
+    const std::string& datadir,
+    bool* has_errors);
+
+  bool scan_for_frms(
+    const std::string& datadir,
+    const std::string& dbname,
+    bool* has_errors);
+
+  bool check_frm_file(
+    const std::string& fullpath,
+    const std::string& dbname,
+    const std::string& tablename,
+    bool* has_errors);
+};
+} // anonymous namespace
+
+
+/*
+  Get a list of tables that we expect to have .frm files for. This will use the
+  information just read from the RocksDB data dictionary.
+*/
+int Rdb_validate_tbls::add_table(Rdb_tbl_def* tdef)
+{
+  DBUG_ASSERT(tdef != nullptr);
+
+  /* Add the database/table into the list */
+  bool is_partition= tdef->base_partition().size() != 0;
+  m_list[tdef->base_dbname()].insert(
+    tbl_info_t(tdef->base_tablename(), is_partition));
+
+  return 0;
+}
+
+/*
+  Access the .frm file for this dbname/tablename and see if it is a RocksDB
+  table (or partition table).
+*/
+bool Rdb_validate_tbls::check_frm_file(
+  const std::string& fullpath,
+  const std::string& dbname,
+  const std::string& tablename,
+  bool* has_errors)
+{
+  /* Check this .frm file to see what engine it uses */
+  String fullfilename(fullpath.c_str(), &my_charset_bin);
+  fullfilename.append(FN_DIRSEP);
+  fullfilename.append(tablename.c_str());
+  fullfilename.append(".frm");
+
+  /*
+    This function will return the legacy_db_type of the table. Currently
+    it does not reference the first parameter (THD* thd), but if it ever
+    did in the future we would need to make a version that does it without
+    the connection handle as we don't have one here.
+  */
+  enum legacy_db_type eng_type;
+  frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type);
+  if (type == FRMTYPE_ERROR)
+  {
+    /*
+      Note: c_ptr() (not ptr()) - String::ptr() is not guaranteed to be
+      NUL-terminated after append().
+    */
+    sql_print_warning("RocksDB: Failed to open/read .frm file: %s",
+                      fullfilename.c_ptr());
+    return false;
+  }
+
+  if (type == FRMTYPE_TABLE)
+  {
+    /* For a RocksDB table do we have a reference in the data dictionary? */
+    if (eng_type == DB_TYPE_ROCKSDB)
+    {
+      /*
+        Attempt to remove the table entry from the list of tables. If this
+        fails then we know we had a .frm file that wasn't registered in RocksDB.
+      */
+      tbl_info_t element(tablename, false);
+      if (m_list.count(dbname) == 0 ||
+          m_list[dbname].erase(element) == 0)
+      {
+        sql_print_warning("RocksDB: Schema mismatch - "
+                          "A .frm file exists for table %s.%s, "
+                          "but that table is not registered in RocksDB",
+                          dbname.c_str(), tablename.c_str());
+        *has_errors = true;
+      }
+    }
+    else if (eng_type == DB_TYPE_PARTITION_DB)
+    {
+      /*
+        For partition tables, see if it is in the m_list as a partition,
+        but don't generate an error if it isn't there - we don't know that the
+        .frm is for RocksDB.
+      */
+      if (m_list.count(dbname) > 0)
+      {
+        m_list[dbname].erase(tbl_info_t(tablename, true));
+      }
+    }
+  }
+
+  return true;
+}
+
+/* Scan the database subdirectory for .frm files */
+bool Rdb_validate_tbls::scan_for_frms(
+  const std::string& datadir,
+  const std::string& dbname,
+  bool* has_errors)
+{
+  bool result= true;
+  std::string fullpath= datadir + dbname;
+  struct st_my_dir* dir_info= my_dir(fullpath.c_str(), MYF(MY_DONT_SORT));
+
+  /* Access the directory */
+  if (dir_info == nullptr)
+  {
+    sql_print_warning("RocksDB: Could not open database directory: %s",
+                      fullpath.c_str());
+    return false;
+  }
+
+  /* Scan through the files in the directory */
+  struct fileinfo* file_info= dir_info->dir_entry;
+  for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++)
+  {
+    /* Find .frm files that are not temp files (those that start with '#') */
+    const char* ext= strrchr(file_info->name, '.');
+    if (ext != nullptr && !is_prefix(file_info->name, tmp_file_prefix) &&
+        strcmp(ext, ".frm") == 0)
+    {
+      std::string tablename= std::string(file_info->name,
+                                         ext - file_info->name);
+
+      /* Check to see if the .frm file is from RocksDB */
+      if (!check_frm_file(fullpath, dbname, tablename, has_errors))
+      {
+        result= false;
+        break;
+      }
+    }
+  }
+
+  /* Remove any databases who have no more tables listed */
+  if (m_list.count(dbname) == 1 && m_list[dbname].size() == 0)
+  {
+    m_list.erase(dbname);
+  }
+
+  /* Release the directory entry */
+  my_dirend(dir_info);
+
+  return result;
+}
+
+/*
+  Scan the datadir for all databases (subdirectories) and get a list of .frm
+  files they contain
+*/
+bool Rdb_validate_tbls::compare_to_actual_tables(
+  const std::string& datadir,
+  bool* has_errors)
+{
+  bool result= true;
+  struct st_my_dir* dir_info;
+  struct fileinfo* file_info;
+
+  dir_info= my_dir(datadir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT));
+  if (dir_info == nullptr)
+  {
+    sql_print_warning("RocksDB: could not open datadir: %s", datadir.c_str());
+    return false;
+  }
+
+  file_info= dir_info->dir_entry;
+  for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++)
+  {
+    /* Ignore files/dirs starting with '.' */
+    if (file_info->name[0] == '.')
+      continue;
+
+    /* Ignore all non-directory files */
+    if (!MY_S_ISDIR(file_info->mystat->st_mode))
+      continue;
+
+    /* Scan all the .frm files in the directory */
+    if (!scan_for_frms(datadir, file_info->name, has_errors))
+    {
+      result= false;
+      break;
+    }
+  }
+
+  /* Release the directory info */
+  my_dirend(dir_info);
+
+  return result;
+}
+
+/*
+  Validate that all the tables in the RocksDB database dictionary match the .frm
+  files in the datadir
+*/
+bool Rdb_ddl_manager::validate_schemas(void)
+{
+  bool has_errors= false;
+  std::string datadir= std::string(mysql_real_data_home);
+  Rdb_validate_tbls table_list;
+
+  /* Get the list of tables from the database dictionary */
+  if (scan_for_tables(&table_list) != 0)
+  {
+    return false;
+  }
+
+  /* Compare that to the list of actual .frm files */
+  if (!table_list.compare_to_actual_tables(datadir, &has_errors))
+  {
+    return false;
+  }
+
+  /*
+    Any tables left in the tables list are ones that are registered in RocksDB
+    but don't have .frm files.
+ */ + for (const auto& db : table_list.m_list) + { + for (const auto& table : db.second) + { + sql_print_warning("RocksDB: Schema mismatch - " + "Table %s.%s is registered in RocksDB " + "but does not have a .frm file", db.first.c_str(), + table.first.c_str()); + has_errors = true; + } + } + + return !has_errors; +} + +bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, + Rdb_cf_manager *cf_manager, + uint32_t validate_tables) +{ + m_dict= dict_arg; + mysql_rwlock_init(0, &m_rwlock); + (void) my_hash_init(&m_ddl_hash, + /*system_charset_info*/ &my_charset_bin, + 32, 0, 0, + (my_hash_get_key) Rdb_ddl_manager::get_hash_key, + Rdb_ddl_manager::free_hash_elem, + 0); + + /* Read the data dictionary and populate the hash */ + uchar ddl_entry[Rdb_key_def::INDEX_NUMBER_SIZE]; + rdb_netbuf_store_index(ddl_entry, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + rocksdb::Slice ddl_entry_slice((char*)ddl_entry, + Rdb_key_def::INDEX_NUMBER_SIZE); + + /* Reading data dictionary should always skip bloom filter */ + rocksdb::Iterator* it= m_dict->new_iterator(); + int i= 0; + + uint max_index_id_in_dict= 0; + m_dict->get_max_index_id(&max_index_id_in_dict); + + for (it->Seek(ddl_entry_slice); it->Valid(); it->Next()) + { + const uchar *ptr; + const uchar *ptr_end; + rocksdb::Slice key= it->key(); + rocksdb::Slice val= it->value(); + + if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE && + memcmp(key.data(), ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE)) + break; + + if (key.size() <= Rdb_key_def::INDEX_NUMBER_SIZE) + { + sql_print_error("RocksDB: Table_store: key has length %d (corruption?)", + (int)key.size()); + return true; + } + + Rdb_tbl_def *tdef= new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); + + // Now, read the DDLs. 
+ int real_val_size= val.size() - Rdb_key_def::VERSION_SIZE; + if (real_val_size % Rdb_key_def::PACKED_SIZE*2) + { + sql_print_error("RocksDB: Table_store: invalid keylist for table %s", + tdef->full_tablename().c_str()); + return true; + } + tdef->m_key_count= real_val_size / (Rdb_key_def::PACKED_SIZE*2); + tdef->m_key_descr_arr= new std::shared_ptr[tdef->m_key_count]; + + ptr= reinterpret_cast(val.data()); + int version= rdb_netbuf_read_uint16(&ptr); + if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION) + { + sql_print_error("RocksDB: DDL ENTRY Version was not expected." + "Expected: %d, Actual: %d", + Rdb_key_def::DDL_ENTRY_INDEX_VERSION, version); + return true; + } + ptr_end= ptr + real_val_size; + for (uint keyno= 0; ptr < ptr_end; keyno++) + { + GL_INDEX_ID gl_index_id; + rdb_netbuf_read_gl_index(&ptr, &gl_index_id); + uint16 m_index_dict_version= 0; + uchar m_index_type= 0; + uint16 kv_version= 0; + uint flags= 0; + if (!m_dict->get_index_info(gl_index_id, &m_index_dict_version, + &m_index_type, &kv_version)) + { + sql_print_error("RocksDB: Could not get index information " + "for Index Number (%u,%u), table %s", + gl_index_id.cf_id, gl_index_id.index_id, + tdef->full_tablename().c_str()); + return true; + } + if (max_index_id_in_dict < gl_index_id.index_id) + { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but also found larger index id %u from dictionary. 
" + "This should never happen and possibly a bug.", + max_index_id_in_dict, gl_index_id.index_id); + return true; + } + if (!m_dict->get_cf_flags(gl_index_id.cf_id, &flags)) + { + sql_print_error("RocksDB: Could not get Column Family Flags " + "for CF Number %d, table %s", + gl_index_id.cf_id, + tdef->full_tablename().c_str()); + return true; + } + + rocksdb::ColumnFamilyHandle* cfh = cf_manager->get_cf(gl_index_id.cf_id); + DBUG_ASSERT(cfh != nullptr); + + /* + We can't fully initialize Rdb_key_def object here, because full + initialization requires that there is an open TABLE* where we could + look at Field* objects and set max_length and other attributes + */ + tdef->m_key_descr_arr[keyno]= + std::make_shared(gl_index_id.index_id, keyno, cfh, + m_index_dict_version, + m_index_type, kv_version, + flags & Rdb_key_def::REVERSE_CF_FLAG, + flags & Rdb_key_def::AUTO_CF_FLAG, "", + m_dict->get_stats(gl_index_id)); + } + put(tdef); + i++; + } + + /* + If validate_tables is greater than 0 run the validation. Only fail the + initialzation if the setting is 1. If the setting is 2 we continue. 
+ */ + if (validate_tables > 0 && !validate_schemas()) { + if (validate_tables == 1) { + sql_print_error("RocksDB: Problems validating data dictionary " + "against .frm files, exiting"); + return true; + } + } + + // index ids used by applications should not conflict with + // data dictionary index ids + if (max_index_id_in_dict < Rdb_key_def::END_DICT_INDEX_ID) + { + max_index_id_in_dict= Rdb_key_def::END_DICT_INDEX_ID; + } + + m_sequence.init(max_index_id_in_dict+1); + + if (!it->status().ok()) + { + std::string s= it->status().ToString(); + sql_print_error("RocksDB: Table_store: load error: %s", s.c_str()); + return true; + } + delete it; + sql_print_information("RocksDB: Table_store: loaded DDL data for %d tables", i); + return false; +} + + +Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, bool lock) +{ + if (lock) + { + mysql_rwlock_rdlock(&m_rwlock); + } + + Rdb_tbl_def* rec= reinterpret_cast( + my_hash_search(&m_ddl_hash, + reinterpret_cast(table_name.c_str()), + table_name.size())); + + if (lock) + { + mysql_rwlock_unlock(&m_rwlock); + } + + return rec; +} + +// this is a safe version of the find() function below. It acquires a read +// lock on m_rwlock to make sure the Rdb_key_def is not discarded while we +// are finding it. Copying it into 'ret' increments the count making sure +// that the object will not be discarded until we are finished with it. 
+std::shared_ptr Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) +{ + std::shared_ptr ret(nullptr); + + mysql_rwlock_rdlock(&m_rwlock); + + auto it= m_index_num_to_keydef.find(gl_index_id); + if (it != m_index_num_to_keydef.end()) + { + auto table_def = find(it->second.first, false); + if (table_def && it->second.second < table_def->m_key_count) + { + auto& kd= table_def->m_key_descr_arr[it->second.second]; + if (kd->max_storage_fmt_length() != 0) + { + ret = kd; + } + } + } + + mysql_rwlock_unlock(&m_rwlock); + + return ret; +} + +// this method assumes at least read-only lock on m_rwlock +const std::shared_ptr& Rdb_ddl_manager::find( + GL_INDEX_ID gl_index_id) +{ + auto it= m_index_num_to_keydef.find(gl_index_id); + if (it != m_index_num_to_keydef.end()) { + auto table_def = find(it->second.first, false); + if (table_def) { + if (it->second.second < table_def->m_key_count) { + return table_def->m_key_descr_arr[it->second.second]; + } + } + } + + static std::shared_ptr empty = nullptr; + + return empty; +} + +void Rdb_ddl_manager::set_stats( + const std::unordered_map& stats) +{ + mysql_rwlock_wrlock(&m_rwlock); + for (auto src : stats) { + auto keydef = find(src.second.m_gl_index_id); + if (keydef) { + keydef->m_stats = src.second; + } + } + mysql_rwlock_unlock(&m_rwlock); +} + +void Rdb_ddl_manager::adjust_stats( + const std::vector& new_data, + const std::vector& deleted_data) +{ + mysql_rwlock_wrlock(&m_rwlock); + int i = 0; + for (const auto& data : {new_data, deleted_data}) + { + for (const auto& src : data) + { + auto keydef= find(src.m_gl_index_id); + if (keydef) + { + keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length()); + m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; + } + } + i++; + } + bool should_save_stats= !m_stats2store.empty(); + mysql_rwlock_unlock(&m_rwlock); + if (should_save_stats) + { + // Queue an async persist_stats(false) call to the background thread. 
+ rdb_queue_save_stats_request(); + } +} + +void Rdb_ddl_manager::persist_stats(bool sync) +{ + mysql_rwlock_wrlock(&m_rwlock); + auto local_stats2store = std::move(m_stats2store); + m_stats2store.clear(); + mysql_rwlock_unlock(&m_rwlock); + + // Persist stats + std::unique_ptr wb = m_dict->begin(); + std::vector stats; + std::transform( + local_stats2store.begin(), local_stats2store.end(), + std::back_inserter(stats), + []( + const std::pair& s + ) {return s.second;}); + m_dict->add_stats(wb.get(), stats); + m_dict->commit(wb.get(), sync); +} + +/* + Put table definition of `tbl` into the mapping, and also write it to the + on-disk data dictionary. +*/ + +int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *tbl, + rocksdb::WriteBatch *batch) +{ + uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; + uint pos= 0; + + rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string& dbname_tablename= tbl->full_tablename(); + memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); + pos += dbname_tablename.size(); + + int res; + if ((res= tbl->put_dict(m_dict, batch, buf, pos))) + { + return res; + } + if ((res= put(tbl))) + { + return res; + } + return 0; +} + + +/* Return 0 - ok, other value - error */ +/* TODO: + This function modifies m_ddl_hash and m_index_num_to_keydef. + However, these changes need to be reversed if dict_manager.commit fails + See the discussion here: https://reviews.facebook.net/D35925#inline-259167 + Tracked by https://github.com/facebook/mysql-5.6/issues/33 +*/ +int Rdb_ddl_manager::put(Rdb_tbl_def *tbl, bool lock) +{ + Rdb_tbl_def *rec; + my_bool result; + const std::string& dbname_tablename= tbl->full_tablename(); + + if (lock) + mysql_rwlock_wrlock(&m_rwlock); + + // We have to do this find because 'tbl' is not yet in the list. 
We need + // to find the one we are replacing ('rec') + rec= reinterpret_cast(find(dbname_tablename, false)); + if (rec) + { + // this will free the old record. + my_hash_delete(&m_ddl_hash, reinterpret_cast(rec)); + } + result= my_hash_insert(&m_ddl_hash, reinterpret_cast(tbl)); + + for (uint keyno= 0; keyno < tbl->m_key_count; keyno++) { + m_index_num_to_keydef[tbl->m_key_descr_arr[keyno]->get_gl_index_id()]= + std::make_pair(dbname_tablename, keyno); + } + + if (lock) + mysql_rwlock_unlock(&m_rwlock); + return result; +} + + +void Rdb_ddl_manager::remove(Rdb_tbl_def *tbl, + rocksdb::WriteBatch *batch, bool lock) +{ + if (lock) + mysql_rwlock_wrlock(&m_rwlock); + + uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; + uint pos= 0; + + rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string& dbname_tablename= tbl->full_tablename(); + memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); + pos += dbname_tablename.size(); + + rocksdb::Slice tkey((char*)buf, pos); + m_dict->delete_key(batch, tkey); + + /* The following will also delete the object: */ + my_hash_delete(&m_ddl_hash, reinterpret_cast(tbl)); + + if (lock) + mysql_rwlock_unlock(&m_rwlock); +} + + +bool Rdb_ddl_manager::rename(const std::string& from, const std::string& to, + rocksdb::WriteBatch *batch) +{ + Rdb_tbl_def *rec; + Rdb_tbl_def *new_rec; + bool res= true; + uchar new_buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; + uint new_pos= 0; + + mysql_rwlock_wrlock(&m_rwlock); + if (!(rec= find(from, false))) + { + mysql_rwlock_unlock(&m_rwlock); + return true; + } + + new_rec= new Rdb_tbl_def(to); + + new_rec->m_key_count= rec->m_key_count; + new_rec->m_auto_incr_val= + rec->m_auto_incr_val.load(std::memory_order_relaxed); + new_rec->m_key_descr_arr= rec->m_key_descr_arr; + // so that it's not free'd when deleting the old rec + rec->m_key_descr_arr= nullptr; + + // Create a new key + 
rdb_netbuf_store_index(new_buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + new_pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string& dbname_tablename= new_rec->full_tablename(); + memcpy(new_buf + new_pos, dbname_tablename.c_str(), dbname_tablename.size()); + new_pos += dbname_tablename.size(); + + // Create a key to add + if (!new_rec->put_dict(m_dict, batch, new_buf, new_pos)) + { + remove(rec, batch, false); + put(new_rec, false); + res= false; // ok + } + + mysql_rwlock_unlock(&m_rwlock); + return res; +} + + +void Rdb_ddl_manager::cleanup() +{ + my_hash_free(&m_ddl_hash); + mysql_rwlock_destroy(&m_rwlock); + m_sequence.cleanup(); +} + + +int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* tables_scanner) +{ + int i, ret; + Rdb_tbl_def *rec; + + DBUG_ASSERT(tables_scanner != nullptr); + + mysql_rwlock_rdlock(&m_rwlock); + + ret= 0; + i= 0; + + while ((rec = reinterpret_cast(my_hash_element(&m_ddl_hash, + i)))) + { + ret = tables_scanner->add_table(rec); + if (ret) + break; + i++; + } + + mysql_rwlock_unlock(&m_rwlock); + return ret; +} + + +/* + Rdb_binlog_manager class implementation +*/ + +bool Rdb_binlog_manager::init(Rdb_dict_manager *dict_arg) +{ + DBUG_ASSERT(dict_arg != nullptr); + m_dict= dict_arg; + + rdb_netbuf_store_index(m_key_buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER); + m_key_slice = rocksdb::Slice(reinterpret_cast(m_key_buf), + Rdb_key_def::INDEX_NUMBER_SIZE); + return false; +} + +void Rdb_binlog_manager::cleanup() +{ +} + +/** + Set binlog name, pos and optionally gtid into WriteBatch. + This function should be called as part of transaction commit, + since binlog info is set only at transaction commit. + Actual write into RocksDB is not done here, so checking if + write succeeded or not is not possible here. 
+ @param binlog_name Binlog name + @param binlog_pos Binlog pos + @param binlog_gtid Binlog GTID + @param batch WriteBatch +*/ +void Rdb_binlog_manager::update(const char* binlog_name, + const my_off_t binlog_pos, + const char* binlog_gtid, + rocksdb::WriteBatchBase* batch) +{ + if (binlog_name && binlog_pos) + { + // max binlog length (512) + binlog pos (4) + binlog gtid (57) < 1024 + uchar value_buf[1024]; + m_dict->put_key(batch, m_key_slice, + pack_value(value_buf, binlog_name, + binlog_pos, binlog_gtid)); + } +} + +/** + Read binlog committed entry stored in RocksDB, then unpack + @param[OUT] binlog_name Binlog name + @param[OUT] binlog_pos Binlog pos + @param[OUT] binlog_gtid Binlog GTID + @return + true is binlog info was found (valid behavior) + false otherwise +*/ +bool Rdb_binlog_manager::read(char *binlog_name, my_off_t *binlog_pos, + char *binlog_gtid) +{ + bool ret= false; + if (binlog_name) + { + std::string value; + rocksdb::Status status= m_dict->get_value(m_key_slice, &value); + if(status.ok()) + { + if (!unpack_value((const uchar*)value.c_str(), + binlog_name, binlog_pos, binlog_gtid)) + ret= true; + } + } + return ret; +} + +/** + Pack binlog_name, binlog_pos, binlog_gtid into preallocated + buffer, then converting and returning a RocksDB Slice + @param buf Preallocated buffer to set binlog info. 
+ @param binlog_name Binlog name + @param binlog_pos Binlog pos + @param binlog_gtid Binlog GTID + @return rocksdb::Slice converted from buf and its length +*/ +rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf, + const char* binlog_name, + const my_off_t binlog_pos, + const char* binlog_gtid) +{ + uint pack_len= 0; + + // store version + rdb_netbuf_store_uint16(buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION); + pack_len += Rdb_key_def::VERSION_SIZE; + + // store binlog file name length + DBUG_ASSERT(strlen(binlog_name) <= 65535); + uint16_t binlog_name_len = strlen(binlog_name); + rdb_netbuf_store_uint16(buf+pack_len, binlog_name_len); + pack_len += 2; + + // store binlog file name + memcpy(buf+pack_len, binlog_name, binlog_name_len); + pack_len += binlog_name_len; + + // store binlog pos + rdb_netbuf_store_uint32(buf+pack_len, binlog_pos); + pack_len += 4; + + // store binlog gtid length. + // If gtid was not set, store 0 instead + uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0; + rdb_netbuf_store_uint16(buf+pack_len, binlog_gtid_len); + pack_len += 2; + + if (binlog_gtid_len > 0) + { + // store binlog gtid + memcpy(buf+pack_len, binlog_gtid, binlog_gtid_len); + pack_len += binlog_gtid_len; + } + + return rocksdb::Slice((char*)buf, pack_len); +} + +/** + Unpack value then split into binlog_name, binlog_pos (and binlog_gtid) + @param[IN] value Binlog state info fetched from RocksDB + @param[OUT] binlog_name Binlog name + @param[OUT] binlog_pos Binlog pos + @param[OUT] binlog_gtid Binlog GTID + @return true on error +*/ +bool Rdb_binlog_manager::unpack_value(const uchar *value, char *binlog_name, + my_off_t *binlog_pos, + char *binlog_gtid) +{ + uint pack_len= 0; + + DBUG_ASSERT(binlog_pos != nullptr); + + // read version + uint16_t version= rdb_netbuf_to_uint16(value); + pack_len += Rdb_key_def::VERSION_SIZE; + if (version != Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION) + return true; + + // read binlog file name length + uint16_t 
binlog_name_len= rdb_netbuf_to_uint16(value+pack_len); + pack_len += 2; + if (binlog_name_len) + { + // read and set binlog name + memcpy(binlog_name, value+pack_len, binlog_name_len); + binlog_name[binlog_name_len]= '\0'; + pack_len += binlog_name_len; + + // read and set binlog pos + *binlog_pos= rdb_netbuf_to_uint32(value+pack_len); + pack_len += 4; + + // read gtid length + uint16_t binlog_gtid_len= rdb_netbuf_to_uint16(value+pack_len); + pack_len += 2; + if (binlog_gtid && binlog_gtid_len > 0) + { + // read and set gtid + memcpy(binlog_gtid, value+pack_len, binlog_gtid_len); + binlog_gtid[binlog_gtid_len]= '\0'; + pack_len += binlog_gtid_len; + } + } + return false; +} + +/** + Inserts a row into mysql.slave_gtid_info table. Doing this inside + storage engine is more efficient than inserting/updating through MySQL. + + @param[IN] id Primary key of the table. + @param[IN] db Database name. This is column 2 of the table. + @param[IN] gtid Gtid in human readable form. This is column 3 of the table. + @param[IN] write_batch Handle to storage engine writer. +*/ +void Rdb_binlog_manager::update_slave_gtid_info( + uint id, const char* db, const char* gtid, + rocksdb::WriteBatchBase* write_batch) +{ + if (id && db && gtid) { + // Make sure that if the slave_gtid_info table exists we have a + // pointer to it via m_slave_gtid_info_tbl. + if (!m_slave_gtid_info_tbl.load()) { + m_slave_gtid_info_tbl.store( + rdb_get_ddl_manager()->find("mysql.slave_gtid_info")); + } + if (!m_slave_gtid_info_tbl.load()) { + // slave_gtid_info table is not present. Simply return. 
+ return; + } + DBUG_ASSERT(m_slave_gtid_info_tbl.load()->m_key_count == 1); + + const std::shared_ptr& kd= + m_slave_gtid_info_tbl.load()->m_key_descr_arr[0]; + String value; + + // Build key + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE + 4]= {0}; + uchar* buf= key_buf; + rdb_netbuf_store_index(buf, kd->get_index_number()); + buf += Rdb_key_def::INDEX_NUMBER_SIZE; + rdb_netbuf_store_uint32(buf, id); + buf += 4; + rocksdb::Slice key_slice = + rocksdb::Slice((const char*)key_buf, buf-key_buf); + + // Build value + uchar value_buf[128]= {0}; + DBUG_ASSERT(gtid); + uint db_len= strlen(db); + uint gtid_len= strlen(gtid); + buf= value_buf; + // 1 byte used for flags. Empty here. + buf++; + + // Write column 1. + DBUG_ASSERT(strlen(db) <= 64); + rdb_netbuf_store_byte(buf, db_len); + buf++; + memcpy(buf, db, db_len); + buf += db_len; + + // Write column 2. + DBUG_ASSERT(gtid_len <= 56); + rdb_netbuf_store_byte(buf, gtid_len); + buf++; + memcpy(buf, gtid, gtid_len); + buf += gtid_len; + rocksdb::Slice value_slice = + rocksdb::Slice((const char*)value_buf, buf-value_buf); + + write_batch->Put(kd->get_cf(), key_slice, value_slice); + } +} + +bool Rdb_dict_manager::init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager) +{ + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + m_db= rdb_dict; + bool is_automatic; + m_system_cfh= cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME, + "", nullptr, &is_automatic); + rdb_netbuf_store_index(m_key_buf_max_index_id, + Rdb_key_def::MAX_INDEX_ID); + m_key_slice_max_index_id= rocksdb::Slice( + reinterpret_cast(m_key_buf_max_index_id), + Rdb_key_def::INDEX_NUMBER_SIZE); + resume_drop_indexes(); + rollback_ongoing_index_creation(); + + return (m_system_cfh == nullptr); +} + +std::unique_ptr Rdb_dict_manager::begin() +{ + return std::unique_ptr(new rocksdb::WriteBatch); +} + +void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase *batch, + const rocksdb::Slice &key, + const rocksdb::Slice &value) +{ + batch->Put(m_system_cfh, key, 
value); +} + +rocksdb::Status Rdb_dict_manager::get_value(const rocksdb::Slice &key, + std::string *value) const +{ + rocksdb::ReadOptions options; + options.total_order_seek= true; + return m_db->Get(options, m_system_cfh, key, value); +} + +void Rdb_dict_manager::delete_key(rocksdb::WriteBatchBase *batch, + const rocksdb::Slice &key) const +{ + batch->Delete(m_system_cfh, key); +} + +rocksdb::Iterator* Rdb_dict_manager::new_iterator() +{ + /* Reading data dictionary should always skip bloom filter */ + rocksdb::ReadOptions read_options; + read_options.total_order_seek= true; + return m_db->NewIterator(read_options, m_system_cfh); +} + +int Rdb_dict_manager::commit(rocksdb::WriteBatch *batch, bool sync) +{ + if (!batch) + return 1; + int res= 0; + rocksdb::WriteOptions options; + options.sync= sync; + rocksdb::Status s= m_db->Write(options, batch); + res= !s.ok(); // we return true when something failed + if (res) + { + rdb_handle_io_error(s, RDB_IO_ERROR_DICT_COMMIT); + } + batch->Clear(); + return res; +} + +void Rdb_dict_manager::dump_index_id(uchar *netbuf, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) +{ + rdb_netbuf_store_uint32(netbuf, dict_type); + rdb_netbuf_store_uint32(netbuf + Rdb_key_def::INDEX_NUMBER_SIZE, + gl_index_id.cf_id); + rdb_netbuf_store_uint32(netbuf + 2 * Rdb_key_def::INDEX_NUMBER_SIZE, + gl_index_id.index_id); +} + +void Rdb_dict_manager::delete_with_prefix(rocksdb::WriteBatch* batch, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) const +{ + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + dump_index_id(key_buf, dict_type, gl_index_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + delete_key(batch, key); +} + +void Rdb_dict_manager::add_or_update_index_cf_mapping( + rocksdb::WriteBatch* batch, + const uchar m_index_type, + const uint16_t kv_version, + const uint32_t index_id, + const uint32_t cf_id) +{ + uchar 
key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + uchar value_buf[256]= {0}; + GL_INDEX_ID gl_index_id= {cf_id, index_id}; + dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + uchar* ptr= value_buf; + rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST); + ptr+= 2; + rdb_netbuf_store_byte(ptr, m_index_type); + ptr+= 1; + rdb_netbuf_store_uint16(ptr, kv_version); + ptr+= 2; + + rocksdb::Slice value= rocksdb::Slice((char*)value_buf, ptr-value_buf); + batch->Put(m_system_cfh, key, value); +} + +void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch* batch, + const uint32_t cf_id, + const uint32_t cf_flags) +{ + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE+ + Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); + rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + rdb_netbuf_store_uint16(value_buf, Rdb_key_def::CF_DEFINITION_VERSION); + rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, cf_flags); + rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, key, value); +} + +void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch* batch, + const GL_INDEX_ID &gl_index_id) const +{ + delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id); +} + + +bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, + uint16_t *m_index_dict_version, + uchar *m_index_type, + uint16_t *kv_version) +{ + bool found= false; + bool error= false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + rocksdb::Status status= get_value(key, &value); + if 
(status.ok()) + { + const uchar* val= (const uchar*)value.c_str(); + const uchar* ptr= val; + *m_index_dict_version= rdb_netbuf_to_uint16(val); + *kv_version= 0; + *m_index_type= 0; + ptr+= 2; + switch (*m_index_dict_version) { + + case Rdb_key_def::INDEX_INFO_VERSION_VERIFY_KV_FORMAT: + case Rdb_key_def::INDEX_INFO_VERSION_GLOBAL_ID: + *m_index_type= rdb_netbuf_to_byte(ptr); + ptr+= 1; + *kv_version= rdb_netbuf_to_uint16(ptr); + found= true; + break; + + default: + error= true; + break; + } + + switch (*m_index_type) + { + case Rdb_key_def::INDEX_TYPE_PRIMARY: + case Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY: + { + error= *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + break; + } + case Rdb_key_def::INDEX_TYPE_SECONDARY: + error= *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + break; + default: + error= true; + break; + } + } + + if (error) + { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Found invalid key version number (%u, %u, %u) " + "from data dictionary. This should never happen " + "and it may be a bug.", *m_index_dict_version, + *m_index_type, *kv_version); + abort_with_stack_traces(); + } + + return found; +} + +bool Rdb_dict_manager::get_cf_flags(const uint32_t cf_id, uint32_t *cf_flags) +{ + bool found= false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; + rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); + rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + rocksdb::Status status= get_value(key, &value); + if (status.ok()) + { + const uchar* val= (const uchar*)value.c_str(); + uint16_t version= rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::CF_DEFINITION_VERSION) + { + *cf_flags= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE); + found= true; + } + } + return found; +} + +/* + Returning index ids that were marked as deleted (via DROP TABLE) but + still not removed by 
drop_index_thread yet, or indexes that are marked as + ongoing creation. + */ +void Rdb_dict_manager::get_ongoing_index_operation( + std::vector* gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) +{ + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + uchar index_buf[Rdb_key_def::INDEX_NUMBER_SIZE]; + rdb_netbuf_store_uint32(index_buf, dd_type); + rocksdb::Slice index_slice(reinterpret_cast(index_buf), + Rdb_key_def::INDEX_NUMBER_SIZE); + + rocksdb::Iterator* it= new_iterator(); + for (it->Seek(index_slice); it->Valid(); it->Next()) + { + rocksdb::Slice key= it->key(); + const uchar* ptr= (const uchar*)key.data(); + + /* + Ongoing drop/create index operations require key to be of the form: + dd_type + cf_id + index_id (== INDEX_NUMBER_SIZE * 3) + + This may need to be changed in the future if we want to process a new + ddl_type with different format. + */ + if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3 || + rdb_netbuf_to_uint32(ptr) != dd_type) + { + break; + } + + // We don't check version right now since currently we always store only + // Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION = 1 as a value. + // If increasing version number, we need to add version check logic here. + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id= rdb_netbuf_to_uint32(ptr+Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_id.index_id= rdb_netbuf_to_uint32( + ptr + 2 * Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_ids->push_back(gl_index_id); + } + delete it; +} + +/* + Returning true if index_id is create/delete ongoing (undergoing creation or + marked as deleted via DROP TABLE but drop_index_thread has not wiped yet) + or not. 
+ */ +bool Rdb_dict_manager::is_index_operation_ongoing( + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) +{ + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + bool found= false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + dump_index_id(key_buf, dd_type, gl_index_id); + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + + rocksdb::Status status= get_value(key, &value); + if (status.ok()) + { + found= true; + } + return found; +} + +/* + Adding index_id to data dictionary so that the index id is removed + by drop_index_thread, or to track online index creation. + */ +void Rdb_dict_manager::start_ongoing_index_operation( + rocksdb::WriteBatch* batch, + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) +{ + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE]= {0}; + dump_index_id(key_buf, dd_type, gl_index_id); + + // version as needed + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) + { + rdb_netbuf_store_uint16(value_buf, + Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION); + } + else + { + rdb_netbuf_store_uint16(value_buf, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING_VERSION); + } + + rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, key, value); +} + +/* + Removing index_id from data dictionary to confirm drop_index_thread + completed dropping entire key/values of the index_id + */ +void Rdb_dict_manager::end_ongoing_index_operation(rocksdb::WriteBatch* batch, + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) +{ + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == 
Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + delete_with_prefix(batch, dd_type, gl_index_id); +} + +/* + Returning true if there is no target index ids to be removed + by drop_index_thread + */ +bool Rdb_dict_manager::is_drop_index_empty() +{ + std::vector gl_index_ids; + get_ongoing_drop_indexes(&gl_index_ids); + return gl_index_ids.empty(); +} + +/* + This function is supposed to be called by DROP TABLE. Logging messages + that dropping indexes started, and adding data dictionary so that + all associated indexes to be removed + */ +void Rdb_dict_manager::add_drop_table(std::shared_ptr* key_descr, + uint32 n_keys, + rocksdb::WriteBatch *batch) +{ + std::unordered_set dropped_index_ids; + for (uint32 i = 0; i < n_keys; i++) + { + dropped_index_ids.insert(key_descr[i]->get_gl_index_id()); + } + + add_drop_index(dropped_index_ids, batch); +} + +/* + Called during inplace index drop operations. Logging messages + that dropping indexes started, and adding data dictionary so that + all associated indexes to be removed + */ +void Rdb_dict_manager::add_drop_index( + const std::unordered_set& gl_index_ids, + rocksdb::WriteBatch *batch) +{ + for (const auto& gl_index_id : gl_index_ids) + { + log_start_drop_index(gl_index_id, "Begin"); + start_drop_index(batch, gl_index_id); + } +} + +/* + Called during inplace index creation operations. Logging messages + that adding indexes started, and updates data dictionary with all associated + indexes to be added. + */ +void Rdb_dict_manager::add_create_index( + const std::unordered_set& gl_index_ids, + rocksdb::WriteBatch *batch) +{ + for (const auto& gl_index_id : gl_index_ids) + { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Begin index creation (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); + start_create_index(batch, gl_index_id); + } +} + +/* + This function is supposed to be called by drop_index_thread, when it + finished dropping any index, or at the completion of online index creation. 
+ */ +void Rdb_dict_manager::finish_indexes_operation( + const std::unordered_set& gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) +{ + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + std::unique_ptr wb= begin(); + rocksdb::WriteBatch *batch= wb.get(); + + for (const auto& gl_index_id : gl_index_ids) + { + if (is_index_operation_ongoing(gl_index_id, dd_type)) + { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Finished %s (%u,%u)", + dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ? + "filtering dropped index" : "index creation", + gl_index_id.cf_id, gl_index_id.index_id); + + end_ongoing_index_operation(batch, gl_index_id, dd_type); + } + + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) + { + delete_index_info(batch, gl_index_id); + } + } + commit(batch); +} + +/* + This function is supposed to be called when initializing + Rdb_dict_manager (at startup). If there is any index ids that are + drop ongoing, printing out messages for diagnostics purposes. + */ +void Rdb_dict_manager::resume_drop_indexes() +{ + std::vector gl_index_ids; + get_ongoing_drop_indexes(&gl_index_ids); + + uint max_index_id_in_dict= 0; + get_max_index_id(&max_index_id_in_dict); + + for (const auto& gl_index_id : gl_index_ids) + { + log_start_drop_index(gl_index_id, "Resume"); + if (max_index_id_in_dict < gl_index_id.index_id) + { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but also found dropped index id (%u,%u) from drop_index " + "dictionary. 
This should never happen and is possibly a " + "bug.", max_index_id_in_dict, gl_index_id.cf_id, + gl_index_id.index_id); + abort_with_stack_traces(); + } + } +} + +void Rdb_dict_manager::rollback_ongoing_index_creation() +{ + std::unique_ptr wb= begin(); + rocksdb::WriteBatch *batch= wb.get(); + + std::vector gl_index_ids; + get_ongoing_create_indexes(&gl_index_ids); + + for (const auto& gl_index_id : gl_index_ids) + { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Removing incomplete create index (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); + + start_drop_index(batch, gl_index_id); + end_ongoing_index_operation(batch, gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + + commit(batch); +} + +void Rdb_dict_manager::log_start_drop_table( + const std::shared_ptr* key_descr, + uint32 n_keys, + const char* log_action) +{ + for (uint32 i = 0; i < n_keys; i++) { + log_start_drop_index(key_descr[i]->get_gl_index_id(), log_action); + } +} + +void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, + const char* log_action) +{ + uint16 m_index_dict_version= 0; + uchar m_index_type= 0; + uint16 kv_version= 0; + if (!get_index_info(gl_index_id, &m_index_dict_version, + &m_index_type, &kv_version)) + { + sql_print_error("RocksDB: Failed to get column family info " + "from index id (%u,%u). 
MyRocks data dictionary may " + "get corrupted.", gl_index_id.cf_id, gl_index_id.index_id); + abort_with_stack_traces(); + } + sql_print_information("RocksDB: %s filtering dropped index (%u,%u)", + log_action, gl_index_id.cf_id, gl_index_id.index_id); +} + +bool Rdb_dict_manager::get_max_index_id(uint32_t *index_id) +{ + bool found= false; + std::string value; + + rocksdb::Status status= get_value(m_key_slice_max_index_id, &value); + if (status.ok()) + { + const uchar* val= (const uchar*)value.c_str(); + uint16_t version= rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::MAX_INDEX_ID_VERSION) + { + *index_id= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE); + found= true; + } + } + return found; +} + +bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* batch, + const uint32_t index_id) +{ + DBUG_ASSERT(batch != nullptr); + + uint32_t old_index_id= -1; + if (get_max_index_id(&old_index_id)) + { + if (old_index_id > index_id) + { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but trying to update to older value %u. 
This should " + "never happen and possibly a bug.", old_index_id, + index_id); + return true; + } + } + + uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE]= + {0}; + rdb_netbuf_store_uint16(value_buf, Rdb_key_def::MAX_INDEX_ID_VERSION); + rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, index_id); + rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, m_key_slice_max_index_id, value); + return false; +} + +void Rdb_dict_manager::add_stats( + rocksdb::WriteBatch* batch, + const std::vector& stats +) +{ + DBUG_ASSERT(batch != nullptr); + + for (const auto& it : stats) { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, it.m_gl_index_id); + + // IndexStats::materialize takes complete care of serialization including + // storing the version + auto value = Rdb_index_stats::materialize( + std::vector{it}, 1.); + + batch->Put( + m_system_cfh, + rocksdb::Slice((char*)key_buf, sizeof(key_buf)), + value + ); + } +} + +Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) +{ + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, gl_index_id); + + std::string value; + rocksdb::Status status= get_value( + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), + &value); + if (status.ok()) + { + std::vector v; + // unmaterialize checks if the version matches + if (Rdb_index_stats::unmaterialize(value, &v)== 0 && v.size() == 1) + { + return v[0]; + } + } + + return Rdb_index_stats(); +} + +uint Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager *dict) +{ + DBUG_ASSERT(dict != nullptr); + + uint res; + mysql_mutex_lock(&m_mutex); + + res= m_next_number++; + + std::unique_ptr wb= dict->begin(); + rocksdb::WriteBatch *batch= wb.get(); + + DBUG_ASSERT(batch != nullptr); + dict->update_max_index_id(batch, res); + dict->commit(batch); + + 
mysql_mutex_unlock(&m_mutex); + + return res; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h new file mode 100644 index 00000000000..7c277ddad7d --- /dev/null +++ b/storage/rocksdb/rdb_datadic.h @@ -0,0 +1,1054 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* C standard header files */ +#include + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./properties_collector.h" +#include "./rdb_buff.h" +#include "./rdb_utils.h" + +namespace myrocks { + +class Rdb_dict_manager; +class Rdb_key_def; +class Rdb_field_packing; +class Rdb_cf_manager; +class Rdb_ddl_manager; + +/* + @brief + Field packing context. + The idea is to ensure that a call to rdb_index_field_pack_t function + is followed by a call to rdb_make_unpack_info_t. + + @detail + For some datatypes, unpack_info is produced as a side effect of + rdb_index_field_pack_t function call. + For other datatypes, packing is just calling make_sort_key(), while + rdb_make_unpack_info_t is a custom function. + In order to accommodate both cases, we require both calls to be made and + unpack_info is passed as context data between the two. 
+*/ +class Rdb_pack_field_context +{ + public: + explicit Rdb_pack_field_context(Rdb_string_writer *writer_arg) : + writer(writer_arg) + {} + + // NULL means we're not producing unpack_info. + Rdb_string_writer *writer; +}; + +struct Rdb_collation_codec; + +/* + C-style "virtual table" allowing different handling of packing logic based + on the field type. See Rdb_field_packing::setup() implementation. + */ +using rdb_make_unpack_info_t= void (*)(const Rdb_collation_codec *codec, + const Field *field, + Rdb_pack_field_context *pack_ctx); +using rdb_index_field_unpack_t= int (*)(Rdb_field_packing *fpi, Field *field, + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unpack_reader); +using rdb_index_field_skip_t= int (*)(const Rdb_field_packing *fpi, + const Field *field, + Rdb_string_reader *reader); +using rdb_index_field_pack_t= void (*)(Rdb_field_packing *fpi, Field *field, + uchar* buf, uchar **dst, + Rdb_pack_field_context *pack_ctx); + +const uint RDB_INVALID_KEY_LEN= uint(-1); + +/* How much one checksum occupies when stored in the record */ +const size_t RDB_CHECKSUM_SIZE= sizeof(uint32_t); + +/* + How much the checksum data occupies in record, in total. + It is storing two checksums plus 1 tag-byte. +*/ +const size_t RDB_CHECKSUM_CHUNK_SIZE= 2 * RDB_CHECKSUM_SIZE + 1; + +/* + Checksum data starts from CHECKSUM_DATA_TAG which is followed by two CRC32 + checksums. +*/ +const char RDB_CHECKSUM_DATA_TAG= 0x01; + +/* + Unpack data is variable length. It is a 1 tag-byte plus a + two byte length field. The length field includes the header as well. +*/ +const char RDB_UNPACK_DATA_TAG= 0x02; +const size_t RDB_UNPACK_DATA_LEN_SIZE= sizeof(uint16_t); +const size_t RDB_UNPACK_HEADER_SIZE= sizeof(RDB_UNPACK_DATA_TAG) + + RDB_UNPACK_DATA_LEN_SIZE; + +// Possible return values for rdb_index_field_unpack_t functions. 
+enum { + UNPACK_SUCCESS= 0, + UNPACK_FAILURE= 1, + UNPACK_INFO_MISSING= 2, +}; + +/* + An object of this class represents information about an index in an SQL + table. It provides services to encode and decode index tuples. + + Note: a table (as in, on-disk table) has a single Rdb_key_def object which + is shared across multiple TABLE* objects and may be used simultaneously from + different threads. + + There are several data encodings: + + === SQL LAYER === + SQL layer uses two encodings: + + - "Table->record format". This is the format that is used for the data in + the record buffers, table->record[i] + + - KeyTupleFormat (see opt_range.cc) - this is used in parameters to index + lookup functions, like handler::index_read_map(). + + === Inside RocksDB === + Primary Key is stored as a mapping: + + index_tuple -> StoredRecord + + StoredRecord is in Table->record format, except for blobs, which are stored + in-place. See ha_rocksdb::convert_record_to_storage_format for details. + + Secondary indexes are stored as one of two variants: + + index_tuple -> unpack_info + index_tuple -> empty_string + + index_tuple here is the form of key that can be compared with memcmp(), aka + "mem-comparable form". + + unpack_info is extra data that allows to restore the original value from its + mem-comparable form. It is present only if the index supports index-only + reads. 
+*/ + +class Rdb_key_def +{ +public: + /* Convert a key from KeyTupleFormat to mem-comparable form */ + uint pack_index_tuple(TABLE *tbl, uchar *pack_buffer, uchar *packed_tuple, + const uchar *key_tuple, key_part_map keypart_map) const; + + /* Convert a key from Table->record format to mem-comparable form */ + uint pack_record(const TABLE *tbl, uchar *pack_buffer, const uchar *record, + uchar *packed_tuple, Rdb_string_writer *unpack_info, + bool should_store_checksums, + longlong hidden_pk_id= 0, uint n_key_parts= 0, + uint *n_null_fields= nullptr) const; + /* Pack the hidden primary key into mem-comparable form. */ + uint pack_hidden_pk(longlong hidden_pk_id, + uchar *packed_tuple) const; + int unpack_record(TABLE *table, uchar *buf, const rocksdb::Slice *packed_key, + const rocksdb::Slice *unpack_info, bool verify_checksums) + const; + + static bool unpack_info_has_checksum(const rocksdb::Slice& unpack_info); + int compare_keys(const rocksdb::Slice *key1, const rocksdb::Slice *key2, + std::size_t* column_index) const; + + size_t key_length(TABLE *table, const rocksdb::Slice &key) const; + + /* Get the key that is the "infimum" for this index */ + inline void get_infimum_key(uchar *key, uint *size) const + { + rdb_netbuf_store_index(key, m_index_number); + *size= INDEX_NUMBER_SIZE; + } + + /* Get the key that is a "supremum" for this index */ + inline void get_supremum_key(uchar *key, uint *size) const + { + rdb_netbuf_store_index(key, m_index_number+1); + *size= INDEX_NUMBER_SIZE; + } + + /* Make a key that is right after the given key. */ + static int successor(uchar *packed_tuple, uint len); + + /* + This can be used to compare prefixes. + if X is a prefix of Y, then we consider that X = Y. + */ + // b describes the lookup key, which can be a prefix of a. 
+ int cmp_full_keys(const rocksdb::Slice& a, const rocksdb::Slice& b) const + { + DBUG_ASSERT(covers_key(a)); + DBUG_ASSERT(covers_key(b)); + + return memcmp(a.data(), b.data(), std::min(a.size(), b.size())); + } + + /* Check if given mem-comparable key belongs to this index */ + bool covers_key(const rocksdb::Slice &slice) const + { + if (slice.size() < INDEX_NUMBER_SIZE) + return false; + + if (memcmp(slice.data(), m_index_number_storage_form, INDEX_NUMBER_SIZE)) + return false; + + return true; + } + + /* + Return true if the passed mem-comparable key + - is from this index, and + - it matches the passed key prefix (the prefix is also in mem-comparable + form) + */ + bool value_matches_prefix(const rocksdb::Slice &value, + const rocksdb::Slice &prefix) const + { + return covers_key(value) && !cmp_full_keys(value, prefix); + } + + uint32 get_keyno() const + { + return m_keyno; + } + + uint32 get_index_number() const + { + return m_index_number; + } + + GL_INDEX_ID get_gl_index_id() const + { + GL_INDEX_ID gl_index_id = { m_cf_handle->GetID(), m_index_number }; + return gl_index_id; + } + + /* Must only be called for secondary keys: */ + uint get_primary_key_tuple(TABLE *tbl, + const std::shared_ptr& pk_descr, + const rocksdb::Slice *key, uchar *pk_buffer) const; + + /* Return max length of mem-comparable form */ + uint max_storage_fmt_length() const + { + return m_maxlength; + } + + uint get_key_parts() const + { + return m_key_parts; + } + + /* + Get a field object for key part #part_no + + @detail + SQL layer thinks unique secondary indexes and indexes in partitioned + tables are not "Extended" with Primary Key columns. + + Internally, we always extend all indexes with PK columns. This function + uses our definition of how the index is Extended. 
+ */ + inline Field* get_table_field_for_part_no(TABLE *table, uint part_no) const; + + const std::string& get_name() const { + return m_name; + } + + Rdb_key_def(const Rdb_key_def& k); + Rdb_key_def(uint indexnr_arg, uint keyno_arg, + rocksdb::ColumnFamilyHandle* cf_handle_arg, + uint16_t index_dict_version_arg, + uchar index_type_arg, + uint16_t kv_format_version_arg, + bool is_reverse_cf_arg, bool is_auto_cf_arg, + const char* name, + Rdb_index_stats stats= Rdb_index_stats()); + ~Rdb_key_def(); + + enum { + INDEX_NUMBER_SIZE= 4, + VERSION_SIZE= 2, + CF_NUMBER_SIZE= 4, + CF_FLAG_SIZE= 4, + PACKED_SIZE= 4, // one int + }; + + // bit flags for combining bools when writing to disk + enum { + REVERSE_CF_FLAG= 1, + AUTO_CF_FLAG= 2, + }; + + // Data dictionary types + enum DATA_DICT_TYPE { + DDL_ENTRY_INDEX_START_NUMBER= 1, + INDEX_INFO= 2, + CF_DEFINITION= 3, + BINLOG_INFO_INDEX_NUMBER= 4, + DDL_DROP_INDEX_ONGOING= 5, + INDEX_STATISTICS= 6, + MAX_INDEX_ID= 7, + DDL_CREATE_INDEX_ONGOING= 8, + END_DICT_INDEX_ID= 255 + }; + + // Data dictionary schema version. Introduce newer versions + // if changing schema layout + enum { + DDL_ENTRY_INDEX_VERSION= 1, + CF_DEFINITION_VERSION= 1, + BINLOG_INFO_INDEX_NUMBER_VERSION= 1, + DDL_DROP_INDEX_ONGOING_VERSION= 1, + MAX_INDEX_ID_VERSION= 1, + DDL_CREATE_INDEX_ONGOING_VERSION= 1, + // Version for index stats is stored in IndexStats struct + }; + + // Index info version. Introduce newer versions when changing the + // INDEX_INFO layout. Update INDEX_INFO_VERSION_LATEST to point to the + // latest version number. + enum { + INDEX_INFO_VERSION_INITIAL= 1, // Obsolete + INDEX_INFO_VERSION_KV_FORMAT, + INDEX_INFO_VERSION_GLOBAL_ID, + // There is no change to data format in this version, but this version + // verifies KV format version, whereas previous versions do not. A version + // bump is needed to prevent older binaries from skipping the KV version + // check inadvertently. 
+ INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + // This normally point to the latest (currently it does). + INDEX_INFO_VERSION_LATEST= INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + }; + + // MyRocks index types + enum { + INDEX_TYPE_PRIMARY= 1, + INDEX_TYPE_SECONDARY= 2, + INDEX_TYPE_HIDDEN_PRIMARY= 3, + }; + + // Key/Value format version for each index type + enum { + PRIMARY_FORMAT_VERSION_INITIAL= 10, + // This change includes: + // - For columns that can be unpacked with unpack_info, PK + // stores the unpack_info. + // - DECIMAL datatype is no longer stored in the row (because + // it can be decoded from its mem-comparable form) + // - VARCHAR-columns use endspace-padding. + PRIMARY_FORMAT_VERSION_UPDATE1= 11, + PRIMARY_FORMAT_VERSION_LATEST= PRIMARY_FORMAT_VERSION_UPDATE1, + + SECONDARY_FORMAT_VERSION_INITIAL= 10, + // This change the SK format to include unpack_info. + SECONDARY_FORMAT_VERSION_UPDATE1= 11, + SECONDARY_FORMAT_VERSION_LATEST= SECONDARY_FORMAT_VERSION_UPDATE1, + }; + + void setup(const TABLE *table, const Rdb_tbl_def *tbl_def); + + rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; } + + /* Check if keypart #kp can be unpacked from index tuple */ + inline bool can_unpack(uint kp) const; + /* Check if keypart #kp needs unpack info */ + inline bool has_unpack_info(uint kp) const; + + /* Check if given table has a primary key */ + static bool table_has_hidden_pk(const TABLE* table); + + void report_checksum_mismatch(bool is_key, const char *data, + size_t data_size) const; + + /* Check if index is at least pk_min if it is a PK, + or at least sk_min if SK.*/ + bool index_format_min_check(int pk_min, int sk_min) const; + +private: + +#ifndef DBUG_OFF + inline bool is_storage_available(int offset, int needed) const + { + int storage_length= static_cast(max_storage_fmt_length()); + return (storage_length - offset) >= needed; + } +#endif // DBUG_OFF + + /* Global number of this index (used as prefix in StorageFormat) */ + const uint32 m_index_number; 
+ + uchar m_index_number_storage_form[INDEX_NUMBER_SIZE]; + + rocksdb::ColumnFamilyHandle* m_cf_handle; + +public: + uint16_t m_index_dict_version; + uchar m_index_type; + /* KV format version for the index id */ + uint16_t m_kv_format_version; + /* If true, the column family stores data in the reverse order */ + bool m_is_reverse_cf; + + bool m_is_auto_cf; + std::string m_name; + mutable Rdb_index_stats m_stats; +private: + + friend class Rdb_tbl_def; // for m_index_number above + + /* Number of key parts in the primary key*/ + uint m_pk_key_parts; + + /* + pk_part_no[X]=Y means that keypart #X of this key is key part #Y of the + primary key. Y==-1 means this column is not present in the primary key. + */ + uint *m_pk_part_no; + + /* Array of index-part descriptors. */ + Rdb_field_packing *m_pack_info; + + uint m_keyno; /* number of this index in the table */ + + /* + Number of key parts in the index (including "index extension"). This is how + many elements are in the m_pack_info array. + */ + uint m_key_parts; + + /* Maximum length of the mem-comparable form. */ + uint m_maxlength; + + /* mutex to protect setup */ + mysql_mutex_t m_mutex; +}; + +// "Simple" collations (those specified in strings/ctype-simple.c) are simple +// because their strnxfrm function maps one byte to one byte. However, the +// mapping is not injective, so the inverse function will take in an extra +// index parameter containing information to disambiguate what the original +// character was. +// +// The m_enc* members are for encoding. Generally, we want encoding to be: +// src -> (dst, idx) +// +// Since strnxfrm already gives us dst, we just need m_enc_idx[src] to give us +// idx. +// +// For the inverse, we have: +// (dst, idx) -> src +// +// We have m_dec_idx[idx][dst] = src to get our original character back. +// +struct Rdb_collation_codec +{ + const my_core::CHARSET_INFO *m_cs; + // The first element unpacks VARCHAR(n), the second one - CHAR(n). 
+ std::array m_make_unpack_info_func; + std::array m_unpack_func; + + std::array m_enc_idx; + std::array m_enc_size; + + std::array m_dec_size; + std::vector> m_dec_idx; +}; + +extern mysql_mutex_t rdb_collation_data_mutex; +extern mysql_mutex_t rdb_mem_cmp_space_mutex; +extern std::array + rdb_collation_data; + + +class Rdb_field_packing +{ +public: + /* Length of mem-comparable image of the field, in bytes */ + int m_max_image_len; + + /* Length of image in the unpack data */ + int m_unpack_data_len; + int m_unpack_data_offset; + + bool m_maybe_null; /* TRUE <=> NULL-byte is stored */ + + /* + Valid only for VARCHAR fields. + */ + const CHARSET_INFO *m_varchar_charset; + + // (Valid when Variable Length Space Padded Encoding is used): + uint m_segment_size; // size of segment used + + // number of bytes used to store number of trimmed (or added) + // spaces in the upack_info + bool m_unpack_info_uses_two_bytes; + + const std::vector* space_xfrm; + size_t space_xfrm_len; + size_t space_mb_len; + + const Rdb_collation_codec* m_charset_codec; + + /* + @return TRUE: this field makes use of unpack_info. + */ + bool uses_unpack_info() const + { + return (m_make_unpack_info_func != nullptr); + } + + /* TRUE means unpack_info stores the original field value */ + bool m_unpack_info_stores_value; + + rdb_index_field_pack_t m_pack_func; + rdb_make_unpack_info_t m_make_unpack_info_func; + + /* + This function takes + - mem-comparable form + - unpack_info data + and restores the original value. + */ + rdb_index_field_unpack_t m_unpack_func; + + /* + This function skips over mem-comparable form. + */ + rdb_index_field_skip_t m_skip_func; + +private: + /* + Location of the field in the table (key number and key part number). + + Note that this describes not the field, but rather a position of field in + the index. 
Consider an example: + + col1 VARCHAR (100), + INDEX idx1 (col1)), + INDEX idx2 (col1(10)), + + Here, idx2 has a special Field object that is set to describe a 10-char + prefix of col1. + + We must also store the keynr. It is needed for implicit "extended keys". + Every key in MyRocks needs to include PK columns. Generally, SQL layer + includes PK columns as part of its "Extended Keys" feature, but sometimes + it does not (known examples are unique secondary indexes and partitioned + tables). + In that case, MyRocks's index descriptor has invisible suffix of PK + columns (and the point is that these columns are parts of PK, not parts + of the current index). + */ + uint m_keynr; + uint m_key_part; +public: + bool setup(const Rdb_key_def *key_descr, const Field *field, + uint keynr_arg, uint key_part_arg, uint16 key_length); + Field *get_field_in_table(const TABLE *tbl) const; + void fill_hidden_pk_val(uchar **dst, longlong hidden_pk_id) const; +}; + +/* + Descriptor telling how to decode/encode a field to on-disk record storage + format. Not all information is in the structure yet, but eventually we + want to have as much as possible there to avoid virtual calls. + + For encoding/decoding of index tuples, see Rdb_key_def. + */ +class Rdb_field_encoder +{ + public: + /* + STORE_NONE is set when a column can be decoded solely from their + mem-comparable form. + STORE_SOME is set when a column can be decoded from their mem-comparable + form plus unpack_info. + STORE_ALL is set when a column cannot be decoded, so its original value + must be stored in the PK records. 
+ */ + enum STORAGE_TYPE { + STORE_NONE, + STORE_SOME, + STORE_ALL, + }; + STORAGE_TYPE m_storage_type; + + uint m_null_offset; + uint16 m_field_index; + + uchar m_null_mask; // 0 means the field cannot be null + + my_core::enum_field_types m_field_type; + + uint m_pack_length_in_rec; + + bool maybe_null() const { return m_null_mask != 0; } + + bool uses_variable_len_encoding() const + { + return (m_field_type == MYSQL_TYPE_BLOB || + m_field_type == MYSQL_TYPE_VARCHAR); + } +}; + +inline Field* Rdb_key_def::get_table_field_for_part_no(TABLE *table, + uint part_no) const +{ + DBUG_ASSERT(part_no < get_key_parts()); + return m_pack_info[part_no].get_field_in_table(table); +} + +inline bool Rdb_key_def::can_unpack(uint kp) const +{ + DBUG_ASSERT(kp < m_key_parts); + return (m_pack_info[kp].m_unpack_func != nullptr); +} + +inline bool Rdb_key_def::has_unpack_info(uint kp) const +{ + DBUG_ASSERT(kp < m_key_parts); + return m_pack_info[kp].uses_unpack_info(); +} + + +/* + A table definition. This is an entry in the mapping + + dbname.tablename -> {index_nr, index_nr, ... } + + There is only one Rdb_tbl_def object for a given table. + That's why we keep auto_increment value here, too. 
+*/ + +class Rdb_tbl_def +{ + private: + void check_if_is_mysql_system_table(); + + /* Stores 'dbname.tablename' */ + std::string m_dbname_tablename; + + /* Store the db name, table name, and partition name */ + std::string m_dbname; + std::string m_tablename; + std::string m_partition; + + void set_name(const std::string& name); + + public: + explicit Rdb_tbl_def(const std::string& name) : + m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) + { + set_name(name); + } + + Rdb_tbl_def(const char* name, size_t len) : + m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) + { + set_name(std::string(name, len)); + } + + explicit Rdb_tbl_def(const rocksdb::Slice& slice, size_t pos= 0) : + m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) + { + set_name(std::string(slice.data() + pos, slice.size() - pos)); + } + + ~Rdb_tbl_def(); + + /* Number of indexes */ + uint m_key_count; + + /* Array of index descriptors */ + std::shared_ptr* m_key_descr_arr; + + std::atomic m_hidden_pk_val; + std::atomic m_auto_incr_val; + + /* Is this a system table */ + bool m_is_mysql_system_table; + + bool put_dict(Rdb_dict_manager *dict, rocksdb::WriteBatch *batch, + uchar *key, size_t keylen); + + const std::string& full_tablename() const { return m_dbname_tablename; } + const std::string& base_dbname() const { return m_dbname; } + const std::string& base_tablename() const { return m_tablename; } + const std::string& base_partition() const { return m_partition; } +}; + + +/* + A thread-safe sequential number generator. Its performance is not a concern + hence it is ok to protect it by a mutex. 
+*/ + +class Rdb_seq_generator +{ + uint m_next_number= 0; + + mysql_mutex_t m_mutex; +public: + void init(uint initial_number) + { + mysql_mutex_init(0 , &m_mutex, MY_MUTEX_INIT_FAST); + m_next_number= initial_number; + } + + uint get_and_update_next_number(Rdb_dict_manager *dict); + + void cleanup() + { + mysql_mutex_destroy(&m_mutex); + } +}; + + +interface Rdb_tables_scanner +{ + virtual int add_table(Rdb_tbl_def* tdef) =0; +}; + + +/* + This contains a mapping of + + dbname.table_name -> array{Rdb_key_def}. + + objects are shared among all threads. +*/ + +class Rdb_ddl_manager +{ + Rdb_dict_manager *m_dict= nullptr; + my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements + // maps index id to + std::map> m_index_num_to_keydef; + mysql_rwlock_t m_rwlock; + + Rdb_seq_generator m_sequence; + // A queue of table stats to write into data dictionary + // It is produced by event listener (ie compaction and flush threads) + // and consumed by the rocksdb background thread + std::map m_stats2store; +public: + /* Load the data dictionary from on-disk storage */ + bool init(Rdb_dict_manager *dict_arg, Rdb_cf_manager *cf_manager, + uint32_t validate_tables); + + void cleanup(); + + Rdb_tbl_def* find(const std::string& table_name, bool lock= true); + const std::shared_ptr& find(GL_INDEX_ID gl_index_id); + std::shared_ptr safe_find(GL_INDEX_ID gl_index_id); + void set_stats( + const std::unordered_map& stats); + void adjust_stats( + const std::vector& new_data, + const std::vector& deleted_data + =std::vector()); + void persist_stats(bool sync = false); + + /* Modify the mapping and write it to on-disk storage */ + int put_and_write(Rdb_tbl_def *key_descr, rocksdb::WriteBatch *batch); + void remove(Rdb_tbl_def *rec, rocksdb::WriteBatch *batch, bool lock= true); + bool rename(const std::string& from, const std::string& to, + rocksdb::WriteBatch *batch); + + uint get_and_update_next_number(Rdb_dict_manager *dict) + { return m_sequence.get_and_update_next_number(dict); } 
+ + /* Walk the data dictionary */ + int scan_for_tables(Rdb_tables_scanner* tables_scanner); + + void erase_index_num(GL_INDEX_ID gl_index_id); + +private: + /* Put the data into in-memory table (only) */ + int put(Rdb_tbl_def *key_descr, bool lock= true); + + /* Helper functions to be passed to my_core::HASH object */ + static const uchar* get_hash_key(Rdb_tbl_def *rec, size_t *length, + my_bool not_used __attribute__((unused))); + static void free_hash_elem(void* data); + + bool validate_schemas(); +}; + + +/* + Writing binlog information into RocksDB at commit(), + and retrieving binlog information at crash recovery. + commit() and recovery are always executed by at most single client + at the same time, so concurrency control is not needed. + + Binlog info is stored in RocksDB as the following. + key: BINLOG_INFO_INDEX_NUMBER + value: packed single row: + binlog_name_length (2 byte form) + binlog_name + binlog_position (4 byte form) + binlog_gtid_length (2 byte form) + binlog_gtid +*/ +class Rdb_binlog_manager +{ +public: + bool init(Rdb_dict_manager *dict); + void cleanup(); + void update(const char* binlog_name, const my_off_t binlog_pos, + const char* binlog_gtid, rocksdb::WriteBatchBase* batch); + bool read(char* binlog_name, my_off_t* binlog_pos, char* binlog_gtid); + void update_slave_gtid_info(uint id, const char* db, const char* gtid, + rocksdb::WriteBatchBase *write_batch); + +private: + Rdb_dict_manager *m_dict= nullptr; + uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + rocksdb::Slice m_key_slice; + + rocksdb::Slice pack_value(uchar *buf, + const char *binlog_name, + const my_off_t binlog_pos, + const char *binlog_gtid); + bool unpack_value(const uchar *value, char *binlog_name, + my_off_t *binlog_pos, char *binlog_gtid); + + std::atomic m_slave_gtid_info_tbl; +}; + + +/* + Rdb_dict_manager manages how MySQL on RocksDB (MyRocks) stores its + internal data dictionary. 
+ MyRocks stores data dictionary on dedicated system column family + named __system__. The system column family is used by MyRocks + internally only, and not used by applications. + + Currently MyRocks has the following data dictionary data models. + + 1. Table Name => internal index id mappings + key: Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER(0x1) + dbname.tablename + value: version, {cf_id, index_id}*n_indexes_of_the_table + version is 2 bytes. cf_id and index_id are 4 bytes. + + 2. internal cf_id, index id => index information + key: Rdb_key_def::INDEX_INFO(0x2) + cf_id + index_id + value: version, index_type, kv_format_version + index_type is 1 byte, version and kv_format_version are 2 bytes. + + 3. CF id => CF flags + key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id + value: version, {is_reverse_cf, is_auto_cf} + cf_flags is 4 bytes in total. + + 4. Binlog entry (updated at commit) + key: Rdb_key_def::BINLOG_INFO_INDEX_NUMBER (0x4) + value: version, {binlog_name,binlog_pos,binlog_gtid} + + 5. Ongoing drop index entry + key: Rdb_key_def::DDL_DROP_INDEX_ONGOING(0x5) + cf_id + index_id + value: version + + 6. index stats + key: Rdb_key_def::INDEX_STATISTICS(0x6) + cf_id + index_id + value: version, {materialized PropertiesCollector::IndexStats} + + 7. maximum index id + key: Rdb_key_def::MAX_INDEX_ID(0x7) + value: index_id + index_id is 4 bytes + + 8. Ongoing create index entry + key: Rdb_key_def::DDL_CREATE_INDEX_ONGOING(0x8) + cf_id + index_id + value: version + + Data dictionary operations are atomic inside RocksDB. For example, + when creating a table with two indexes, it is necessary to call Put + three times. They have to be atomic. Rdb_dict_manager has a wrapper function + begin() and commit() to make it easier to do atomic operations. 
+ +*/ +class Rdb_dict_manager +{ +private: + mysql_mutex_t m_mutex; + rocksdb::DB *m_db= nullptr; + rocksdb::ColumnFamilyHandle *m_system_cfh= nullptr; + /* Utility to put INDEX_INFO and CF_DEFINITION */ + + uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + rocksdb::Slice m_key_slice_max_index_id; + + static void dump_index_id(uchar *netbuf, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id); + void delete_with_prefix(rocksdb::WriteBatch* batch, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) const; + /* Functions for fast DROP TABLE/INDEX */ + void resume_drop_indexes(); + void log_start_drop_table(const std::shared_ptr* key_descr, + uint32 n_keys, + const char* log_action); + void log_start_drop_index(GL_INDEX_ID gl_index_id, + const char* log_action); +public: + bool init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager); + + inline void cleanup() + { + mysql_mutex_destroy(&m_mutex); + } + + inline void lock() + { + mysql_mutex_lock(&m_mutex); + } + + inline void unlock() + { + mysql_mutex_unlock(&m_mutex); + } + + /* Raw RocksDB operations */ + std::unique_ptr begin(); + int commit(rocksdb::WriteBatch *batch, bool sync = true); + rocksdb::Status get_value(const rocksdb::Slice& key, + std::string *value) const; + void put_key(rocksdb::WriteBatchBase *batch, const rocksdb::Slice &key, + const rocksdb::Slice &value); + void delete_key(rocksdb::WriteBatchBase *batch, + const rocksdb::Slice &key) const; + rocksdb::Iterator *new_iterator(); + + /* Internal Index id => CF */ + void add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch, + const uchar index_type, + const uint16_t kv_version, + const uint index_id, + const uint cf_id); + void delete_index_info(rocksdb::WriteBatch* batch, + const GL_INDEX_ID &index_id) const; + bool get_index_info(const GL_INDEX_ID &gl_index_id, + uint16_t *index_dict_version, + uchar *index_type, uint16_t *kv_version); + + /* CF id => CF flags */ + void 
add_cf_flags(rocksdb::WriteBatch *batch, + const uint cf_id, + const uint cf_flags); + bool get_cf_flags(const uint cf_id, uint *cf_flags); + + /* Functions for fast CREATE/DROP TABLE/INDEX */ + void get_ongoing_index_operation(std::vector* gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type); + bool is_index_operation_ongoing(const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type); + void start_ongoing_index_operation(rocksdb::WriteBatch* batch, + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type); + void end_ongoing_index_operation(rocksdb::WriteBatch* batch, + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type); + bool is_drop_index_empty(); + void add_drop_table(std::shared_ptr* key_descr, uint32 n_keys, + rocksdb::WriteBatch *batch); + void add_drop_index(const std::unordered_set& gl_index_ids, + rocksdb::WriteBatch *batch); + void add_create_index(const std::unordered_set& gl_index_ids, + rocksdb::WriteBatch *batch); + void finish_indexes_operation( + const std::unordered_set& gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type); + void rollback_ongoing_index_creation(); + + inline void get_ongoing_drop_indexes(std::vector* gl_index_ids) + { + get_ongoing_index_operation(gl_index_ids, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void get_ongoing_create_indexes(std::vector* gl_index_ids) + { + get_ongoing_index_operation(gl_index_ids, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline void start_drop_index(rocksdb::WriteBatch *wb, + const GL_INDEX_ID& gl_index_id) + { + start_ongoing_index_operation(wb, gl_index_id, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void start_create_index(rocksdb::WriteBatch *wb, + const GL_INDEX_ID& gl_index_id) + { + start_ongoing_index_operation(wb, gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline void finish_drop_indexes( + const std::unordered_set& gl_index_ids) + { + finish_indexes_operation(gl_index_ids, + 
Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void finish_create_indexes( + const std::unordered_set& gl_index_ids) + { + finish_indexes_operation(gl_index_ids, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline bool is_drop_index_ongoing(const GL_INDEX_ID& gl_index_id) + { + return is_index_operation_ongoing(gl_index_id, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline bool is_create_index_ongoing(const GL_INDEX_ID& gl_index_id) + { + return is_index_operation_ongoing(gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + + bool get_max_index_id(uint32_t *index_id); + bool update_max_index_id(rocksdb::WriteBatch* batch, + const uint32_t index_id); + void add_stats(rocksdb::WriteBatch* batch, + const std::vector& stats); + Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc new file mode 100644 index 00000000000..78af6eff3a5 --- /dev/null +++ b/storage/rocksdb/rdb_i_s.cc @@ -0,0 +1,1189 @@ +/* + Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* C++ standard header files */ +#include +#include + +/* MySQL header files */ +#include + +/* RocksDB header files */ +#include "rocksdb/convenience.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/slice_transform.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" +#include "./rdb_cf_manager.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +/** + Define the INFORMATION_SCHEMA (I_S) structures needed by MyRocks storage engine. +*/ + +#define ROCKSDB_FIELD_INFO(_name_, _len_, _type_, _flag_) \ + { _name_, _len_, _type_, 0, _flag_, nullptr, 0 } + +#define ROCKSDB_FIELD_INFO_END ROCKSDB_FIELD_INFO(nullptr, \ + 0, MYSQL_TYPE_NULL, 0) + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_CFSTATS dynamic table + */ +static int rdb_i_s_cfstats_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + bool ret; + uint64_t val; + + DBUG_ENTER("rdb_i_s_cfstats_fill_table"); + + std::vector> cf_properties = { + {rocksdb::DB::Properties::kNumImmutableMemTable, "NUM_IMMUTABLE_MEM_TABLE"}, + {rocksdb::DB::Properties::kMemTableFlushPending, + "MEM_TABLE_FLUSH_PENDING"}, + {rocksdb::DB::Properties::kCompactionPending, "COMPACTION_PENDING"}, + {rocksdb::DB::Properties::kCurSizeActiveMemTable, + "CUR_SIZE_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kCurSizeAllMemTables, "CUR_SIZE_ALL_MEM_TABLES"}, + {rocksdb::DB::Properties::kNumEntriesActiveMemTable, + "NUM_ENTRIES_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kNumEntriesImmMemTables, + "NUM_ENTRIES_IMM_MEM_TABLES"}, + {rocksdb::DB::Properties::kEstimateTableReadersMem, + 
"NON_BLOCK_CACHE_SST_MEM_USAGE"}, + {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"} + }; + + rocksdb::DB *rdb= rdb_get_rocksdb_db(); + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + DBUG_ASSERT(rdb != nullptr); + + for (auto cf_name : cf_manager.get_cf_names()) + { + rocksdb::ColumnFamilyHandle* cfh; + bool is_automatic; + + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. + */ + cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) + continue; + + for (auto property : cf_properties) + { + if (!rdb->GetIntProperty(cfh, property.first, &val)) + continue; + + DBUG_ASSERT(tables != nullptr); + + tables->table->field[0]->store(cf_name.c_str(), cf_name.size(), + system_charset_info); + tables->table->field[1]->store(property.second.c_str(), + property.second.size(), + system_charset_info); + tables->table->field[2]->store(val, true); + + ret= my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + } + DBUG_RETURN(0); +} + +static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[]= +{ + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + +static int rdb_i_s_cfstats_init(void *p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_cfstats_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_cfstats_fields_info; + schema->fill_table= rdb_i_s_cfstats_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_DBSTATS dynamic table + */ +static int rdb_i_s_dbstats_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + bool ret; + uint64_t val; + + 
DBUG_ENTER("rdb_i_s_dbstats_fill_table"); + + std::vector> db_properties = { + {rocksdb::DB::Properties::kBackgroundErrors, "DB_BACKGROUND_ERRORS"}, + {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"}, + {rocksdb::DB::Properties::kOldestSnapshotTime, "DB_OLDEST_SNAPSHOT_TIME"} + }; + + rocksdb::DB *rdb= rdb_get_rocksdb_db(); + const rocksdb::BlockBasedTableOptions& table_options= + rdb_get_table_options(); + + for (auto property : db_properties) + { + if (!rdb->GetIntProperty(property.first, &val)) + continue; + + DBUG_ASSERT(tables != nullptr); + + tables->table->field[0]->store(property.second.c_str(), + property.second.size(), + system_charset_info); + tables->table->field[1]->store(val, true); + + ret= my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + + /* + Currently, this can only show the usage of a block cache allocated + directly by the handlerton. If the column family config specifies a block + cache (i.e. the column family option has a parameter such as + block_based_table_factory={block_cache=1G}), then the block cache is + allocated within the rocksdb::GetColumnFamilyOptionsFromString(). + + There is no interface to retrieve this block cache, nor fetch the usage + information from the column family. + */ + val= (table_options.block_cache ? 
table_options.block_cache->GetUsage() : 0); + tables->table->field[0]->store(STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), + system_charset_info); + tables->table->field[1]->store(val, true); + + ret= my_core::schema_table_store_record(thd, tables->table); + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[]= +{ + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + +static int rdb_i_s_dbstats_init(void *p) +{ + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_dbstats_init"); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_dbstats_fields_info; + schema->fill_table= rdb_i_s_dbstats_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT dynamic table + */ + +static int rdb_i_s_perf_context_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + int ret= 0; + Field** field= tables->table->field; + + DBUG_ENTER("rdb_i_s_perf_context_fill_table"); + + std::vector tablenames= rdb_get_open_table_names(); + for (const auto& it : tablenames) + { + std::string str, dbname, tablename, partname; + Rdb_perf_counters counters; + + if (rdb_normalize_tablename(it, &str)) { + return HA_ERR_INTERNAL_ERROR; + } + + if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) + { + continue; + } + + if (rdb_get_table_perf_counters(it.c_str(), &counters)) + { + continue; + } + + DBUG_ASSERT(field != nullptr); + + field[0]->store(dbname.c_str(), dbname.size(), system_charset_info); + field[1]->store(tablename.c_str(), tablename.size(), system_charset_info); + if (partname.size() == 0) + { + field[2]->set_null(); + } + else + { + field[2]->set_notnull(); + field[2]->store(partname.c_str(), 
partname.size(), system_charset_info); + } + + for (int i= 0; i < PC_MAX_IDX; i++) + { + field[3]->store(rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), + system_charset_info); + field[4]->store(counters.m_value[i], true); + + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret) + DBUG_RETURN(ret); + } + } + + DBUG_RETURN(0); +} + +static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[]= +{ + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO_END +}; + +static int rdb_i_s_perf_context_init(void *p) +{ + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_perf_context_init"); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_perf_context_fields_info; + schema->fill_table= rdb_i_s_perf_context_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_perf_context_global_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + int ret= 0; + DBUG_ENTER("rdb_i_s_perf_context_global_fill_table"); + + // Get a copy of the global perf counters. 
+ Rdb_perf_counters global_counters; + rdb_get_global_perf_counters(&global_counters); + + for (int i= 0; i < PC_MAX_IDX; i++) { + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + tables->table->field[0]->store(rdb_pc_stat_types[i].c_str(), + rdb_pc_stat_types[i].size(), + system_charset_info); + tables->table->field[1]->store(global_counters.m_value[i], true); + + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret) + DBUG_RETURN(ret); + } + + DBUG_RETURN(0); +} + +static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[]= +{ + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + +static int rdb_i_s_perf_context_global_init(void *p) +{ + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_perf_context_global_init"); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_perf_context_global_fields_info; + schema->fill_table= rdb_i_s_perf_context_global_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_CFOPTIONS dynamic table + */ +static int rdb_i_s_cfoptions_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + bool ret; + + DBUG_ENTER("rdb_i_s_cfoptions_fill_table"); + + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + + for (auto cf_name : cf_manager.get_cf_names()) + { + std::string val; + rocksdb::ColumnFamilyOptions opts; + cf_manager.get_cf_options(cf_name, &opts); + + std::vector> cf_option_types = { + {"COMPARATOR", opts.comparator == nullptr ? "NULL" : + std::string(opts.comparator->Name())}, + {"MERGE_OPERATOR", opts.merge_operator == nullptr ? 
"NULL" : + std::string(opts.merge_operator->Name())}, + {"COMPACTION_FILTER", opts.compaction_filter == nullptr ? "NULL" : + std::string(opts.compaction_filter->Name())}, + {"COMPACTION_FILTER_FACTORY", + opts.compaction_filter_factory == nullptr ? "NULL" : + std::string(opts.compaction_filter_factory->Name())}, + {"WRITE_BUFFER_SIZE", std::to_string(opts.write_buffer_size)}, + {"MAX_WRITE_BUFFER_NUMBER", std::to_string(opts.max_write_buffer_number)}, + {"MIN_WRITE_BUFFER_NUMBER_TO_MERGE", + std::to_string(opts.min_write_buffer_number_to_merge)}, + {"NUM_LEVELS", std::to_string(opts.num_levels)}, + {"LEVEL0_FILE_NUM_COMPACTION_TRIGGER", + std::to_string(opts.level0_file_num_compaction_trigger)}, + {"LEVEL0_SLOWDOWN_WRITES_TRIGGER", + std::to_string(opts.level0_slowdown_writes_trigger)}, + {"LEVEL0_STOP_WRITES_TRIGGER", + std::to_string(opts.level0_stop_writes_trigger)}, + {"MAX_MEM_COMPACTION_LEVEL", std::to_string(opts.max_mem_compaction_level)}, + {"TARGET_FILE_SIZE_BASE", std::to_string(opts.target_file_size_base)}, + {"TARGET_FILE_SIZE_MULTIPLIER", std::to_string(opts.target_file_size_multiplier)}, + {"MAX_BYTES_FOR_LEVEL_BASE", std::to_string(opts.max_bytes_for_level_base)}, + {"LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES", + opts.level_compaction_dynamic_level_bytes ? "ON" : "OFF"}, + {"MAX_BYTES_FOR_LEVEL_MULTIPLIER", + std::to_string(opts.max_bytes_for_level_multiplier)}, + {"SOFT_RATE_LIMIT", std::to_string(opts.soft_rate_limit)}, + {"HARD_RATE_LIMIT", std::to_string(opts.hard_rate_limit)}, + {"RATE_LIMIT_DELAY_MAX_MILLISECONDS", + std::to_string(opts.rate_limit_delay_max_milliseconds)}, + {"ARENA_BLOCK_SIZE", std::to_string(opts.arena_block_size)}, + {"DISABLE_AUTO_COMPACTIONS", + opts.disable_auto_compactions ? "ON" : "OFF"}, + {"PURGE_REDUNDANT_KVS_WHILE_FLUSH", + opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"}, + {"VERIFY_CHECKSUM_IN_COMPACTION", + opts.verify_checksums_in_compaction ? 
"ON" : "OFF"}, + {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS", + std::to_string(opts.max_sequential_skip_in_iterations)}, + {"MEMTABLE_FACTORY", + opts.memtable_factory == nullptr ? "NULL" : + opts.memtable_factory->Name()}, + {"INPLACE_UPDATE_SUPPORT", + opts.inplace_update_support ? "ON" : "OFF"}, + {"INPLACE_UPDATE_NUM_LOCKS", + opts.inplace_update_num_locks ? "ON" : "OFF"}, + {"MEMTABLE_PREFIX_BLOOM_BITS_RATIO", + std::to_string(opts.memtable_prefix_bloom_size_ratio)}, + {"MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE", + std::to_string(opts.memtable_huge_page_size)}, + {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)}, + {"MAX_SUCCESSIVE_MERGES", + std::to_string(opts.max_successive_merges)}, + {"MIN_PARTIAL_MERGE_OPERANDS", + std::to_string(opts.min_partial_merge_operands)}, + {"OPTIMIZE_FILTERS_FOR_HITS", + (opts.optimize_filters_for_hits ? "ON" : "OFF")}, + }; + + // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value + val = opts.max_bytes_for_level_multiplier_additional.empty() ? "NULL" : ""; + for (auto level : opts.max_bytes_for_level_multiplier_additional) + { + val.append(std::to_string(level) + ":"); + } + val.pop_back(); + cf_option_types.push_back({"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val}); + + // get COMPRESSION_TYPE option value + GetStringFromCompressionType(&val, opts.compression); + if (val.empty()) + { + val = "NULL"; + } + cf_option_types.push_back({"COMPRESSION_TYPE", val}); + + // get COMPRESSION_PER_LEVEL option value + val = opts.compression_per_level.empty() ? 
"NULL" : ""; + for (auto compression_type : opts.compression_per_level) + { + std::string res; + GetStringFromCompressionType(&res, compression_type); + if (!res.empty()) + { + val.append(res + ":"); + } + } + val.pop_back(); + cf_option_types.push_back({"COMPRESSION_PER_LEVEL", val}); + + // get compression_opts value + val = std::to_string(opts.compression_opts.window_bits) + ":"; + val.append(std::to_string(opts.compression_opts.level) + ":"); + val.append(std::to_string(opts.compression_opts.strategy)); + cf_option_types.push_back({"COMPRESSION_OPTS", val}); + + // bottommost_compression + if (opts.bottommost_compression) + { + std::string res; + GetStringFromCompressionType(&res, opts.bottommost_compression); + if (!res.empty()) + { + cf_option_types.push_back({"BOTTOMMOST_COMPRESSION", res}); + } + } + + // get PREFIX_EXTRACTOR option + cf_option_types.push_back({"PREFIX_EXTRACTOR", + opts.prefix_extractor == nullptr ? "NULL" : + std::string(opts.prefix_extractor->Name())}); + + // get COMPACTION_STYLE option + switch (opts.compaction_style) + { + case rocksdb::kCompactionStyleLevel: val = "kCompactionStyleLevel"; break; + case rocksdb::kCompactionStyleUniversal: val = "kCompactionStyleUniversal"; break; + case rocksdb:: kCompactionStyleFIFO: val = "kCompactionStyleFIFO"; break; + case rocksdb:: kCompactionStyleNone: val = "kCompactionStyleNone"; break; + default: val = "NULL"; + } + cf_option_types.push_back({"COMPACTION_STYLE", val}); + + // get COMPACTION_OPTIONS_UNIVERSAL related options + rocksdb::CompactionOptionsUniversal compac_opts = opts.compaction_options_universal; + val = "{SIZE_RATIO="; + val.append(std::to_string(compac_opts.size_ratio)); + val.append("; MIN_MERGE_WIDTH="); + val.append(std::to_string(compac_opts.min_merge_width)); + val.append("; MAX_MERGE_WIDTH="); + val.append(std::to_string(compac_opts.max_merge_width)); + val.append("; MAX_SIZE_AMPLIFICATION_PERCENT="); + 
val.append(std::to_string(compac_opts.max_size_amplification_percent)); + val.append("; COMPRESSION_SIZE_PERCENT="); + val.append(std::to_string(compac_opts.compression_size_percent)); + val.append("; STOP_STYLE="); + switch (compac_opts.stop_style) + { + case rocksdb::kCompactionStopStyleSimilarSize: + val.append("kCompactionStopStyleSimilarSize}"); break; + case rocksdb::kCompactionStopStyleTotalSize: + val.append("kCompactionStopStyleTotalSize}"); break; + default: val.append("}"); + } + cf_option_types.push_back({"COMPACTION_OPTIONS_UNIVERSAL", val}); + + // get COMPACTION_OPTION_FIFO option + cf_option_types.push_back({"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE", + std::to_string(opts.compaction_options_fifo.max_table_files_size)}); + + // get block-based table related options + const rocksdb::BlockBasedTableOptions& table_options= + rdb_get_table_options(); + + // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS", + table_options.cache_index_and_filter_blocks ? "1" : "0"}); + + // get BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE option value + switch (table_options.index_type) + { + case rocksdb::BlockBasedTableOptions::kBinarySearch: val = "kBinarySearch"; break; + case rocksdb::BlockBasedTableOptions::kHashSearch: val = "kHashSearch"; break; + default: val = "NULL"; + } + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE", val}); + + // get BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION option value + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION", + table_options.hash_index_allow_collision ? 
"ON" : "OFF"}); + + // get BLOCK_BASED_TABLE_FACTORY::CHECKSUM option value + switch (table_options.checksum) + { + case rocksdb::kNoChecksum: val = "kNoChecksum"; break; + case rocksdb::kCRC32c: val = "kCRC32c"; break; + case rocksdb::kxxHash: val = "kxxHash"; break; + default: val = "NULL"; + } + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::CHECKSUM", val}); + + // get BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE option value + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE", + table_options.no_block_cache ? "ON" : "OFF"}); + + // get BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY", + table_options.filter_policy == nullptr ? "NULL" : + std::string(table_options.filter_policy->Name())}); + + // get BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING", + table_options.whole_key_filtering ? "1" : "0"}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE", + table_options.block_cache == nullptr ? "NULL" : + std::to_string(table_options.block_cache->GetUsage())}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED", + table_options.block_cache_compressed == nullptr ? 
"NULL" : + std::to_string(table_options.block_cache_compressed->GetUsage())}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE", + std::to_string(table_options.block_size)}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION", + std::to_string(table_options.block_size_deviation)}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL", + std::to_string(table_options.block_restart_interval)}); + + // get BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION", + std::to_string(table_options.format_version)}); + + for (auto cf_option_type : cf_option_types) + { + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + tables->table->field[0]->store(cf_name.c_str(), cf_name.size(), + system_charset_info); + tables->table->field[1]->store(cf_option_type.first.c_str(), + cf_option_type.first.size(), + system_charset_info); + tables->table->field[2]->store(cf_option_type.second.c_str(), + cf_option_type.second.size(), + system_charset_info); + + ret = my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + } + DBUG_RETURN(0); +} + +static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = +{ + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + +/* + * helper function for rdb_i_s_global_info_fill_table + * to insert (TYPE, KEY, VALUE) rows into + * information_schema.rocksdb_global_info + */ +static int rdb_global_info_fill_row(my_core::THD *thd, + my_core::TABLE_LIST *tables, + const char *type, + 
const char *name, + const char *value) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(type != nullptr); + DBUG_ASSERT(name != nullptr); + DBUG_ASSERT(value != nullptr); + + Field **field= tables->table->field; + DBUG_ASSERT(field != nullptr); + + field[0]->store(type, strlen(type), system_charset_info); + field[1]->store(name, strlen(name), system_charset_info); + field[2]->store(value, strlen(value), system_charset_info); + + return my_core::schema_table_store_record(thd, tables->table); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table + */ +static int rdb_i_s_global_info_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + DBUG_ENTER("rdb_i_s_global_info_fill_table"); + static const uint32_t INT_BUF_LEN = 21; + static const uint32_t GTID_BUF_LEN = 60; + static const uint32_t CF_ID_INDEX_BUF_LEN = 60; + + int ret= 0; + + /* binlog info */ + Rdb_binlog_manager *blm= rdb_get_binlog_manager(); + DBUG_ASSERT(blm != nullptr); + + char file_buf[FN_REFLEN+1]= {0}; + my_off_t pos = 0; + char pos_buf[INT_BUF_LEN]= {0}; + char gtid_buf[GTID_BUF_LEN]= {0}; + + if (blm->read(file_buf, &pos, gtid_buf)) { + snprintf(pos_buf, INT_BUF_LEN, "%lu", (uint64_t) pos); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf); + } + + /* max index info */ + Rdb_dict_manager *dict_manager= rdb_get_dict_manager(); + DBUG_ASSERT(dict_manager != nullptr); + + uint32_t max_index_id; + char max_index_id_buf[INT_BUF_LEN]= {0}; + + if (dict_manager->get_max_index_id(&max_index_id)) { + snprintf(max_index_id_buf, INT_BUF_LEN, "%u", max_index_id); + ret |= rdb_global_info_fill_row(thd, tables, 
"MAX_INDEX_ID", "MAX_INDEX_ID", + max_index_id_buf); + } + + /* cf_id -> cf_flags */ + char cf_id_buf[INT_BUF_LEN]= {0}; + char cf_value_buf[FN_REFLEN+1] = {0}; + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (auto cf_handle : cf_manager.get_all_cf()) { + uint flags; + dict_manager->get_cf_flags(cf_handle->GetID(), &flags); + snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); + snprintf(cf_value_buf, FN_REFLEN, "%s [%u]", cf_handle->GetName().c_str(), + flags); + ret |= rdb_global_info_fill_row(thd, tables, "CF_FLAGS", cf_id_buf, + cf_value_buf); + + if (ret) + break; + } + + /* DDL_DROP_INDEX_ONGOING */ + std::vector gl_index_ids; + dict_manager->get_ongoing_index_operation(&gl_index_ids, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + char cf_id_index_buf[CF_ID_INDEX_BUF_LEN]= {0}; + for (auto gl_index_id : gl_index_ids) { + snprintf(cf_id_index_buf, CF_ID_INDEX_BUF_LEN, "cf_id:%u,index_id:%u", + gl_index_id.cf_id, gl_index_id.index_id); + ret |= rdb_global_info_fill_row(thd, tables, "DDL_DROP_INDEX_ONGOING", + cf_id_index_buf, ""); + + if (ret) + break; + } + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = +{ + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + + +namespace // anonymous namespace = not visible outside this source file +{ +struct Rdb_ddl_scanner : public Rdb_tables_scanner +{ + my_core::THD *m_thd; + my_core::TABLE *m_table; + + int add_table(Rdb_tbl_def* tdef) override; +}; +} // anonymous namespace + + +int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) +{ + DBUG_ASSERT(tdef != nullptr); + + int ret= 0; + + DBUG_ASSERT(m_table != nullptr); + Field** field= m_table->field; + DBUG_ASSERT(field != nullptr); + + const std::string& dbname= tdef->base_dbname(); + field[0]->store(dbname.c_str(), dbname.size(), system_charset_info); + 
+ const std::string& tablename= tdef->base_tablename(); + field[1]->store(tablename.c_str(), tablename.size(), system_charset_info); + + const std::string& partname= tdef->base_partition(); + if (partname.length() == 0) + { + field[2]->set_null(); + } + else + { + field[2]->set_notnull(); + field[2]->store(partname.c_str(), partname.size(), system_charset_info); + } + + for (uint i= 0; i < tdef->m_key_count; i++) + { + const std::shared_ptr& kd= tdef->m_key_descr_arr[i]; + DBUG_ASSERT(kd != nullptr); + + field[3]->store(kd->m_name.c_str(), kd->m_name.size(), system_charset_info); + + GL_INDEX_ID gl_index_id = kd->get_gl_index_id(); + field[4]->store(gl_index_id.cf_id, true); + field[5]->store(gl_index_id.index_id, true); + field[6]->store(kd->m_index_type, true); + field[7]->store(kd->m_kv_format_version, true); + + std::string cf_name= kd->get_cf()->GetName(); + field[8]->store(cf_name.c_str(), cf_name.size(), system_charset_info); + + ret= my_core::schema_table_store_record(m_thd, m_table); + if (ret) + return ret; + } + return 0; +} + +static int rdb_i_s_ddl_fill_table(my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond) +{ + DBUG_ENTER("rdb_i_s_ddl_fill_table"); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + Rdb_ddl_scanner ddl_arg; + ddl_arg.m_thd= thd; + ddl_arg.m_table= tables->table; + + Rdb_ddl_manager *ddl_manager= rdb_get_ddl_manager(); + DBUG_ASSERT(ddl_manager != nullptr); + int ret= ddl_manager->scan_for_tables(&ddl_arg); + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = +{ + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + 
ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), + ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t), + MYSQL_TYPE_SHORT, 0), + ROCKSDB_FIELD_INFO("CF", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + +static int rdb_i_s_ddl_init(void *p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_ddl_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_ddl_fields_info; + schema->fill_table= rdb_i_s_ddl_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_cfoptions_init(void *p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_cfoptions_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_cfoptions_fields_info; + schema->fill_table= rdb_i_s_cfoptions_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_global_info_init(void *p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_global_info_init"); + DBUG_ASSERT(p != nullptr); + + schema= reinterpret_cast(p); + + schema->fields_info= rdb_i_s_global_info_fields_info; + schema->fill_table= rdb_i_s_global_info_fill_table; + + DBUG_RETURN(0); +} + +/* Given a path to a file return just the filename portion. */ +static std::string rdb_filename_without_path( + const std::string& path) +{ + /* Find last slash in path */ + size_t pos = path.rfind('/'); + + /* None found? 
Just return the original string */ + if (pos == std::string::npos) { + return std::string(path); + } + + /* Return everything after the slash (or backslash) */ + return path.substr(pos + 1); +} + +/* Fill the information_schema.rocksdb_index_file_map virtual table */ +static int rdb_i_s_index_file_map_fill_table( + my_core::THD *thd, + my_core::TABLE_LIST *tables, + my_core::Item *cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + + DBUG_ENTER("rdb_i_s_index_file_map_fill_table"); + + /* Iterate over all the column families */ + rocksdb::DB *rdb= rdb_get_rocksdb_db(); + DBUG_ASSERT(rdb != nullptr); + + Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (auto cf_handle : cf_manager.get_all_cf()) { + /* Grab the the properties of all the tables in the column family */ + rocksdb::TablePropertiesCollection table_props_collection; + rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle, + &table_props_collection); + if (!s.ok()) { + continue; + } + + /* Iterate over all the items in the collection, each of which contains a + * name and the actual properties */ + for (auto props : table_props_collection) { + /* Add the SST name into the output */ + std::string sst_name = rdb_filename_without_path(props.first); + field[2]->store(sst_name.data(), sst_name.size(), system_charset_info); + + /* Get the __indexstats__ data out of the table property */ + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats); + if (stats.empty()) { + field[0]->store(-1, true); + field[1]->store(-1, true); + field[3]->store(-1, true); + field[4]->store(-1, true); + field[5]->store(-1, true); + field[6]->store(-1, true); + field[7]->store(-1, true); + field[8]->store(-1, true); + } + else { + for (auto it : stats) { + /* Add the index number, the number of rows, and data 
size to the output */ + field[0]->store(it.m_gl_index_id.cf_id, true); + field[1]->store(it.m_gl_index_id.index_id, true); + field[3]->store(it.m_rows, true); + field[4]->store(it.m_data_size, true); + field[5]->store(it.m_entry_deletes, true); + field[6]->store(it.m_entry_single_deletes, true); + field[7]->store(it.m_entry_merges, true); + field[8]->store(it.m_entry_others, true); + + /* Tell MySQL about this row in the virtual table */ + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + } + } + } + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = +{ + /* The information_schema.rocksdb_index_file_map virtual table has four fields: + * COLUMN_FAMILY => the index's column family contained in the SST file + * INDEX_NUMBER => the index id contained in the SST file + * SST_NAME => the name of the SST file containing some indexes + * NUM_ROWS => the number of entries of this index id in this SST file + * DATA_SIZE => the data size stored in this SST file for this index id */ + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SST_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NUM_ROWS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("DATA_SIZE", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_DELETES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_SINGLEDELETES", sizeof(int64_t), + MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_MERGES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_OTHERS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + +/* Initialize the information_schema.rocksdb_index_file_map virtual table */ +static int rdb_i_s_index_file_map_init(void *p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + 
DBUG_ENTER("rdb_i_s_index_file_map_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_index_file_map_fields_info; + schema->fill_table= rdb_i_s_index_file_map_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_deinit(void *p __attribute__((__unused__))) +{ + DBUG_ENTER("rdb_i_s_deinit"); + DBUG_RETURN(0); +} + +static struct st_mysql_information_schema rdb_i_s_info= +{ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION }; + +struct st_mysql_plugin rdb_i_s_cfstats= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CFSTATS", + "Facebook", + "RocksDB column family stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_dbstats= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DBSTATS", + "Facebook", + "RocksDB database stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_dbstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_perf_context= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT", + "Facebook", + "RocksDB perf context stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_perf_context_global= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT_GLOBAL", + "Facebook", + "RocksDB perf context stats (all)", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_global_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables 
*/ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_cfoptions= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CF_OPTIONS", + "Facebook", + "RocksDB column family options", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfoptions_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_global_info= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_GLOBAL_INFO", + "Facebook", + "RocksDB global info", + PLUGIN_LICENSE_GPL, + rdb_i_s_global_info_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_ddl= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DDL", + "Facebook", + "RocksDB Data Dictionary", + PLUGIN_LICENSE_GPL, + rdb_i_s_ddl_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_index_file_map= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_INDEX_FILE_MAP", + "Facebook", + "RocksDB index file map", + PLUGIN_LICENSE_GPL, + rdb_i_s_index_file_map_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h new file mode 100644 index 00000000000..846defab961 --- /dev/null +++ b/storage/rocksdb/rdb_i_s.h @@ -0,0 +1,34 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the 
terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +namespace myrocks { + +/* + Declare INFORMATION_SCHEMA (I_S) plugins needed by MyRocks storage engine. +*/ + +extern struct st_mysql_plugin rdb_i_s_cfstats; +extern struct st_mysql_plugin rdb_i_s_dbstats; +extern struct st_mysql_plugin rdb_i_s_perf_context; +extern struct st_mysql_plugin rdb_i_s_perf_context_global; +extern struct st_mysql_plugin rdb_i_s_cfoptions; +extern struct st_mysql_plugin rdb_i_s_global_info; +extern struct st_mysql_plugin rdb_i_s_ddl; +extern struct st_mysql_plugin rdb_i_s_index_file_map; + +} // namespace myrocks + diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc new file mode 100644 index 00000000000..dc85db4d356 --- /dev/null +++ b/storage/rocksdb/rdb_index_merge.cc @@ -0,0 +1,604 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header file */ +#include "./rdb_index_merge.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +Rdb_index_merge::Rdb_index_merge(const ulonglong merge_buf_size, + const ulonglong merge_combine_read_size, + const rocksdb::Comparator* comparator) : + m_merge_buf_size(merge_buf_size), + m_merge_combine_read_size(merge_combine_read_size), + m_comparator(comparator), + m_rec_buf_unsorted(nullptr), + m_output_buf(nullptr) +{ +} + +Rdb_index_merge::~Rdb_index_merge() +{ + /* + Close tmp file, we don't need to worry about deletion, mysql handles it. + */ + my_close(m_merge_file.fd, MYF(MY_WME)); + + /* There should be no records left in the offset tree */ + DBUG_ASSERT(m_offset_tree.empty()); + + /* There should be no pointers left on the merge heap */ + DBUG_ASSERT(m_merge_min_heap.empty()); +} + +int Rdb_index_merge::init() +{ + /* + Create a temporary merge file on disk to store sorted chunks during + inplace index creation. + */ + if (merge_file_create()) + { + return HA_ERR_INTERNAL_ERROR; + } + + /* + Then, allocate buffer to store unsorted records before they are written + to disk. They will be written to disk sorted. A sorted tree is used to + keep track of the offset of each record within the unsorted buffer. + */ + m_rec_buf_unsorted= std::make_shared(m_merge_buf_size); + + /* + Allocate output buffer that will contain sorted block that is written to + disk. + */ + m_output_buf= std::make_shared(m_merge_buf_size); + + return 0; +} + +/** + Create a merge file in the given location. 
+*/ +int Rdb_index_merge::merge_file_create() +{ + DBUG_ASSERT(m_merge_file.fd == -1); + + int fd = mysql_tmpfile("myrocks"); + + if (fd < 0) + { + return HA_ERR_INTERNAL_ERROR; + } + + m_merge_file.fd = fd; + m_merge_file.num_sort_buffers = 0; + + return 0; +} + +/** + Add record to offset tree (and unsorted merge buffer) in preparation for + writing out to disk in sorted chunks. + + If buffer in memory is full, write the buffer out to disk sorted using the + offset tree, and clear the tree. (Happens in merge_buf_write) +*/ +int Rdb_index_merge::add(const rocksdb::Slice& key, + const rocksdb::Slice& val) +{ + /* Adding a record after heap is already created results in error */ + DBUG_ASSERT(m_merge_min_heap.empty()); + + /* + Check if sort buffer is going to be out of space, if so write it + out to disk in sorted order using offset tree. + */ + uint total_offset= RDB_MERGE_CHUNK_LEN + m_rec_buf_unsorted->curr_offset + + RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER + + key.size() + val.size(); + if (total_offset >= m_rec_buf_unsorted->total_size) + { + if (merge_buf_write()) + { + // NO_LINT_DEBUG + sql_print_error("Error writing sort buffer to disk."); + return HA_ERR_INTERNAL_ERROR; + } + } + + ulonglong rec_offset= m_rec_buf_unsorted->curr_offset; + + /* + Store key and value in temporary unsorted in memory buffer pointed to by + offset tree. + */ + m_rec_buf_unsorted->store_key_value(key, val); + + /* Find sort order of the new record */ + m_offset_tree.emplace(m_rec_buf_unsorted->block.get() + rec_offset, + m_comparator); + + return 0; +} + +/** + Sort + write merge buffer chunk out to disk. 
+*/ +int Rdb_index_merge::merge_buf_write() +{ + DBUG_ASSERT(m_merge_file.fd != -1); + DBUG_ASSERT(m_rec_buf_unsorted != nullptr); + DBUG_ASSERT(m_output_buf != nullptr); + DBUG_ASSERT(!m_offset_tree.empty()); + + /* Write actual chunk size to first 8 bytes of the merge buffer */ + merge_store_uint64(m_output_buf->block.get(), + m_rec_buf_unsorted->curr_offset + RDB_MERGE_CHUNK_LEN); + m_output_buf->curr_offset += RDB_MERGE_CHUNK_LEN; + + /* + Iterate through the offset tree. Should be ordered by the secondary key + at this point. + */ + for (auto& rec : m_offset_tree) + { + DBUG_ASSERT(m_output_buf->curr_offset <= m_merge_buf_size); + + /* Read record from offset (should never fail) */ + rocksdb::Slice key; + rocksdb::Slice val; + merge_read_rec(rec.block, &key, &val); + + /* Store key and value into sorted output buffer */ + m_output_buf->store_key_value(key, val); + } + + DBUG_ASSERT(m_output_buf->curr_offset <= m_output_buf->total_size); + + /* + Write output buffer to disk. + + Need to position cursor to the chunk it needs to be at on filesystem + then write into the respective merge buffer. + */ + if (my_seek(m_merge_file.fd, m_merge_file.num_sort_buffers * m_merge_buf_size, + SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return HA_ERR_INTERNAL_ERROR; + } + + if (my_write(m_merge_file.fd, m_output_buf->block.get(), + m_output_buf->total_size, MYF(MY_WME | MY_NABP))) + { + // NO_LINT_DEBUG + sql_print_error("Error writing sorted merge buffer to disk."); + return HA_ERR_INTERNAL_ERROR; + } + + /* Increment merge file offset to track number of merge buffers written */ + m_merge_file.num_sort_buffers += 1; + + /* Reset everything for next run */ + merge_reset(); + + return 0; +} + +/** + Prepare n-way merge of n sorted buffers on disk, using a heap sorted by + secondary key records. 
+*/ +int Rdb_index_merge::merge_heap_prepare() +{ + DBUG_ASSERT(m_merge_min_heap.empty()); + + /* + If the offset tree is not empty, there are still some records that need to + be written to disk. Write them out now. + */ + if (!m_offset_tree.empty() && merge_buf_write()) + { + return HA_ERR_INTERNAL_ERROR; + } + + DBUG_ASSERT(m_merge_file.num_sort_buffers > 0); + + /* + For an n-way merge, we need to read chunks of each merge file + simultaneously. + */ + ulonglong chunk_size= m_merge_combine_read_size/ + m_merge_file.num_sort_buffers; + if (chunk_size >= m_merge_buf_size) + { + chunk_size= m_merge_buf_size; + } + + /* Allocate buffers for each chunk */ + for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++) + { + auto entry= std::make_shared(m_comparator); + + /* + Read chunk_size bytes from each chunk on disk, and place inside + respective chunk buffer. + */ + size_t total_size= + entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); + + if (total_size == (size_t) - 1) + { + return HA_ERR_INTERNAL_ERROR; + } + + /* Can reach this condition if an index was added on table w/ no rows */ + if (total_size - RDB_MERGE_CHUNK_LEN == 0) + { + break; + } + + /* Read the first record from each buffer to initially populate the heap */ + if (entry->read_rec(&entry->key, &entry->val)) + { + // NO_LINT_DEBUG + sql_print_error("Chunk size is too small to process merge."); + return HA_ERR_INTERNAL_ERROR; + } + + m_merge_min_heap.push(std::move(entry)); + } + + return 0; +} + +/** + Create and/or iterate through keys in the merge heap. +*/ +int Rdb_index_merge::next(rocksdb::Slice* key, rocksdb::Slice* val) +{ + /* + If table fits in one sort buffer, we can optimize by writing + the sort buffer directly through to the sstfilewriter instead of + needing to create tmp files/heap to merge the sort buffers. + + If there are no sort buffer records (alters on empty tables), + also exit here. 
+ */ + if (m_merge_file.num_sort_buffers == 0) + { + if (m_offset_tree.empty()) + { + return -1; + } + + auto rec= m_offset_tree.begin(); + + /* Read record from offset */ + merge_read_rec(rec->block, key, val); + + m_offset_tree.erase(rec); + return 0; + } + + int res; + + /* + If heap and heap chunk info are empty, we must be beginning the merge phase + of the external sort. Populate the heap with initial values from each + disk chunk. + */ + if (m_merge_min_heap.empty()) + { + if ((res= merge_heap_prepare())) + { + // NO_LINT_DEBUG + sql_print_error("Error during preparation of heap."); + return res; + } + + /* + Return the first top record without popping, as we haven't put this + inside the SST file yet. + */ + merge_heap_top(key, val); + return 0; + } + + DBUG_ASSERT(!m_merge_min_heap.empty()); + return merge_heap_pop_and_get_next(key, val); +} + +/** + Get current top record from the heap. +*/ +void Rdb_index_merge::merge_heap_top(rocksdb::Slice* key, + rocksdb::Slice* val) +{ + DBUG_ASSERT(!m_merge_min_heap.empty()); + + const std::shared_ptr& entry= m_merge_min_heap.top(); + *key= entry->key; + *val= entry->val; +} + +/** + Pops the top record, and uses it to read next record from the + corresponding sort buffer and push onto the heap. + + Returns -1 when there are no more records in the heap. +*/ +int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* key, + rocksdb::Slice* val) +{ + /* + Make a new reference to shared ptr so it doesn't get destroyed + during pop(). We are going to push this entry back onto the heap. + */ + const std::shared_ptr entry= m_merge_min_heap.top(); + m_merge_min_heap.pop(); + + /* + We are finished w/ current chunk if: + current_offset + disk_offset == total_size + + Return without adding entry back onto heap. + If heap is also empty, we must be finished with merge. 
+ */ + if (entry->chunk_info->is_chunk_finished()) + { + if (m_merge_min_heap.empty()) + { + return -1; + } + + merge_heap_top(key, val); + return 0; + } + + /* + Make sure we haven't reached the end of the chunk. + */ + DBUG_ASSERT(!entry->chunk_info->is_chunk_finished()); + + /* + If merge_read_rec fails, it means the either the chunk was cut off + or we've reached the end of the respective chunk. + */ + if (entry->read_rec(&entry->key, &entry->val)) + { + if (entry->read_next_chunk_from_disk(m_merge_file.fd)) + { + return HA_ERR_INTERNAL_ERROR; + } + + /* Try reading record again, should never fail. */ + if (entry->read_rec(&entry->key, &entry->val)) + { + return HA_ERR_INTERNAL_ERROR; + } + } + + /* Push entry back on to the heap w/ updated buffer + offset ptr */ + m_merge_min_heap.push(std::move(entry)); + + /* Return the current top record on heap */ + merge_heap_top(key, val); + return 0; +} + +int Rdb_index_merge::merge_heap_entry::read_next_chunk_from_disk(File fd) +{ + if (chunk_info->read_next_chunk_from_disk(fd)) + { + return 1; + } + + block= chunk_info->block.get(); + return 0; +} + +int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) +{ + disk_curr_offset += curr_offset; + + if (my_seek(fd, disk_curr_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return 1; + } + + /* Overwrite the old block */ + size_t bytes_read= my_read(fd, block.get(), block_len, MYF(MY_WME)); + if (bytes_read == (size_t) -1) + { + // NO_LINT_DEBUG + sql_print_error("Error reading merge file from disk."); + return 1; + } + + curr_offset= 0; + return 0; +} + +/** + Get records from offset within sort buffer and compare them. + Sort by least to greatest. 
+*/ +int Rdb_index_merge::merge_record_compare(const uchar* a_block, + const uchar* b_block, + const rocksdb::Comparator* const comparator) +{ + return comparator->Compare(as_slice(a_block), as_slice(b_block)); +} + +/** + Given an offset in a merge sort buffer, read out the keys + values. + After this, block will point to the next record in the buffer. +**/ +void Rdb_index_merge::merge_read_rec(const uchar* block, + rocksdb::Slice* key, + rocksdb::Slice* val) +{ + /* Read key at block offset into key slice and the value into value slice*/ + read_slice(key, block); + read_slice(val, block + RDB_MERGE_REC_DELIMITER + key->size()); +} + +void Rdb_index_merge::read_slice(rocksdb::Slice* slice, const uchar* block_ptr) +{ + uint64 slice_len; + merge_read_uint64(&block_ptr, &slice_len); + + *slice= rocksdb::Slice(reinterpret_cast(block_ptr), slice_len); +} + +int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *key, + rocksdb::Slice *val) +{ + const uchar* block_ptr= block; + + /* Read key at block offset into key slice and the value into value slice*/ + if (read_slice(key, &block_ptr) != 0 || read_slice(val, &block_ptr) != 0) + { + return 1; + } + + chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block; + block += (uintptr_t) block_ptr - (uintptr_t) block; + + return 0; +} + +int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice* slice, + const uchar** block_ptr) +{ + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER)) + { + return 1; + } + + uint64 slice_len; + merge_read_uint64(block_ptr, &slice_len); + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER + slice_len)) + { + return 1; + } + + *slice= rocksdb::Slice(reinterpret_cast(*block_ptr), slice_len); + *block_ptr += slice_len; + return 0; +} + +size_t Rdb_index_merge::merge_heap_entry::prepare(File fd, ulonglong f_offset, + ulonglong chunk_size) +{ + chunk_info= std::make_shared(chunk_size); + size_t res = chunk_info->prepare(fd, f_offset); + if (res != (size_t) - 1) + { + 
block= chunk_info->block.get() + RDB_MERGE_CHUNK_LEN; + } + + return res; +} + +size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) +{ + disk_start_offset= f_offset; + disk_curr_offset= f_offset; + + /* + Need to position cursor to the chunk it needs to be at on filesystem + then read 'chunk_size' bytes into the respective chunk buffer. + */ + if (my_seek(fd, f_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return (size_t) - 1; + } + + size_t bytes_read= my_read(fd, block.get(), total_size, MYF(MY_WME)); + if (bytes_read == (size_t) - 1) + { + // NO_LINT_DEBUG + sql_print_error("Error reading merge file from disk."); + return (size_t) - 1; + } + + /* + Read the first 8 bytes of each chunk, this gives us the actual + size of each chunk. + */ + const uchar *block_ptr= block.get(); + merge_read_uint64(&block_ptr, &total_size); + curr_offset += RDB_MERGE_CHUNK_LEN; + return total_size; +} + +/* Store key and value w/ their respective delimiters at the given offset */ +void Rdb_index_merge::merge_buf_info::store_key_value( + const rocksdb::Slice& key, const rocksdb::Slice& val) +{ + store_slice(key); + store_slice(val); +} + +void Rdb_index_merge::merge_buf_info::store_slice(const rocksdb::Slice& slice) +{ + /* Store length delimiter */ + merge_store_uint64(&block[curr_offset], slice.size()); + + /* Store slice data */ + memcpy(&block[curr_offset + RDB_MERGE_REC_DELIMITER], slice.data(), + slice.size()); + + curr_offset += slice.size() + RDB_MERGE_REC_DELIMITER; +} + + +void Rdb_index_merge::merge_reset() +{ + /* + Either error, or all values in the sort buffer have been written to disk, + so we need to clear the offset tree. 
+ */ + m_offset_tree.clear(); + + /* Reset sort buffer block */ + if (m_rec_buf_unsorted && m_rec_buf_unsorted->block) + { + m_rec_buf_unsorted->curr_offset= 0; + } + + /* Reset output buf */ + if (m_output_buf && m_output_buf->block) + { + m_output_buf->curr_offset= 0; + } +} + +} // namespace myrocks + diff --git a/storage/rocksdb/rdb_index_merge.h b/storage/rocksdb/rdb_index_merge.h new file mode 100644 index 00000000000..24090c335ac --- /dev/null +++ b/storage/rocksdb/rdb_index_merge.h @@ -0,0 +1,229 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* MySQL header files */ +#include "../sql/log.h" +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ + +/* C++ standard header files */ +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" + +namespace myrocks { + +/* + Length of delimiters used during inplace index creation. 
+*/ +#define RDB_MERGE_CHUNK_LEN sizeof(size_t) +#define RDB_MERGE_REC_DELIMITER sizeof(size_t) +#define RDB_MERGE_KEY_DELIMITER RDB_MERGE_REC_DELIMITER +#define RDB_MERGE_VAL_DELIMITER RDB_MERGE_REC_DELIMITER + +class Rdb_key_def; +class Rdb_tbl_def; + +class Rdb_index_merge { + Rdb_index_merge(const Rdb_index_merge& p)= delete; + Rdb_index_merge& operator=(const Rdb_index_merge& p)= delete; + + public: + /* Information about temporary files used in external merge sort */ + struct merge_file_info { + File fd= -1; /* file descriptor */ + ulong num_sort_buffers; /* number of sort buffers in temp file */ + }; + + /* Buffer for sorting in main memory. */ + struct merge_buf_info { + /* heap memory allocated for main memory sort/merge */ + std::unique_ptr block; + ulonglong block_len; /* amount of data bytes allocated for block above */ + ulonglong curr_offset; /* offset of the record pointer for the block */ + ulonglong disk_start_offset; /* where the chunk starts on disk */ + ulonglong disk_curr_offset; /* current offset on disk */ + ulonglong total_size; /* total # of data bytes in chunk */ + + void store_key_value(const rocksdb::Slice& key, const rocksdb::Slice& val) + __attribute__((__nonnull__)); + + void store_slice(const rocksdb::Slice& slice) + __attribute__((__nonnull__)); + + size_t prepare(File fd, ulonglong f_offset) + __attribute__((__nonnull__)); + + int read_next_chunk_from_disk(File fd) + __attribute__((__nonnull__, __warn_unused_result__)); + + inline bool is_chunk_finished() + { + return curr_offset + disk_curr_offset - disk_start_offset == total_size; + } + + inline bool has_space(uint64 needed) + { + return curr_offset + needed <= block_len; + } + + explicit merge_buf_info(const ulonglong merge_block_size) : + block(nullptr), block_len(merge_block_size), curr_offset(0), + disk_start_offset(0), disk_curr_offset(0), total_size(merge_block_size) + { + /* Will throw an exception if it runs out of memory here */ + block= std::unique_ptr(new 
uchar[merge_block_size]); + + /* Initialize entire buffer to 0 to avoid valgrind errors */ + memset(block.get(), 0, merge_block_size); + } + }; + + /* Represents an entry in the heap during merge phase of external sort */ + struct merge_heap_entry + { + std::shared_ptr chunk_info; /* pointer to buffer info */ + uchar* block; /* pointer to heap memory where record is stored */ + const rocksdb::Comparator* const comparator; + rocksdb::Slice key; /* current key pointed to by block ptr */ + rocksdb::Slice val; + + size_t prepare(File fd, ulonglong f_offset, ulonglong chunk_size) + __attribute__((__nonnull__)); + + int read_next_chunk_from_disk(File fd) + __attribute__((__nonnull__, __warn_unused_result__)); + + int read_rec(rocksdb::Slice *key, rocksdb::Slice *val) + __attribute__((__nonnull__, __warn_unused_result__)); + + int read_slice(rocksdb::Slice* slice, const uchar** block_ptr) + __attribute__((__nonnull__, __warn_unused_result__)); + + explicit merge_heap_entry(const rocksdb::Comparator* const comparator) : + chunk_info(nullptr), block(nullptr), comparator(comparator) {} + }; + + struct merge_heap_comparator + { + bool operator() (const std::shared_ptr& lhs, + const std::shared_ptr& rhs) + { + return lhs->comparator->Compare(rhs->key, lhs->key) < 0; + } + }; + + /* Represents a record in unsorted buffer */ + struct merge_record + { + uchar* block; /* points to offset of key in sort buffer */ + const rocksdb::Comparator* const comparator; + + bool operator< (const merge_record &record) const + { + return merge_record_compare(this->block, record.block, comparator) < 0; + } + + merge_record(uchar* block, const rocksdb::Comparator* const comparator) : + block(block), comparator(comparator) {} + }; + + private: + const ulonglong m_merge_buf_size; + const ulonglong m_merge_combine_read_size; + const rocksdb::Comparator* m_comparator; + struct merge_file_info m_merge_file; + std::shared_ptr m_rec_buf_unsorted; + std::shared_ptr m_output_buf; + std::set m_offset_tree; 
+ std::priority_queue, + std::vector>, + merge_heap_comparator> m_merge_min_heap; + + static inline void merge_store_uint64(uchar *dst, uint64 n) + { + memcpy(dst, &n, sizeof(n)); + } + + static inline void merge_read_uint64(const uchar **buf_ptr, uint64 *dst) + { + DBUG_ASSERT(buf_ptr != nullptr); + memcpy(dst, *buf_ptr, sizeof(uint64)); + *buf_ptr += sizeof(uint64); + } + + static inline rocksdb::Slice as_slice(const uchar* block) + { + uint64 len; + merge_read_uint64(&block, &len); + + return rocksdb::Slice(reinterpret_cast(block), len); + } + + static int merge_record_compare(const uchar* a_block, const uchar* b_block, + const rocksdb::Comparator* const comparator) + __attribute__((__nonnull__, __warn_unused_result__)); + + void merge_read_rec(const uchar* block, rocksdb::Slice* key, + rocksdb::Slice* val) + __attribute__((__nonnull__)); + + void read_slice(rocksdb::Slice* slice, const uchar* block_ptr) + __attribute__((__nonnull__)); + + public: + Rdb_index_merge(const ulonglong merge_buf_size, + const ulonglong merge_combine_read_size, + const rocksdb::Comparator* comparator); + ~Rdb_index_merge(); + + int init() + __attribute__((__nonnull__, __warn_unused_result__)); + + int merge_file_create() + __attribute__((__nonnull__, __warn_unused_result__)); + + int add(const rocksdb::Slice& key, const rocksdb::Slice& val) + __attribute__((__nonnull__, __warn_unused_result__)); + + int merge_buf_write() + __attribute__((__nonnull__, __warn_unused_result__)); + + int next(rocksdb::Slice* key, rocksdb::Slice* val) + __attribute__((__nonnull__, __warn_unused_result__)); + + int merge_heap_prepare() + __attribute__((__nonnull__, __warn_unused_result__)); + + void merge_heap_top(rocksdb::Slice* key, rocksdb::Slice* val) + __attribute__((__nonnull__)); + + int merge_heap_pop_and_get_next(rocksdb::Slice* key, rocksdb::Slice* val) + __attribute__((__nonnull__, __warn_unused_result__)); + + void merge_reset(); +}; + +} // namespace myrocks + diff --git 
a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc new file mode 100644 index 00000000000..e8077e2fd89 --- /dev/null +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -0,0 +1,234 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header file */ +#include "./rdb_mutex_wrapper.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_utils.h" + +// Internal MySQL APIs not exposed in any header. +extern "C" +{ +void thd_enter_cond(MYSQL_THD thd, mysql_cond_t *cond, mysql_mutex_t *mutex, + const PSI_stage_info *stage, PSI_stage_info *old_stage); +void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage); +} + +using namespace rocksdb; + +namespace myrocks { + +static +PSI_stage_info stage_waiting_on_row_lock2= { 0, "Waiting for row lock", 0}; + +static const int64_t MICROSECS= 1000*1000; +// A timeout as long as one full non-leap year worth of microseconds is as +// good as infinite timeout. +static const int64_t BIG_TIMEOUT= MICROSECS * 60 * 60 * 24 * 7 * 365; + +Rdb_cond_var::Rdb_cond_var() { + mysql_cond_init(0, &m_cond, nullptr); +} + +Rdb_cond_var::~Rdb_cond_var() { + mysql_cond_destroy(&m_cond); +} + +Status Rdb_cond_var::Wait(std::shared_ptr mutex_arg) { + return WaitFor(mutex_arg, BIG_TIMEOUT); +} + + +/* + @brief + Wait on condition variable. 
The caller must make sure that we own + *mutex_ptr. The mutex is released and re-acquired by the wait function. + + @param + timeout_micros Timeout in microseconds. Negative value means no timeout. + + @return + Status::OK() - Wait successful + Status::TimedOut() - Timed out or wait killed (the caller can check + thd_killed() to determine which occurred) +*/ + +Status +Rdb_cond_var::WaitFor(std::shared_ptr mutex_arg, + int64_t timeout_micros) +{ + auto *mutex_obj= reinterpret_cast(mutex_arg.get()); + DBUG_ASSERT(mutex_obj != nullptr); + + mysql_mutex_t * const mutex_ptr= &mutex_obj->m_mutex; + + int res= 0; + struct timespec wait_timeout; + + if (timeout_micros < 0) + timeout_micros= BIG_TIMEOUT; + set_timespec_nsec(wait_timeout, timeout_micros*1000); + +#ifndef STANDALONE_UNITTEST + PSI_stage_info old_stage; + mysql_mutex_assert_owner(mutex_ptr); + + if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0) + { + my_core::thd_enter_cond(current_thd, &m_cond, mutex_ptr, + &stage_waiting_on_row_lock2, &old_stage); + /* + After the mysql_cond_timedwait we need make this call + + my_core::thd_exit_cond(thd, &old_stage); + + to inform the SQL layer that KILLable wait has ended. However, + that will cause mutex to be released. Defer the release until the mutex + that is unlocked by RocksDB's Pessimistic Transactions system. + */ + mutex_obj->set_unlock_action(&old_stage); + } + +#endif + bool killed= false; + + do + { + res= mysql_cond_timedwait(&m_cond, mutex_ptr, &wait_timeout); + +#ifndef STANDALONE_UNITTEST + if (current_thd) + killed= my_core::thd_killed(current_thd); +#endif + } while (!killed && res == EINTR); + + if (res || killed) + return Status::TimedOut(); + else + return Status::OK(); +} + + +/* + + @note + This function may be called while not holding the mutex that is used to wait + on the condition variable.
+ + The manual page says ( http://linux.die.net/man/3/pthread_cond_signal): + + The pthread_cond_broadcast() or pthread_cond_signal() functions may be called + by a thread whether or not it currently owns the mutex that threads calling + pthread_cond_wait() or pthread_cond_timedwait() have associated with the + condition variable during their waits; however, IF PREDICTABLE SCHEDULING + BEHAVIOR IS REQUIRED, THEN THAT MUTEX SHALL BE LOCKED by the thread calling + pthread_cond_broadcast() or pthread_cond_signal(). + + What's "predicate scheduling" and do we need it? The explanation is here: + + https://groups.google.com/forum/?hl=ky#!msg/comp.programming.threads/wEUgPq541v8/ZByyyS8acqMJ + "The problem (from the realtime side) with condition variables is that + if you can signal/broadcast without holding the mutex, and any thread + currently running can acquire an unlocked mutex and check a predicate + without reference to the condition variable, then you can have an + indirect priority inversion." + + Another possible consequence is that one can create spurious wake-ups when + there are multiple threads signaling the condition. + + None of this looks like a problem for our use case. +*/ + +void Rdb_cond_var::Notify() +{ + mysql_cond_signal(&m_cond); +} + + +/* + @note + This is called without holding the mutex that's used for waiting on the + condition. See ::Notify(). +*/ +void Rdb_cond_var::NotifyAll() +{ + mysql_cond_broadcast(&m_cond); +} + + +Rdb_mutex::Rdb_mutex() +{ + mysql_mutex_init(0 /* Don't register in P_S. */, &m_mutex, + MY_MUTEX_INIT_FAST); +} + +Rdb_mutex::~Rdb_mutex() { + mysql_mutex_destroy(&m_mutex); +} + +Status Rdb_mutex::Lock() { + mysql_mutex_lock(&m_mutex); + DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0); + return Status::OK(); +} + +// Attempt to acquire lock. If timeout is non-negative, operation may be +// failed after this many milliseconds. 
+// If implementing a custom version of this class, the implementation may +// choose to ignore the timeout. +// Return OK on success, or other Status on failure. +Status Rdb_mutex::TryLockFor(int64_t timeout_time __attribute__((__unused__))) +{ + /* + Note: PThreads API has pthread_mutex_timedlock(), but mysql's + mysql_mutex_* wrappers do not wrap that function. + */ + mysql_mutex_lock(&m_mutex); + return Status::OK(); +} + + +#ifndef STANDALONE_UNITTEST +void Rdb_mutex::set_unlock_action(PSI_stage_info *old_stage_arg) +{ + DBUG_ASSERT(old_stage_arg != nullptr); + + mysql_mutex_assert_owner(&m_mutex); + DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0); + + m_old_stage_info[current_thd] = + std::make_shared(*old_stage_arg); +} +#endif + +// Unlock Mutex that was successfully locked by Lock() or TryLockUntil() +void Rdb_mutex::UnLock() { +#ifndef STANDALONE_UNITTEST + if (m_old_stage_info.count(current_thd) > 0) + { + std::shared_ptr old_stage = m_old_stage_info[current_thd]; + m_old_stage_info.erase(current_thd); + /* The following will call mysql_mutex_unlock */ + my_core::thd_exit_cond(current_thd, old_stage.get()); + return; + } +#endif + mysql_mutex_unlock(&m_mutex); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h new file mode 100644 index 00000000000..7d0e4169ade --- /dev/null +++ b/storage/rocksdb/rdb_mutex_wrapper.h @@ -0,0 +1,139 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ standard header file */ +#include +#include +#include +#include +#include + +/* MySQL header files */ +#include "./my_sys.h" +#include "mysql/plugin.h" + +/* RocksDB header files */ +#include "rocksdb/utilities/transaction_db_mutex.h" + +namespace myrocks { + +class Rdb_mutex: public rocksdb::TransactionDBMutex { + Rdb_mutex(const Rdb_mutex& p) = delete; + Rdb_mutex& operator = (const Rdb_mutex& p)=delete; + public: + Rdb_mutex(); + virtual ~Rdb_mutex(); + + /* + Override parent class's virtual methods of interest. + */ + + // Attempt to acquire lock. Return OK on success, or other Status on failure. + // If returned status is OK, TransactionDB will eventually call UnLock(). + virtual rocksdb::Status Lock() override; + + // Attempt to acquire lock. If timeout is non-negative, operation should be + // failed after this many microseconds. + // Returns OK on success, + // TimedOut if timed out, + // or other Status on failure. + // If returned status is OK, TransactionDB will eventually call UnLock(). + virtual rocksdb::Status TryLockFor( + int64_t timeout_time __attribute__((__unused__))) override; + + // Unlock Mutex that was successfully locked by Lock() or TryLockUntil() + virtual void UnLock() override; + + private: + mysql_mutex_t m_mutex; + friend class Rdb_cond_var; + +#ifndef STANDALONE_UNITTEST + void set_unlock_action(PSI_stage_info *old_stage_arg); + std::unordered_map> m_old_stage_info; +#endif +}; + + +class Rdb_cond_var: public rocksdb::TransactionDBCondVar { + public: + Rdb_cond_var(); + virtual ~Rdb_cond_var(); + + /* + Override parent class's virtual methods of interest. + */ + + // Block current thread until condition variable is notified by a call to + // Notify() or NotifyAll().
Wait() will be called with mutex locked. + // Returns OK if notified. + // Returns non-OK if TransactionDB should stop waiting and fail the operation. + // May return OK spuriously even if not notified. + virtual rocksdb::Status + Wait(std::shared_ptr mutex) override; + + // Block current thread until condition variable is notified by a call to + // Notify() or NotifyAll(), or if the timeout is reached. + // If timeout is non-negative, operation should be failed after this many + // microseconds. + // If implementing a custom version of this class, the implementation may + // choose to ignore the timeout. + // + // Returns OK if notified. + // Returns TimedOut if timeout is reached. + // Returns other status if TransactionDB should otherwise stop waiting and + // fail the operation. + // May return OK spuriously even if not notified. + virtual rocksdb::Status + WaitFor(std::shared_ptr mutex, + int64_t timeout_time) override; + + // If any threads are waiting on *this, unblock at least one of the + // waiting threads. + virtual void Notify() override; + + // Unblocks all threads waiting on *this. + virtual void NotifyAll() override; + + private: + mysql_cond_t m_cond; +}; + + +class Rdb_mutex_factory : public rocksdb::TransactionDBMutexFactory { + public: + /* + Override parent class's virtual methods of interest. + */ + + virtual std::shared_ptr + AllocateMutex() override { + return + std::make_shared(); + } + + virtual std::shared_ptr + AllocateCondVar() override { + return + std::make_shared(); + } + + virtual ~Rdb_mutex_factory() {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc new file mode 100644 index 00000000000..cd0d9e57c2b --- /dev/null +++ b/storage/rocksdb/rdb_perf_context.cc @@ -0,0 +1,240 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc.
+ Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header file */ +#include "./rdb_perf_context.h" + +/* C++ system header files */ +#include + +/* RocksDB header files */ +#include "rocksdb/iostats_context.h" +#include "rocksdb/perf_context.h" + +/* MyRocks header files */ +#include "./ha_rocksdb_proto.h" + +namespace myrocks { + +// To add a new metric: +// 1. Update the PC enum in rdb_perf_context.h +// 2. Update sections (A), (B), and (C) below +// 3. 
Update perf_context.test and show_engine.test + +std::string rdb_pc_stat_types[]= +{ + // (A) These should be in the same order as the PC enum + "USER_KEY_COMPARISON_COUNT", + "BLOCK_CACHE_HIT_COUNT", + "BLOCK_READ_COUNT", + "BLOCK_READ_BYTE", + "BLOCK_READ_TIME", + "BLOCK_CHECKSUM_TIME", + "BLOCK_DECOMPRESS_TIME", + "INTERNAL_KEY_SKIPPED_COUNT", + "INTERNAL_DELETE_SKIPPED_COUNT", + "GET_SNAPSHOT_TIME", + "GET_FROM_MEMTABLE_TIME", + "GET_FROM_MEMTABLE_COUNT", + "GET_POST_PROCESS_TIME", + "GET_FROM_OUTPUT_FILES_TIME", + "SEEK_ON_MEMTABLE_TIME", + "SEEK_ON_MEMTABLE_COUNT", + "SEEK_CHILD_SEEK_TIME", + "SEEK_CHILD_SEEK_COUNT", + "SEEK_IN_HEAP_TIME", + "SEEK_INTERNAL_SEEK_TIME", + "FIND_NEXT_USER_ENTRY_TIME", + "WRITE_WAL_TIME", + "WRITE_MEMTABLE_TIME", + "WRITE_DELAY_TIME", + "WRITE_PRE_AND_POST_PROCESS_TIME", + "DB_MUTEX_LOCK_NANOS", + "DB_CONDITION_WAIT_NANOS", + "MERGE_OPERATOR_TIME_NANOS", + "READ_INDEX_BLOCK_NANOS", + "READ_FILTER_BLOCK_NANOS", + "NEW_TABLE_BLOCK_ITER_NANOS", + "NEW_TABLE_ITERATOR_NANOS", + "BLOCK_SEEK_NANOS", + "FIND_TABLE_NANOS", + "IO_THREAD_POOL_ID", + "IO_BYTES_WRITTEN", + "IO_BYTES_READ", + "IO_OPEN_NANOS", + "IO_ALLOCATE_NANOS", + "IO_WRITE_NANOS", + "IO_READ_NANOS", + "IO_RANGE_SYNC_NANOS", + "IO_LOGGER_NANOS" +}; + +#define IO_PERF_RECORD(_field_) \ + do { \ + if (rocksdb::perf_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::perf_context._field_; \ + idx++; \ + } while (0) +#define IO_STAT_RECORD(_field_) \ + do { \ + if (rocksdb::iostats_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::iostats_context._field_; \ + idx++; \ + } while (0) + +static void harvest_diffs(Rdb_atomic_perf_counters *counters) +{ + // (C) These should be in the same order as the PC enum + size_t idx= 0; + IO_PERF_RECORD(user_key_comparison_count); + IO_PERF_RECORD(block_cache_hit_count); + IO_PERF_RECORD(block_read_count); + IO_PERF_RECORD(block_read_byte); + IO_PERF_RECORD(block_read_time); + IO_PERF_RECORD(block_checksum_time); + 
IO_PERF_RECORD(block_decompress_time); + IO_PERF_RECORD(internal_key_skipped_count); + IO_PERF_RECORD(internal_delete_skipped_count); + IO_PERF_RECORD(get_snapshot_time); + IO_PERF_RECORD(get_from_memtable_time); + IO_PERF_RECORD(get_from_memtable_count); + IO_PERF_RECORD(get_post_process_time); + IO_PERF_RECORD(get_from_output_files_time); + IO_PERF_RECORD(seek_on_memtable_time); + IO_PERF_RECORD(seek_on_memtable_count); + IO_PERF_RECORD(seek_child_seek_time); + IO_PERF_RECORD(seek_child_seek_count); + IO_PERF_RECORD(seek_min_heap_time); + IO_PERF_RECORD(seek_internal_seek_time); + IO_PERF_RECORD(find_next_user_entry_time); + IO_PERF_RECORD(write_wal_time); + IO_PERF_RECORD(write_memtable_time); + IO_PERF_RECORD(write_delay_time); + IO_PERF_RECORD(write_pre_and_post_process_time); + IO_PERF_RECORD(db_mutex_lock_nanos); + IO_PERF_RECORD(db_condition_wait_nanos); + IO_PERF_RECORD(merge_operator_time_nanos); + IO_PERF_RECORD(read_index_block_nanos); + IO_PERF_RECORD(read_filter_block_nanos); + IO_PERF_RECORD(new_table_block_iter_nanos); + IO_PERF_RECORD(new_table_iterator_nanos); + IO_PERF_RECORD(block_seek_nanos); + IO_PERF_RECORD(find_table_nanos); + IO_STAT_RECORD(thread_pool_id); + IO_STAT_RECORD(bytes_written); + IO_STAT_RECORD(bytes_read); + IO_STAT_RECORD(open_nanos); + IO_STAT_RECORD(allocate_nanos); + IO_STAT_RECORD(write_nanos); + IO_STAT_RECORD(read_nanos); + IO_STAT_RECORD(range_sync_nanos); + IO_STAT_RECORD(logger_nanos); +} + +#undef IO_PERF_DIFF +#undef IO_STAT_DIFF + + +static Rdb_atomic_perf_counters rdb_global_perf_counters; + +void rdb_get_global_perf_counters(Rdb_perf_counters *counters) +{ + DBUG_ASSERT(counters != nullptr); + + counters->load(rdb_global_perf_counters); +} + +void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters) +{ + for (int i= 0; i < PC_MAX_IDX; i++) { + m_value[i]= atomic_counters.m_value[i].load(std::memory_order_relaxed); + } +} + +bool Rdb_io_perf::start(uint32_t perf_context_level) +{ + 
rocksdb::PerfLevel perf_level= + static_cast(perf_context_level); + + if (rocksdb::GetPerfLevel() != perf_level) + { + rocksdb::SetPerfLevel(perf_level); + } + + if (perf_level == rocksdb::kDisable) + { + return false; + } + + rocksdb::perf_context.Reset(); + rocksdb::iostats_context.Reset(); + return true; +} + +void Rdb_io_perf::end_and_record(uint32_t perf_context_level) +{ + rocksdb::PerfLevel perf_level= + static_cast(perf_context_level); + + if (perf_level == rocksdb::kDisable) + { + return; + } + + if (m_atomic_counters) + { + harvest_diffs(m_atomic_counters); + } + harvest_diffs(&rdb_global_perf_counters); + + if (m_shared_io_perf_read && + (rocksdb::perf_context.block_read_byte != 0 || + rocksdb::perf_context.block_read_count != 0 || + rocksdb::perf_context.block_read_time != 0)) + { + my_io_perf_t io_perf_read; + + my_io_perf_init(&io_perf_read); + io_perf_read.bytes= rocksdb::perf_context.block_read_byte; + io_perf_read.requests= rocksdb::perf_context.block_read_count; + + /* + Rocksdb does not distinguish between I/O service and wait time, so just + use svc time. + */ + io_perf_read.svc_time_max= io_perf_read.svc_time= + rocksdb::perf_context.block_read_time; + + my_io_perf_sum_atomic_helper(m_shared_io_perf_read, &io_perf_read); + my_io_perf_sum(&m_stats->table_io_perf_read, &io_perf_read); + } + + if (m_stats) { + if (rocksdb::perf_context.internal_key_skipped_count != 0) + { + m_stats->key_skipped += rocksdb::perf_context.internal_key_skipped_count; + } + + if (rocksdb::perf_context.internal_delete_skipped_count != 0) + { + m_stats->delete_skipped += + rocksdb::perf_context.internal_delete_skipped_count; + } + } +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h new file mode 100644 index 00000000000..1e01e933895 --- /dev/null +++ b/storage/rocksdb/rdb_perf_context.h @@ -0,0 +1,134 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. 
+ Portions Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include "./handler.h" +#include + +namespace myrocks { + +enum { + PC_USER_KEY_COMPARISON_COUNT = 0, + PC_BLOCK_CACHE_HIT_COUNT, + PC_BLOCK_READ_COUNT, + PC_BLOCK_READ_BYTE, + PC_BLOCK_READ_TIME, + PC_BLOCK_CHECKSUM_TIME, + PC_BLOCK_DECOMPRESS_TIME, + PC_KEY_SKIPPED, + PC_DELETE_SKIPPED, + PC_GET_SNAPSHOT_TIME, + PC_GET_FROM_MEMTABLE_TIME, + PC_GET_FROM_MEMTABLE_COUNT, + PC_GET_POST_PROCESS_TIME, + PC_GET_FROM_OUTPUT_FILES_TIME, + PC_SEEK_ON_MEMTABLE_TIME, + PC_SEEK_ON_MEMTABLE_COUNT, + PC_SEEK_CHILD_SEEK_TIME, + PC_SEEK_CHILD_SEEK_COUNT, + PC_SEEK_MIN_HEAP_TIME, + PC_SEEK_INTERNAL_SEEK_TIME, + PC_FIND_NEXT_USER_ENTRY_TIME, + PC_WRITE_WAL_TIME, + PC_WRITE_MEMTABLE_TIME, + PC_WRITE_DELAY_TIME, + PC_WRITE_PRE_AND_POST_PROCESSS_TIME, + PC_DB_MUTEX_LOCK_NANOS, + PC_DB_CONDITION_WAIT_NANOS, + PC_MERGE_OPERATOR_TIME_NANOS, + PC_READ_INDEX_BLOCK_NANOS, + PC_READ_FILTER_BLOCK_NANOS, + PC_NEW_TABLE_BLOCK_ITER_NANOS, + PC_NEW_TABLE_ITERATOR_NANOS, + PC_BLOCK_SEEK_NANOS, + PC_FIND_TABLE_NANOS, + PC_IO_THREAD_POOL_ID, + PC_IO_BYTES_WRITTEN, + PC_IO_BYTES_READ, + PC_IO_OPEN_NANOS, + PC_IO_ALLOCATE_NANOS, + PC_IO_WRITE_NANOS, + PC_IO_READ_NANOS, + PC_IO_RANGE_SYNC_NANOS, + 
PC_IO_LOGGER_NANOS, + PC_MAX_IDX +}; + +class Rdb_perf_counters; + +/* + A collection of performance counters that can be safely incremented by + multiple threads since it stores atomic datapoints. +*/ +struct Rdb_atomic_perf_counters +{ + std::atomic_ullong m_value[PC_MAX_IDX]; +}; + +/* + A collection of performance counters that is meant to be incremented by + a single thread. +*/ +class Rdb_perf_counters +{ + public: + uint64_t m_value[PC_MAX_IDX]; + + void load(const Rdb_atomic_perf_counters &atomic_counters); +}; + +extern std::string rdb_pc_stat_types[PC_MAX_IDX]; + +/* + Perf timers for data reads + */ +class Rdb_io_perf +{ + // Context management + Rdb_atomic_perf_counters *m_atomic_counters= nullptr; + my_io_perf_atomic_t *m_shared_io_perf_read= nullptr; + ha_statistics *m_stats= nullptr; + + public: + void init(Rdb_atomic_perf_counters *atomic_counters, + my_io_perf_atomic_t *shared_io_perf_read, + ha_statistics *stats) + { + DBUG_ASSERT(atomic_counters != nullptr); + DBUG_ASSERT(shared_io_perf_read != nullptr); + DBUG_ASSERT(stats != nullptr); + + m_atomic_counters= atomic_counters; + m_shared_io_perf_read= shared_io_perf_read; + m_stats= stats; + } + + bool start(uint32_t perf_context_level); + void end_and_record(uint32_t perf_context_level); + + explicit Rdb_io_perf() : m_atomic_counters(nullptr), + m_shared_io_perf_read(nullptr), + m_stats(nullptr) {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc new file mode 100644 index 00000000000..d131545e476 --- /dev/null +++ b/storage/rocksdb/rdb_sst_info.cc @@ -0,0 +1,417 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header file */ +#include "./rdb_sst_info.h" + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include "../sql/log.h" +#include "./my_dir.h" + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" +#include "./rdb_cf_options.h" + +namespace myrocks { + +Rdb_sst_file::Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf, + const rocksdb::DBOptions& db_options, + const std::string& name) : + m_db(db), + m_cf(cf), + m_db_options(db_options), + m_sst_file_writer(nullptr), + m_name(name) +{ + DBUG_ASSERT(db != nullptr); + DBUG_ASSERT(cf != nullptr); +} + +Rdb_sst_file::~Rdb_sst_file() +{ + // Make sure we clean up + delete m_sst_file_writer; + m_sst_file_writer= nullptr; + + // In case something went wrong attempt to delete the temporary file. + // If everything went fine that file will have been renamed and this + // function call will fail. 
+ std::remove(m_name.c_str()); +} + +rocksdb::Status Rdb_sst_file::open() +{ + DBUG_ASSERT(m_sst_file_writer == nullptr); + + rocksdb::ColumnFamilyDescriptor cf_descr; + + rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) + { + return s; + } + + // Create an sst file writer with the current options and comparator + const rocksdb::Comparator* comparator= m_cf->GetComparator(); + + rocksdb::EnvOptions env_options(m_db_options); + rocksdb::Options options(m_db_options, cf_descr.options); + + m_sst_file_writer= + new rocksdb::SstFileWriter(env_options, options, comparator); + + s= m_sst_file_writer->Open(m_name); + if (!s.ok()) + { + delete m_sst_file_writer; + m_sst_file_writer= nullptr; + } + + return s; +} + +rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice& key, + const rocksdb::Slice& value) +{ + DBUG_ASSERT(m_sst_file_writer != nullptr); + + // Add the specified key/value to the sst file writer + return m_sst_file_writer->Add(key, value); +} + +// This function is run by the background thread +rocksdb::Status Rdb_sst_file::commit() +{ + DBUG_ASSERT(m_sst_file_writer != nullptr); + + rocksdb::Status s; + + // Close out the sst file + s= m_sst_file_writer->Finish(); + if (s.ok()) + { + std::vector files = { m_name }; + // Add the file to the database + // Set the skip_snapshot_check parameter to true since no one + // should be accessing the table we are bulk loading + s= m_db->AddFile(m_cf, files, true, true); + } + + delete m_sst_file_writer; + m_sst_file_writer= nullptr; + + return s; +} + +Rdb_sst_info::Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, + const std::string& indexname, + rocksdb::ColumnFamilyHandle* cf, + const rocksdb::DBOptions& db_options) : + m_db(db), + m_cf(cf), + m_db_options(db_options), + m_curr_size(0), + m_sst_count(0), + m_error_msg(""), +#if defined(RDB_SST_INFO_USE_THREAD) + m_queue(), + m_mutex(), + m_cond(), + m_thread(nullptr), + m_finished(false), +#endif + m_sst_file(nullptr) +{ + m_prefix= 
db->GetName() + "/"; + + std::string normalized_table; + if (rdb_normalize_tablename(tablename.c_str(), &normalized_table)) + { + // We failed to get a normalized table name. This should never happen, + // but handle it anyway. + m_prefix += "fallback_" + + std::to_string( + reinterpret_cast(reinterpret_cast(this))) + "_" + + indexname + "_"; + } + else + { + m_prefix += normalized_table + "_" + indexname + "_"; + } + + rocksdb::ColumnFamilyDescriptor cf_descr; + rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) + { + // Default size if we can't get the cf's target size + m_max_size= 64*1024*1024; + } + else + { + // Set the maximum size to 3 times the cf's target size + m_max_size= cf_descr.options.target_file_size_base * 3; + } +} + +Rdb_sst_info::~Rdb_sst_info() +{ + DBUG_ASSERT(m_sst_file == nullptr); +#if defined(RDB_SST_INFO_USE_THREAD) + DBUG_ASSERT(m_thread == nullptr); +#endif +} + +int Rdb_sst_info::open_new_sst_file() +{ + DBUG_ASSERT(m_sst_file == nullptr); + + // Create the new sst file's name + std::string name= m_prefix + std::to_string(m_sst_count++) + m_suffix; + + // Create the new sst file object + m_sst_file= new Rdb_sst_file(m_db, m_cf, m_db_options, name); + + // Open the sst file + rocksdb::Status s= m_sst_file->open(); + if (!s.ok()) + { + set_error_msg(s.ToString()); + delete m_sst_file; + m_sst_file= nullptr; + return 1; + } + + m_curr_size= 0; + + return 0; +} + +void Rdb_sst_info::close_curr_sst_file() +{ + DBUG_ASSERT(m_sst_file != nullptr); + DBUG_ASSERT(m_curr_size > 0); + +#if defined(RDB_SST_INFO_USE_THREAD) + if (m_thread == nullptr) + { + // We haven't already started a background thread, so start one + m_thread= new std::thread(thread_fcn, this); + } + + DBUG_ASSERT(m_thread != nullptr); + + { + // Add this finished sst file to the queue (while holding mutex) + std::lock_guard guard(m_mutex); + m_queue.push(m_sst_file); + } + + // Notify the background thread that there is a new entry in the queue + 
m_cond.notify_one(); +#else + rocksdb::Status s= m_sst_file->commit(); + if (!s.ok()) + { + set_error_msg(s.ToString()); + } + + delete m_sst_file; +#endif + + // Reset for next sst file + m_sst_file= nullptr; + m_curr_size= 0; +} + +int Rdb_sst_info::put(const rocksdb::Slice& key, + const rocksdb::Slice& value) +{ + int rc; + + if (m_curr_size >= m_max_size) + { + // The current sst file has reached its maximum, close it out + close_curr_sst_file(); + + // While we are here, check to see if we have had any errors from the + // background thread - we don't want to wait for the end to report them + if (!m_error_msg.empty()) + { + return 1; + } + } + + if (m_curr_size == 0) + { + // We don't have an sst file open - open one + rc= open_new_sst_file(); + if (rc != 0) + { + return rc; + } + } + + DBUG_ASSERT(m_sst_file != nullptr); + + // Add the key/value to the current sst file + rocksdb::Status s= m_sst_file->put(key, value); + if (!s.ok()) + { + set_error_msg(s.ToString()); + return 1; + } + + m_curr_size += key.size() + value.size(); + + return 0; +} + +int Rdb_sst_info::commit() +{ + if (m_curr_size > 0) + { + // Close out any existing files + close_curr_sst_file(); + } + +#if defined(RDB_SST_INFO_USE_THREAD) + if (m_thread != nullptr) + { + // Tell the background thread we are done + m_finished= true; + m_cond.notify_one(); + + // Wait for the background thread to finish + m_thread->join(); + delete m_thread; + m_thread= nullptr; + } +#endif + + // Did we get any errors? + if (!m_error_msg.empty()) + { + return 1; + } + + return 0; +} + +void Rdb_sst_info::set_error_msg(const std::string& msg) +{ +#if defined(RDB_SST_INFO_USE_THREAD) + // Both the foreground and background threads can set the error message + // so lock the mutex to protect it. We only want the first error that + // we encounter. 
+ std::lock_guard guard(m_mutex); +#endif + my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str()); + if (m_error_msg.empty()) + { + m_error_msg= msg; + } +} + +#if defined(RDB_SST_INFO_USE_THREAD) +// Static thread function - the Rdb_sst_info object is in 'object' +void Rdb_sst_info::thread_fcn(void* object) +{ + reinterpret_cast(object)->run_thread(); +} + +void Rdb_sst_info::run_thread() +{ + std::unique_lock lk(m_mutex); + + do + { + // Wait for notification or 1 second to pass + m_cond.wait_for(lk, std::chrono::seconds(1)); + + // Inner loop pulls off all Rdb_sst_file entries and processes them + while (!m_queue.empty()) + { + Rdb_sst_file* sst_file= m_queue.front(); + m_queue.pop(); + + // Release the lock - we don't want to hold it while committing the file + lk.unlock(); + + // Close out the sst file and add it to the database + rocksdb::Status s= sst_file->commit(); + if (!s.ok()) + { + set_error_msg(s.ToString()); + } + + delete sst_file; + + // Reacquire the lock for the next inner loop iteration + lk.lock(); + } + + // If the queue is empty and the main thread has indicated we should exit + // break out of the loop. + } while (!m_finished); + + DBUG_ASSERT(m_queue.empty()); +} +#endif + +void Rdb_sst_info::init(rocksdb::DB* db) +{ + std::string path= db->GetName() + FN_DIRSEP; + struct st_my_dir* dir_info= my_dir(path.c_str(), MYF(MY_DONT_SORT)); + + // Access the directory + if (dir_info == nullptr) + { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: Could not access database directory: %s", + path.c_str()); + return; + } + + // Scan through the files in the directory + struct fileinfo* file_info= dir_info->dir_entry; + for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++) + { + // find any files ending with m_suffix ... + std::string name= file_info->name; + size_t pos= name.find(m_suffix); + if (pos != std::string::npos && name.size() - pos == m_suffix.size()) + { + // ... 
and remove them + std::string fullname= path + name; + my_delete(fullname.c_str(), MYF(0)); + } + } + + // Release the directory entry + my_dirend(dir_info); +} + +std::string Rdb_sst_info::m_suffix= ".bulk_load.tmp"; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h new file mode 100644 index 00000000000..8845ec98122 --- /dev/null +++ b/storage/rocksdb/rdb_sst_info.h @@ -0,0 +1,101 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" +#include "rocksdb/sst_file_writer.h" + +// define RDB_SST_INFO_USE_THREAD /* uncomment to use threads */ + +namespace myrocks { + +class Rdb_sst_file { + Rdb_sst_file(const Rdb_sst_file& p)= delete; + Rdb_sst_file& operator=(const Rdb_sst_file& p)= delete; + + rocksdb::DB* m_db; + rocksdb::ColumnFamilyHandle* m_cf; + const rocksdb::DBOptions& m_db_options; + rocksdb::SstFileWriter* m_sst_file_writer; + std::string m_name; + + public: + Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf, + const rocksdb::DBOptions& db_options, const std::string& name); + ~Rdb_sst_file(); + + rocksdb::Status open(); + rocksdb::Status put(const rocksdb::Slice& key, const rocksdb::Slice& value); + 
rocksdb::Status commit(); +}; + +class Rdb_sst_info { + Rdb_sst_info(const Rdb_sst_info& p)= delete; + Rdb_sst_info& operator=(const Rdb_sst_info& p)= delete; + + rocksdb::DB* m_db; + rocksdb::ColumnFamilyHandle* m_cf; + const rocksdb::DBOptions& m_db_options; + uint64_t m_curr_size; + uint64_t m_max_size; + uint m_sst_count; + std::string m_error_msg; + std::string m_prefix; + static std::string m_suffix; +#if defined(RDB_SST_INFO_USE_THREAD) + std::queue m_queue; + std::mutex m_mutex; + std::condition_variable m_cond; + std::thread* m_thread; + bool m_finished; +#endif + Rdb_sst_file* m_sst_file; + + int open_new_sst_file(); + void close_curr_sst_file(); + void set_error_msg(const std::string& msg); + +#if defined(RDB_SST_INFO_USE_THREAD) + void run_thread(); + + static void thread_fcn(void* object); +#endif + + public: + Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, + const std::string& indexname, rocksdb::ColumnFamilyHandle* cf, + const rocksdb::DBOptions& db_options); + ~Rdb_sst_info(); + + int put(const rocksdb::Slice& key, const rocksdb::Slice& value); + int commit(); + + const std::string& error_message() const { return m_error_msg; } + + static void init(rocksdb::DB* db); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc new file mode 100644 index 00000000000..1538404ea56 --- /dev/null +++ b/storage/rocksdb/rdb_threads.cc @@ -0,0 +1,81 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* The C++ file's header */ +#include "./rdb_threads.h" + +namespace myrocks { + +void* Rdb_thread::thread_func(void* thread_ptr) +{ + DBUG_ASSERT(thread_ptr != nullptr); + Rdb_thread* thread= static_cast(thread_ptr); + if (!thread->m_run_once.exchange(true)) + { + thread->run(); + thread->uninit(); + } + return nullptr; +} + + +void Rdb_thread::init( +#ifdef HAVE_PSI_INTERFACE + my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key +#endif + ) +{ + DBUG_ASSERT(!m_run_once); + mysql_mutex_init(stop_bg_psi_mutex_key, &m_signal_mutex, MY_MUTEX_INIT_FAST); + mysql_cond_init(stop_bg_psi_cond_key, &m_signal_cond, nullptr); +} + + +void Rdb_thread::uninit() +{ + mysql_mutex_destroy(&m_signal_mutex); + mysql_cond_destroy(&m_signal_cond); +} + + +int Rdb_thread::create_thread( +#ifdef HAVE_PSI_INTERFACE + PSI_thread_key background_psi_thread_key +#endif + ) +{ + return mysql_thread_create(background_psi_thread_key, + &m_handle, nullptr, thread_func, this); +} + + +void Rdb_thread::signal(bool stop_thread) +{ + mysql_mutex_lock(&m_signal_mutex); + if (stop_thread) { + m_stop= true; + } + mysql_cond_signal(&m_signal_cond); + mysql_mutex_unlock(&m_signal_mutex); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h new file mode 100644 index 00000000000..cba03b4cdba --- /dev/null +++ b/storage/rocksdb/rdb_threads.h @@ -0,0 +1,116 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. 
+ Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* MySQL includes */ +#include "./my_global.h" +#include +#include + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +class Rdb_thread +{ + private: + // Disable Copying + Rdb_thread(const Rdb_thread&); + Rdb_thread& operator=(const Rdb_thread&); + + // Make sure we run only once + std::atomic_bool m_run_once; + + pthread_t m_handle; + + protected: + mysql_mutex_t m_signal_mutex; + mysql_cond_t m_signal_cond; + bool m_stop= false; + + public: + Rdb_thread() : m_run_once(false) {} + +#ifdef HAVE_PSI_INTERFACE + void init(my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key); + int create_thread( + my_core::PSI_thread_key background_psi_thread_key); +#else + void init(); + int create_thread(); +#endif + + virtual void run(void) = 0; + + void signal(bool stop_thread= false); + + int join() + { + return pthread_join(m_handle, nullptr); + } + + void uninit(); + + virtual ~Rdb_thread() {} + + private: + static void* thread_func(void* thread_ptr); +}; + + +/** + MyRocks background thread control + N.B. 
This is on top of RocksDB's own background threads + (@see rocksdb::CancelAllBackgroundWork()) +*/ + +class Rdb_background_thread : public Rdb_thread +{ + private: + bool m_save_stats= false; + + void reset() + { + mysql_mutex_assert_owner(&m_signal_mutex); + m_stop= false; + m_save_stats= false; + } + + public: + virtual void run() override; + + void request_save_stats() + { + mysql_mutex_lock(&m_signal_mutex); + m_save_stats= true; + mysql_mutex_unlock(&m_signal_mutex); + } +}; + + +/* + Drop index thread control +*/ + +struct Rdb_drop_index_thread : public Rdb_thread +{ + virtual void run() override; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc new file mode 100644 index 00000000000..05f0104d483 --- /dev/null +++ b/storage/rocksdb/rdb_utils.cc @@ -0,0 +1,311 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header */ +#include "./rdb_utils.h" + +/* C++ standard header files */ +#include +#include + +/* C standard header files */ +#include + +/* MyRocks header files */ +#include "./ha_rocksdb.h" + +namespace myrocks { + +/* + Skip past any spaces in the input +*/ +const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str) +{ + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + while (my_isspace(cs, *str)) + { + str++; + } + + return str; +} + +/* + Compare (ignoring case) to see if str2 is the next data in str1. + Note that str1 can be longer but we only compare up to the number + of characters in str2. +*/ +bool rdb_compare_strings_ic(const char *str1, const char *str2) +{ + DBUG_ASSERT(str1 != nullptr); + DBUG_ASSERT(str2 != nullptr); + + // Scan through the strings + size_t ii; + for (ii = 0; str2[ii]; ii++) + { + if (toupper(static_cast(str1[ii])) != + toupper(static_cast(str2[ii]))) + { + return false; + } + } + + return true; +} + +/* + Scan through an input string looking for pattern, ignoring case + and skipping all data enclosed in quotes. +*/ +const char* rdb_find_in_string(const char *str, const char *pattern, + bool *succeeded) +{ + char quote = '\0'; + bool escape = false; + + DBUG_ASSERT(str != nullptr); + DBUG_ASSERT(pattern != nullptr); + DBUG_ASSERT(succeeded != nullptr); + + *succeeded = false; + + for ( ; *str; str++) + { + /* If we found a our starting quote character */ + if (*str == quote) + { + /* If it was escaped ignore it */ + if (escape) + { + escape = false; + } + /* Otherwise we are now outside of the quoted string */ + else + { + quote = '\0'; + } + } + /* Else if we are currently inside a quoted string? 
*/ + else if (quote != '\0') + { + /* If so, check for the escape character */ + escape = !escape && *str == '\\'; + } + /* Else if we found a quote we are starting a quoted string */ + else if (*str == '"' || *str == '\'' || *str == '`') + { + quote = *str; + } + /* Else we are outside of a quoted string - look for our pattern */ + else + { + if (rdb_compare_strings_ic(str, pattern)) + { + *succeeded = true; + return str; + } + } + } + + // Return the character after the found pattern or the null terminateor + // if the pattern wasn't found. + return str; +} + +/* + See if the next valid token matches the specified string +*/ +const char* rdb_check_next_token(struct charset_info_st* cs, const char *str, + const char *pattern, bool *succeeded) +{ + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + DBUG_ASSERT(pattern != nullptr); + DBUG_ASSERT(succeeded != nullptr); + + // Move past any spaces + str = rdb_skip_spaces(cs, str); + + // See if the next characters match the pattern + if (rdb_compare_strings_ic(str, pattern)) + { + *succeeded = true; + return str + strlen(pattern); + } + + *succeeded = false; + return str; +} + +/* + Parse id +*/ +const char* rdb_parse_id(struct charset_info_st* cs, const char *str, + std::string *id) +{ + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + // Move past any spaces + str = rdb_skip_spaces(cs, str); + + if (*str == '\0') + { + return str; + } + + char quote = '\0'; + if (*str == '`' || *str == '"') + { + quote = *str++; + } + + size_t len = 0; + const char* start = str; + + if (quote != '\0') + { + for ( ; ; ) + { + if (*str == '\0') + { + return str; + } + + if (*str == quote) + { + str++; + if (*str != quote) + { + break; + } + } + + str++; + len++; + } + } + else + { + while (!my_isspace(cs, *str) && *str != '(' && *str != ')' && + *str != '.' 
&& *str != ',' && *str != '\0') + { + str++; + len++; + } + } + + // If the user requested the id create it and return it + if (id != nullptr) + { + *id = std::string(""); + id->reserve(len); + while (len--) + { + *id += *start; + if (*start++ == quote) + { + start++; + } + } + } + + return str; +} + +/* + Skip id +*/ +const char* rdb_skip_id(struct charset_info_st* cs, const char *str) +{ + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + return rdb_parse_id(cs, str, nullptr); +} + +static const std::size_t rdb_hex_bytes_per_char = 2; +static const std::array rdb_hexdigit = +{ + { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' } +}; + +/* + Convert data into a hex string with optional maximum length. + If the data is larger than the maximum length trancate it and append "..". +*/ +std::string rdb_hexdump(const char *data, std::size_t data_len, + std::size_t maxsize) +{ + DBUG_ASSERT(data != nullptr); + + // Count the elements in the string + std::size_t elems = data_len; + // Calculate the amount of output needed + std::size_t len = elems * rdb_hex_bytes_per_char; + std::string str; + + if (maxsize != 0 && len > maxsize) + { + // If the amount of output is too large adjust the settings + // and leave room for the ".." at the end + elems = (maxsize - 2) / rdb_hex_bytes_per_char; + len = elems * rdb_hex_bytes_per_char + 2; + } + + // Reserve sufficient space to avoid reallocations + str.reserve(len); + + // Loop through the input data and build the output string + for (std::size_t ii = 0; ii < elems; ii++, data++) + { + uint8_t ch = (uint8_t) *data; + str += rdb_hexdigit[ch >> 4]; + str += rdb_hexdigit[ch & 0x0F]; + } + + // If we can't fit it all add the ".." 
+ if (elems != data_len) + { + str += ".."; + } + + return str; +} + + +/* + Attempt to access the database subdirectory to see if it exists +*/ +bool rdb_database_exists(const std::string& db_name) +{ + std::string dir = std::string(mysql_real_data_home) + FN_DIRSEP + db_name; + struct st_my_dir* dir_info = my_dir(dir.c_str(), + MYF(MY_DONT_SORT | MY_WANT_STAT)); + if (dir_info == nullptr) + { + return false; + } + + my_dirend(dir_info); + return true; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h new file mode 100644 index 00000000000..5630dc1c20f --- /dev/null +++ b/storage/rocksdb/rdb_utils.h @@ -0,0 +1,206 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include + +/* MySQL header files */ +#include "./sql_string.h" + +/* RocksDB header files */ +#include "rocksdb/slice.h" + +#ifdef HAVE_JEMALLOC + #include +#endif + +namespace myrocks { + +/* + Guess what? + An interface is a class where all members are public by default. +*/ + +#ifndef interface +#define interface struct +#endif // interface + +/* + Introduce C-style pseudo-namespaces, a handy way to make code more readble + when calling into a legacy API, which does not have any namespace defined. 
+ Since we cannot or don't want to change the API in any way, we can use this + mechanism to define readability tokens that look like C++ namespaces, but are + not enforced in any way by the compiler, since the pre-compiler strips them + out. However, on the calling side, code looks like my_core::thd_ha_data() + rather than plain a thd_ha_data() call. This technique adds an immediate + visible cue on what type of API we are calling into. +*/ + +#ifndef my_core +// C-style pseudo-namespace for MySQL Core API, to be used in decorating calls +// to non-obvious MySQL functions, like the ones that do not start with well +// known prefixes: "my_", "sql_", and "mysql_". +#define my_core +#endif // my_core + +/* + The intent behind a SHIP_ASSERT() macro is to have a mechanism for validating + invariants in retail builds. Traditionally assertions (such as macros defined + in ) are evaluated for performance reasons only in debug builds and + become NOOP in retail builds when NDEBUG is defined. + + This macro is intended to validate the invariants which are critical for + making sure that data corruption and data loss won't take place. Proper + intended usage can be described as "If a particular condition is not true then + stop everything what's going on and terminate the process because continued + execution will cause really bad things to happen". + + Use the power of SHIP_ASSERT() wisely. +*/ + +#ifndef SHIP_ASSERT +#define SHIP_ASSERT(expr) \ + do { \ + if (!(expr)) { \ + my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \ + abort_with_stack_traces(); \ + } \ + } while (0) +#endif // SHIP_ASSERT + +/* + Assert a implies b. + If a is true, then b must be true. + If a is false, then the value is b does not matter. +*/ +#ifndef DBUG_ASSERT_IMP +#define DBUG_ASSERT_IMP(a, b) DBUG_ASSERT(!(a) || (b)) +#endif + +/* + Assert a if and only if b. + a and b must be both true or both false. 
+*/ +#ifndef DBUG_ASSERT_IFF +#define DBUG_ASSERT_IFF(a, b) \ + DBUG_ASSERT(static_cast(a) == static_cast(b)) +#endif + +/* + Helper function to get an NULL terminated uchar* out of a given MySQL String. +*/ + +inline uchar* rdb_mysql_str_to_uchar_str(my_core::String *str) +{ + DBUG_ASSERT(str != nullptr); + return reinterpret_cast(str->c_ptr()); +} + +/* + Helper function to get plain (not necessary NULL terminated) uchar* out of a + given STL string. +*/ + +inline const uchar* rdb_std_str_to_uchar_ptr(const std::string &str) +{ + return reinterpret_cast(str.data()); +} + +/* + Helper function to get plain (not necessary NULL terminated) uchar* out of a + given RocksDB item. +*/ + +inline const uchar* rdb_slice_to_uchar_ptr(const rocksdb::Slice *item) +{ + DBUG_ASSERT(item != nullptr); + return reinterpret_cast(item->data()); +} + +/* + Call this function in cases when you can't rely on garbage collector and need + to explicitly purge all unused dirty pages. This should be a relatively rare + scenario for cases where it has been verified that this intervention has + noticeable benefits. +*/ +inline int purge_all_jemalloc_arenas() +{ +#ifdef HAVE_JEMALLOC + unsigned narenas = 0; + size_t sz = sizeof(unsigned); + char name[25] = { 0 }; + + // Get the number of arenas first. Please see `jemalloc` documentation for + // all the various options. + int result = mallctl("arenas.narenas", &narenas, &sz, nullptr, 0); + + // `mallctl` returns 0 on success and we really want caller to know if all the + // trickery actually works. + if (result) { + return result; + } + + // Form the command to be passed to `mallctl` and purge all the unused dirty + // pages. + snprintf(name, sizeof(name) / sizeof(char), "arena.%d.purge", narenas); + result = mallctl(name, nullptr, nullptr, nullptr, 0); + + return result; +#else + return EXIT_SUCCESS; +#endif +} + +/* + Helper functions to parse strings. 
+*/ + +const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str) + __attribute__((__nonnull__, __warn_unused_result__)); + +bool rdb_compare_strings_ic(const char *str1, const char *str2) + __attribute__((__nonnull__, __warn_unused_result__)); + +const char* rdb_find_in_string(const char *str, const char *pattern, + bool *succeeded) + __attribute__((__nonnull__, __warn_unused_result__)); + +const char* rdb_check_next_token(struct charset_info_st* cs, const char *str, + const char *pattern, bool *succeeded) + __attribute__((__nonnull__, __warn_unused_result__)); + +const char* rdb_parse_id(struct charset_info_st* cs, const char *str, + std::string *id) + __attribute__((__nonnull__(1, 2), __warn_unused_result__)); + +const char* rdb_skip_id(struct charset_info_st* cs, const char *str) + __attribute__((__nonnull__, __warn_unused_result__)); + +/* + Helper functions to populate strings. +*/ + +std::string rdb_hexdump(const char *data, std::size_t data_len, + std::size_t maxsize = 0) + __attribute__((__nonnull__)); + +/* + Helper function to see if a database exists + */ +bool rdb_database_exists(const std::string& db_name); + +} // namespace myrocks diff --git a/storage/rocksdb/rocksdb-range-access.txt b/storage/rocksdb/rocksdb-range-access.txt new file mode 100644 index 00000000000..c974279ac77 --- /dev/null +++ b/storage/rocksdb/rocksdb-range-access.txt @@ -0,0 +1,353 @@ + +This file describes how MySQL index navigation commands are translated into +RocksDB index navigation commands. + +Index tuples are shown as + + ( kv )-aaa-pkN + +here + * '(kv)' is the 4-byte index number. + * '-' is just for readability + * everything that follows the '-' is mem-comparable form of the key. + In ascii encoding, aaa < bbb < ccc < xxx. + +Tuples that start with '#' do not exist in the database. They are only shown +to demonstrate where Seek() calls end up with. 
+ +== HA_READ_KEY_EXACT, forward CF == + + (kv-1)-xxx-pk +# ( kv )-aaa <-- "kv-aaa" doesn't exist in the database, but it would be + here. + ( kv )-aaa-pk <--- Seek("kv-aaa") will put us here on the next record. + ( kv )-aaa-pk2 + ( kv )-bbb-... + +RocksDB calls: + + it->Seek(kv); + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. + +== HA_READ_KEY_EXACT, backward CF == + +When we need to seek to a tuple that is a prefix of a full key: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- We need to be here +# ( kv )-bbb <---we call Seek(kv-bbb) + ( kv )-aaa-pk ... and end up here. Should call it->Prev(). + +There is a special case when (kv)-bbb-pk1 is the last record in the CF, and +we get invalid iterator. Then, we need to call SeekToLast(). + +Another kind of special case is when we need to seek to the full value. +Suppose, the lookup tuple is kv-bbb-pk1: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) + ( kv )-bbb-pk0 + +Then, Seek(kv-bbb-pk1) will position us exactly the tuple we need, and we +won't need to call it->Prev(). If we get an invalid iterator, there is no +need to call SeekToLast(). + +RocksDB calls: + + it->Seek(tuple); + + if (!using_full_key) + { + if (!it->Valid()) + it->SeekToLast(); + else + it->Prev(); + } + + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. + +== HA_READ_KEY_OR_NEXT, forward CF == + +This is finding min(key) such that key >= lookup_tuple. + +If lookup tuple is kv-bbb: + + ( kv )-aaa-pk +# ( kv )-bbb <-- "kv-bbb" doesn't exist in the database, but it would be + here. + ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here on the next record. + ( kv )-bbb-pk2 + ( kv )-bbb-... + +RocksDB calls: + + Seek(kv); + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. 
+ +== HA_READ_KEY_OR_NEXT, backward CF == + +When specified key tuple is a key prefix: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- We need to be here (or above) +# ( kv )-bbb <---we call Seek(kv-bbb) + ( kv )-aaa-pk ... and end up here. Should call it->Prev(). + +There is a special case when (kv)-bbb-pk1 is the last record in the CF, and +we get invalid iterator. Then, we need to call SeekToLast(). + +Another kind of special case is when we need to seek to the full value. +Suppose, the lookup tuple is kv-bbb-pk1: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) + ( kv )-bbb-pk0 + +Then, Seek(kv-bbb-pk1) may position us exactly at the tuple we need, and we +won't need to call it->Prev(). +If kv-bbb-pk1 is not present in the database, we will be positioned on +kv-bbb-pk0, and we will need to call it->Prev(). +If we get an invalid iterator, we DO need to call SeekToLast(). + +RocksDB calls: + + Seek(...); + + if (!it->Valid()) + it->SeekToLast(); + else + { + if (!using_full_key || + !(kd->covers_key(...) || kd->cmp_full_keys(...)) + it->Prev(); + } + + if (it->Valid() && kd->covers_key(..)) + return record. + +== HA_READ_AFTER_KEY, forward CF == + +This is finding min(key) such that key > lookup_key. + +Suppose lookup_key = kv-bbb + + ( kv )-aaa-pk +# ( kv )-bbb + ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here. We need to + ( kv )-bbb-pk2 get to the value that is next after 'bbb'. + ( kv )-bbb-pk3 + ( kv )-bbb-pk4 + ( kv )-bbb-pk5 + ( kv )-ccc-pkN <-- That is, we need to be here. + +However, we don't know that the next value is kv-ccc. Instead, we seek to the +first value that strictly greater than 'kv-bbb'. It is Successor(kv-bbb). + +It doesn't matter if we're using a full extended key or not. 
+ +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (it->Valid() && kd->covers_key(it.key())) + return record; + +Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that +we seek to Successor($lookup_key) instead of $lookup_key itself. + +== HA_READ_AFTER_KEY, backward CF == + +Suppose, the lookup key is 'kv-bbb': + + (kv+1)-xxx-pk + ( kv )-ccc-pk7 + ( kv )-ccc-pk6 <-- We need to be here. +# Successor(kv-bbb) <-- We get here when we call Seek(Successor(kv-bbb)) + ( kv )-bbb-pk5 and we will need to call Prev() (*) + ( kv )-bbb-pk4 + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 +# ( kv )-bbb <-- We would get here if we called Seek(kv-bbb). + ( kv )-aaa-pk + +(*) - unless Successor(kv-bbb)=(kv-ccc), and Seek(kv-ccc) hits the row. In +that case, we won't need to call Prev(). + +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (!it->Valid()) + { + /* + We may get EOF if rows with 'kv-bbb' (below the Successor... line in the + diagram) do not exist. This doesn't mean that rows with values kv-ccc + do not exist. + */ + it->SeekToLast(); + } + else + { + if (!using_full_key || + !kd->value_matches_prefix(...)) + { + it->Prev(); + } + } + + if (it->Valid() && kd->covers_key(...)) + return record. + +Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that +we seek to Successor($lookup_key) instead of $lookup_key itself. + + +== HA_READ_BEFORE_KEY, forward CF == + +This is finding max(key) such that key < lookup_tuple. + +Suppose, lookup_tuple=kv-bbb. + + ( kv )-aaa-pk1 + ( kv )-aaa-pk2 + ( kv )-aaa-pk3 <-- Need to be here. +# ( kv )-bbb + ( kv )-bbb-pk4 <-- Seek("kv-bbb") will put us here. + ( kv )-bbb-pk5 + ( kv )-bbb-pk6 + +1. Seek(kv-bbb) will put us at kv-bbb-pk4 (or return an invalid iterator + if kv-bbb-pk4 and subsequent rows do not exist in the db). +2. We will need to call Prev() to get to the record before. + (if there is no record before kv-bbb, then we can't find a record). 
+ +It doesn't matter if we're using a full extended key or not. + +RocksDB calls: + + it->Seek(kv-bbb); + if (it->Valid()) + it->Prev(); + else + it->SeekToLast(); + + if (it->Valid() && kd->covers_key(...)) + return record; + + +== HA_READ_BEFORE_KEY, backward CF == + +This is finding max(key) such that key < lookup_tuple. +Suppose, lookup_tuple=kv-bbb, a prefix of the full key. + + ( kv )-bbb-pk6 + ( kv )-bbb-pk5 + ( kv )-bbb-pk4 +# ( kv )-bbb + ( kv )-aaa-pk3 <-- Need to be here, and Seek("kv-bbb") will put us here + ( kv )-aaa-pk2 + ( kv )-aaa-pk1 + +If the lookup tuple is a full key (e.g. kv-bbb-pk4), and the key is present in +the database, the iterator will be positioned on the key. We will need to call +Next() to get the next key. + +RocksDB calls: + + it->Seek(kv-bbb); + + if (it->Valid() && using_full_key && + kd->value_matches_prefix(...)) + { + /* We are using full key and we've hit an exact match */ + it->Next(); + } + + if (it->Valid() && kd->covers_key(...)) + return record; + +== HA_READ_PREFIX_LAST, forward CF == + +Find the last record with the specified index prefix lookup_tuple. + +Suppose, lookup_tuple='kv-bbb' + + ( kv )-aaa-pk2 + ( kv )-aaa-pk3 +# ( kv )-bbb + ( kv )-bbb-pk4 + ( kv )-bbb-pk5 + ( kv )-bbb-pk6 + ( kv )-bbb-pk7 <--- Need to be here. +# ( kv )-ccc + ( kv )-ccc-pk8 <-- Seek(Successor(kv-bbb)) will get us here. will need + ( kv )-ccc-pk9 to call Prev(). + +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (!it->Valid()) + it->SeekToLast(); + else + it->Prev(); + + if (it->Valid() && kd->covers_key(...)) + { + if (!cmp_full_keys(lookup_tuple)) // not needed in _OR_PREV + { + // the record's prefix matches lookup_tuple. + return record; + } + } + +== HA_READ_PREFIX_LAST, backward CF == + +Suppose, lookup_tuple='kv-bbb' + + ( kv )-ccc-pk9 + ( kv )-ccc-pk8 +# ( kv )-ccc <-- 2. Seek(Successor(kv-bbb)) will point here + and it will fall down to the next row. + ( kv )-bbb-pk7 <--- 1. Need to be here. 
+ ( kv )-bbb-pk6 + ( kv )-bbb-pk5 + ( kv )-bbb-pk4 +# ( kv )-bbb + ( kv )-aaa-pk3 + ( kv )-aaa-pk2 + + +RocksDB calls: + + it->Seek(Successor(kv-bbb)); + + if (using_full_key && it->Valid() && !cmp_full_keys(Sucessor(lookup_key))) + it->Next(); + + if (it->Valid() && kd->covers_key(..)) + { + if (!cmp_full_keys(...)) // not needed in _OR_PREV + { + // the record's prefix matches lookup_tuple. + return record; + } + } + +== HA_READ_PREFIX_LAST_OR_PREV, forward or backward CF == + +This is just like HA_READ_PREFIX_LAST but we don't need to check that the key +we've got is in the search prefix. (search for "not needed in _OR_PREV" above) diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc new file mode 100644 index 00000000000..ce51481690b --- /dev/null +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -0,0 +1,17 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+// +#include "rocksdb/ldb_tool.h" +#include "../rdb_comparator.h" + +int main(int argc, char** argv) { + rocksdb::Options db_options; + myrocks::Rdb_pk_comparator pk_comparator; + db_options.comparator= &pk_comparator; + + rocksdb::LDBTool tool; + tool.Run(argc, argv, db_options); + return 0; +} diff --git a/storage/rocksdb/unittest/CMakeLists.txt b/storage/rocksdb/unittest/CMakeLists.txt new file mode 100644 index 00000000000..d2a5ea2aff7 --- /dev/null +++ b/storage/rocksdb/unittest/CMakeLists.txt @@ -0,0 +1,22 @@ +IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) + INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib + ${CMAKE_SOURCE_DIR}/unittest/mytap + ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src + ) + LINK_LIBRARIES(mytap mysys dbug strings) + + ADD_DEFINITIONS(-DSTANDALONE_UNITTEST) + + MYSQL_ADD_EXECUTABLE(test_properties_collector + test_properties_collector.cc + ) + TARGET_LINK_LIBRARIES(test_properties_collector mysqlserver) + + # Necessary to make sure that we can use the jemalloc API calls. + GET_TARGET_PROPERTY(mysql_embedded LINK_FLAGS PREV_LINK_FLAGS) + IF(NOT PREV_LINK_FLAGS) + SET(PREV_LINK_FLAGS) + ENDIF() + SET_TARGET_PROPERTIES(test_properties_collector PROPERTIES LINK_FLAGS + "${PREV_LINK_FLAGS} ${WITH_MYSQLD_LDFLAGS}") +ENDIF() diff --git a/storage/rocksdb/unittest/test_properties_collector.cc b/storage/rocksdb/unittest/test_properties_collector.cc new file mode 100644 index 00000000000..f798a43d045 --- /dev/null +++ b/storage/rocksdb/unittest/test_properties_collector.cc @@ -0,0 +1,60 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* MyRocks header files */ +#include "../ha_rocksdb.h" +#include "../rdb_datadic.h" + +void putKeys(myrocks::Rdb_tbl_prop_coll* coll, + int num, + bool is_delete, + uint64_t expected_deleted) +{ + std::string str("aaaaaaaaaaaaaa"); + rocksdb::Slice sl(str.data(), str.size()); + + for (int i=0; i < num; i++) { + coll->AddUserKey( + sl, sl, + is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut, + 0, 100); + } + DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted); +} + +int main(int argc, char** argv) +{ + // test the circular buffer for delete flags + myrocks::Rdb_compact_params params; + params.m_file_size= 333; + params.m_deletes= 333; // irrelevant + params.m_window= 10; + + myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0, + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT); + + putKeys(&coll, 2, true, 2); // [xx] + putKeys(&coll, 3, false, 2); // [xxo] + putKeys(&coll, 1, true, 3); // [xxox] + putKeys(&coll, 6, false, 3); // [xxoxoooooo] + putKeys(&coll, 3, true, 4); // xxo[xooooooxxx] + putKeys(&coll, 1, false, 4); // xxox[ooooooxxxo] + putKeys(&coll, 100, false, 4); // ....[oooooooooo] + putKeys(&coll, 100, true, 10); // ....[xxxxxxxxxx] + putKeys(&coll, 100, true, 10); // ....[oooooooooo] + + return 0; +} From be33178833f1dbd4158c1ce3d8b6ce602afa644e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 6 Oct 2016 17:46:44 +0000 Subject: [PATCH 002/233] Add rocksdb submodule --- .gitmodules | 3 +++ storage/rocksdb/rocksdb | 1 + 2 files changed, 4 insertions(+) create mode 160000 storage/rocksdb/rocksdb diff --git a/.gitmodules b/.gitmodules index f08786dd641..c49f2ca2531 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "libmariadb"] path = libmariadb url = 
https://github.com/MariaDB/mariadb-connector-c +[submodule "storage/rocksdb/rocksdb"] + path = storage/rocksdb/rocksdb + url = git@github.com:facebook/rocksdb.git diff --git a/storage/rocksdb/rocksdb b/storage/rocksdb/rocksdb new file mode 160000 index 00000000000..912aec13c35 --- /dev/null +++ b/storage/rocksdb/rocksdb @@ -0,0 +1 @@ +Subproject commit 912aec13c357df46309dc0b75971f7e07954b9af From a482f2221d7469ac1fd7c7bc3b394d4eb4413abc Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 7 Oct 2016 08:25:10 +0000 Subject: [PATCH 003/233] Fix MariaRocks build (unfinished) Got to the point where we fail when compiling MyRocks files. --- storage/rocksdb/CMakeLists.txt | 18 ++++++++++-------- storage/rocksdb/get_rocksdb_files.sh | 8 ++++---- storage/rocksdb/ha_rocksdb.cc | 3 +++ storage/rocksdb/ha_rocksdb.h | 2 +- storage/rocksdb/rdb_datadic.cc | 1 + storage/rocksdb/rdb_threads.h | 2 ++ 6 files changed, 21 insertions(+), 13 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index f2c52ce84c4..f9b65a81ab7 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -1,9 +1,11 @@ # TODO: Copyrights -IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/rocksdb/Makefile") +IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. 
Try \"git submodule update\".") ENDIF() +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + # get a list of rocksdb library source files # run with env -i to avoid passing variables EXECUTE_PROCESS( @@ -15,8 +17,8 @@ EXECUTE_PROCESS( STRING(REGEX MATCHALL "[^\n]+" ROCKSDB_LIB_SOURCES ${SCRIPT_OUTPUT}) INCLUDE_DIRECTORIES( - ${CMAKE_SOURCE_DIR}/rocksdb - ${CMAKE_SOURCE_DIR}/rocksdb/include + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/include ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src ) @@ -93,14 +95,14 @@ ENDIF() IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) # TODO: read this file list from src.mk:TOOL_SOURCES SET(ROCKSDB_TOOL_SOURCES - ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_tool.cc - ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_cmd.cc - ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump_tool.cc + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb_tool.cc + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb_cmd.cc + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump_tool.cc ) - MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) + MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) TARGET_LINK_LIBRARIES(sst_dump rocksdb_se) - MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES}) + MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES}) TARGET_LINK_LIBRARIES(ldb rocksdb_se) MYSQL_ADD_EXECUTABLE(mysql_ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/tools/mysql_ldb.cc ${ROCKSDB_TOOL_SOURCES}) diff --git a/storage/rocksdb/get_rocksdb_files.sh b/storage/rocksdb/get_rocksdb_files.sh index 121d664a086..bd5128a8609 100755 --- a/storage/rocksdb/get_rocksdb_files.sh +++ b/storage/rocksdb/get_rocksdb_files.sh @@ -2,20 +2,20 @@ MKFILE=`mktemp` # create and run a simple makefile # include rocksdb make file relative to the path of 
this script -echo "include rocksdb/src.mk +echo "include ./storage/rocksdb/rocksdb/src.mk all: @echo \$(LIB_SOURCES)" > $MKFILE for f in `make --makefile $MKFILE` do - echo ../../rocksdb/$f + echo ./rocksdb/$f done rm $MKFILE # create build_version.cc file. Only create one if it doesn't exists or if it is different # this is so that we don't rebuild mysqld every time -bv=rocksdb/util/build_version.cc +bv=storage/rocksdb/rocksdb/util/build_version.cc date=$(date +%F) -git_sha=$(pushd rocksdb >/dev/null && git rev-parse HEAD 2>/dev/null && popd >/dev/null) +git_sha=$(pushd storage/rocksdb/rocksdb >/dev/null && git rev-parse HEAD 2>/dev/null && popd >/dev/null) if [ ! -f $bv ] || [ -z $git_sha ] || [ ! `grep $git_sha $bv` ] then echo "#include \"build_version.h\" diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 4fbaf01f154..20c4723cdda 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -21,6 +21,7 @@ #define MYSQL_SERVER 1 /* The C++ file's header */ +#include #include "./ha_rocksdb.h" /* C++ standard header files */ @@ -36,7 +37,9 @@ #include "./sql_table.h" #include #include +#ifdef MARIAROCKS_NOT_YET #include +#endif /* RocksDB includes */ #include "rocksdb/compaction_filter.h" diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 8f63050e74c..ae25004e323 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -29,7 +29,7 @@ #include "./handler.h" /* handler */ #include "./my_global.h" /* ulonglong */ #include "./sql_string.h" -#include "./ut0counter.h" +#include "../storage/innobase/include/ut0counter.h" /* RocksDB header files */ #include "rocksdb/cache.h" diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index d80c81a3d9a..0272d13efa4 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -18,6 +18,7 @@ #pragma implementation // gcc: Class implementation #endif +#include /* This C++ file's header 
file */ #include "./rdb_datadic.h" diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index cba03b4cdba..7526d0eba47 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -19,7 +19,9 @@ /* MySQL includes */ #include "./my_global.h" #include +#ifdef MARIAROCKS_NOT_YET #include +#endif /* MyRocks header files */ #include "./rdb_utils.h" From 7c3affdcc01fc6451f3b0dde610d7384d60ec445 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 12:36:58 +0000 Subject: [PATCH 004/233] Add #include at the start of every .cc file --- storage/rocksdb/event_listener.cc | 2 ++ storage/rocksdb/ha_rocksdb.cc | 3 ++- storage/rocksdb/properties_collector.cc | 2 ++ storage/rocksdb/rdb_cf_manager.cc | 2 ++ storage/rocksdb/rdb_cf_options.cc | 2 ++ storage/rocksdb/rdb_i_s.cc | 2 ++ storage/rocksdb/rdb_index_merge.cc | 2 ++ storage/rocksdb/rdb_mutex_wrapper.cc | 2 ++ storage/rocksdb/rdb_perf_context.cc | 2 ++ storage/rocksdb/rdb_sst_info.cc | 2 ++ storage/rocksdb/rdb_threads.cc | 2 ++ storage/rocksdb/rdb_utils.cc | 2 ++ 12 files changed, 24 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc index 851e4782416..63ddb237e26 100644 --- a/storage/rocksdb/event_listener.cc +++ b/storage/rocksdb/event_listener.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* The C++ file's header */ #include "./event_listener.h" diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 20c4723cdda..23035508f45 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -20,8 +20,9 @@ #define MYSQL_SERVER 1 -/* The C++ file's header */ #include + +/* The C++ file's header */ #include "./ha_rocksdb.h" /* C++ standard header files */ diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc 
index fc8cd97fc72..a3a232ee44b 100644 --- a/storage/rocksdb/properties_collector.cc +++ b/storage/rocksdb/properties_collector.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header file */ #include "./properties_collector.h" diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 3ee28b49cb6..1b684816827 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -18,6 +18,8 @@ #pragma implementation // gcc: Class implementation #endif +#include + /* This C++ files header file */ #include "./rdb_cf_manager.h" diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index ccdb46a654d..f5898fe7e9a 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -18,6 +18,8 @@ #pragma implementation // gcc: Class implementation #endif +#include + /* This C++ files header file */ #include "./rdb_cf_options.h" diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index 78af6eff3a5..b251c3d00c4 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* C++ standard header files */ #include #include diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc index dc85db4d356..1ebab5cb351 100644 --- a/storage/rocksdb/rdb_index_merge.cc +++ b/storage/rocksdb/rdb_index_merge.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header file */ #include "./rdb_index_merge.h" diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index 
e8077e2fd89..ae000a398e5 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header file */ #include "./rdb_mutex_wrapper.h" diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index cd0d9e57c2b..2375b742b3a 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -15,6 +15,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header file */ #include "./rdb_perf_context.h" diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index d131545e476..a68c2b699a9 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header file */ #include "./rdb_sst_info.h" diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc index 1538404ea56..8a1a13c3eab 100644 --- a/storage/rocksdb/rdb_threads.cc +++ b/storage/rocksdb/rdb_threads.cc @@ -19,6 +19,8 @@ #pragma implementation // gcc: Class implementation #endif +#include + /* The C++ file's header */ #include "./rdb_threads.h" diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index 05f0104d483..d0df370238e 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include + /* This C++ file's header */ #include "./rdb_utils.h" From d4f6c77ccc2dbb0eded6cd9434c3d1458affd5f5 Mon Sep 17 
00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 12:59:40 +0000 Subject: [PATCH 005/233] Temporarily (?) disable table statistics updates --- storage/rocksdb/ha_rocksdb.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 23035508f45..ed2492d099c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3106,6 +3106,7 @@ static bool rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *hton, return true; } +#ifdef MARIAROCKS_NOT_YET /* This is called for INFORMATION_SCHEMA */ @@ -3196,7 +3197,7 @@ static void rocksdb_update_table_stats( rocksdb_hton_name); } } - +#endif static rocksdb::Status check_rocksdb_options_compatibility( const char *dbpath, @@ -3314,7 +3315,9 @@ static int rocksdb_init_func(void *p) rocksdb_hton->savepoint_rollback= rocksdb_rollback_to_savepoint; rocksdb_hton->savepoint_rollback_can_release_mdl= rocksdb_rollback_to_savepoint_can_release_mdl; +#ifdef MARIAROCKS_NOT_YET rocksdb_hton->update_table_stats = rocksdb_update_table_stats; +#endif // MARIAROCKS_NOT_YET rocksdb_hton->flush_logs= rocksdb_flush_wal; rocksdb_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | From 272c05df5626bf6f7e74bd2d4cd3a67581c65fd7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 13:02:48 +0000 Subject: [PATCH 006/233] Temporarily (?) 
disable read-free replication in MariaRocks --- storage/rocksdb/ha_rocksdb.cc | 2 ++ storage/rocksdb/ha_rocksdb.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index ed2492d099c..38926ecd8ab 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -10626,6 +10626,7 @@ void rdb_queue_save_stats_request() rdb_bg_thread.request_save_stats(); } +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 void ha_rocksdb::rpl_before_delete_rows() { m_in_rpl_delete_rows= true; @@ -10646,6 +10647,7 @@ void ha_rocksdb::rpl_after_update_rows() m_in_rpl_update_rows= false; } +#endif // MARIAROCKS_NOT_YET /** @brief Read Free Replication can be used or not. Returning False means diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index ae25004e323..634ca570d9a 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -978,12 +978,14 @@ public: void set_use_read_free_rpl(const char* whitelist); void set_skip_unique_check_tables(const char* whitelist); +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 public: virtual void rpl_before_delete_rows() override; virtual void rpl_after_delete_rows() override; virtual void rpl_before_update_rows() override; virtual void rpl_after_update_rows() override; virtual bool use_read_free_rpl(); +#endif // MARIAROCKS_NOT_YET private: /* Flags tracking if we are inside different replication operation */ From 4be8cae56afd7a31eb2477e9e4fe1c814748b06d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 14:39:00 +0000 Subject: [PATCH 007/233] Temporarily disable Read-Free replication in MariaRocks --- storage/rocksdb/ha_rocksdb.cc | 6 +++++- storage/rocksdb/ha_rocksdb.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 38926ecd8ab..e33d9aa78cb 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4723,6 +4723,7 @@ 
void ha_rocksdb::free_key_buffers() m_pack_buffer= nullptr; } +#ifdef MARIAROCKS_NOT_YET void ha_rocksdb::set_use_read_free_rpl(const char* whitelist) { DBUG_ASSERT(whitelist != nullptr); @@ -4740,6 +4741,7 @@ void ha_rocksdb::set_use_read_free_rpl(const char* whitelist) m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename()); } +#endif void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) { @@ -4837,8 +4839,10 @@ int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) /* Index block size in MyRocks: used by MySQL in query optimization */ stats.block_size = rocksdb_tbl_options.block_size; +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 /* Determine at open whether we can use Read Free Replication or not */ set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables)); +#endif /* Determine at open whether we should skip unique checks for this table */ set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables)); @@ -10647,7 +10651,6 @@ void ha_rocksdb::rpl_after_update_rows() m_in_rpl_update_rows= false; } -#endif // MARIAROCKS_NOT_YET /** @brief Read Free Replication can be used or not. 
Returning False means @@ -10660,6 +10663,7 @@ bool ha_rocksdb::use_read_free_rpl() return ((m_in_rpl_delete_rows || m_in_rpl_update_rows) && !has_hidden_pk(table) && m_use_read_free_rpl); } +#endif // MARIAROCKS_NOT_YET double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) { diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 634ca570d9a..c78b2e5cd2b 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -975,7 +975,9 @@ public: int finalize_bulk_load() __attribute__((__warn_unused_result__)); +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 void set_use_read_free_rpl(const char* whitelist); +#endif void set_skip_unique_check_tables(const char* whitelist); #ifdef MARIAROCKS_NOT_YET // MDEV-10976 From 20bd26e6df3acf739ff1b2cffc41084367cd6dd0 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 15:27:22 +0000 Subject: [PATCH 008/233] Backport from facebook/mysql-5.6: commit b5fda565d4bad66f3b5a6432236b96743ccfd323 Author: Steaphan Greene Date: Fri Feb 7 21:34:04 2014 -0800 Add basic atomic_stats type Summary: WebScaleSQL Feature: Basic Stats Support This is a simple class template that implements an atomic stats variable. The focus is on performance, not accuracy. No set operations are guaranteed. The other operations are all atomic, so values should not actually be lost - however, there is no attempt to be consistent in reading multiple stats, nor any concern that different threads see any consistent view across multiple stats. Test Plan: This has been tested quite a bit in isolation, and no problems have been found. However, this has never been used in production. Also, a newer, more sophisticated, version of this is already in development. It also shows a small perf gain in the larger stats diffs (Table Stats, User Stats, etc...), when compared to the built-in MySQL atomics. 
Reviewers: pivanof Reviewed By: pivanof CC: jtolmer, MarkCallaghan, flamingcow, jeremycole, andrew-ford, inaam-rana, liang.guo.752 Differential Revision: https://reviews.facebook.net/D16029 Differential Revision: https://reviews.facebook.net/D16449 --- include/atomic_stat.h | 94 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 include/atomic_stat.h diff --git a/include/atomic_stat.h b/include/atomic_stat.h new file mode 100644 index 00000000000..04e59bd9a8a --- /dev/null +++ b/include/atomic_stat.h @@ -0,0 +1,94 @@ +/* This is an atomic integer abstract data type, for high-performance + tracking of a single stat. It intentionally permits inconsistent + atomic operations and reads, for better performance. This means + that, though no data should ever be lost by this stat, reads of it + at any time may not include all changes up to any particular point. + + So, values read from these may only be approximately correct. + + If your use-case will fail under these conditions, do not use this. + + Copyright (C) 2012 - 2014 Steaphan Greene + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor + Boston, MA 02110-1301, USA. 
+*/ + +#ifndef _atomic_stat_h_ +#define _atomic_stat_h_ + +#include + +template < typename TYPE > +class atomic_stat { +public: + // Initialize value to the default for the type + atomic_stat() : value_(TYPE()) {}; + + // This enforces a strict order, as all absolute sets should + void clear() { + value_.store(TYPE(), std::memory_order_seq_cst); + }; + + // Reads can get any valid value, it doesn't matter which, exactly + TYPE load() const { + return value_.load(std::memory_order_relaxed); + }; + + // This only supplies relative arithmetic operations + // These are all done atomically, and so can show up in any order + void inc(const TYPE &other) { + value_.fetch_add(other, std::memory_order_relaxed); + }; + + void dec(const TYPE &other) { + value_.fetch_sub(other, std::memory_order_relaxed); + }; + + void inc() { + value_.fetch_add(1, std::memory_order_relaxed); + }; + + void dec() { + value_.fetch_sub(1, std::memory_order_relaxed); + }; + + // This will make one attempt to set the value to the max of + // the current value, and the passed-in value. It can fail + // for any reason, and we only try it once. + void set_max_maybe(const TYPE &new_val) { + TYPE old_val = value_; + if (new_val > old_val) { + value_.compare_exchange_weak(old_val, new_val, + std::memory_order_relaxed, + std::memory_order_relaxed); + } + }; + + // This will make one attempt to assign the value to the passed-in + // value. It can fail for any reason, and we only try it once. 
+ void set_maybe(const TYPE &new_val) { + TYPE old_val = value_; + value_.compare_exchange_weak(old_val, new_val, + std::memory_order_relaxed, + std::memory_order_relaxed); + }; + +private: + std::atomic value_; +}; + +#endif // _atomic_stat_h_ From 84dd64702a8f3eebe3fba6f8fbcbe767cab3505b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 16:02:13 +0000 Subject: [PATCH 009/233] Fix compilation failure in rdb_perf_context.h Rdb_atomic_perf_counters uses my_io_perf_atomic_struct which uses atomic_stat from include/atomic_stat.h which was backported in the previous cset. --- storage/rocksdb/CMakeLists.txt | 1 + storage/rocksdb/rdb_mariadb_port.h | 16 ++++++++++++++++ storage/rocksdb/rdb_perf_context.h | 2 ++ 3 files changed, 19 insertions(+) create mode 100644 storage/rocksdb/rdb_mariadb_port.h diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index f9b65a81ab7..e9300884987 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -41,6 +41,7 @@ SET(ROCKSDB_SOURCES rdb_sst_info.cc rdb_sst_info.h rdb_utils.cc rdb_utils.h rdb_buff.h rdb_threads.cc rdb_threads.h + rdb_mariadb_port.h ${ROCKSDB_LIB_SOURCES} ) diff --git a/storage/rocksdb/rdb_mariadb_port.h b/storage/rocksdb/rdb_mariadb_port.h new file mode 100644 index 00000000000..0e529acdf7c --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_port.h @@ -0,0 +1,16 @@ + +#include "atomic_stat.h" + +/* Struct used for IO performance counters, shared among multiple threads */ +struct my_io_perf_atomic_struct { + atomic_stat bytes; + atomic_stat requests; + atomic_stat svc_time; /*!< time to do read or write operation */ + atomic_stat svc_time_max; + atomic_stat wait_time; /*!< total time in the request array */ + atomic_stat wait_time_max; + atomic_stat slow_ios; /*!< requests that take too long */ +}; +typedef struct my_io_perf_atomic_struct my_io_perf_atomic_t; + + diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h index 
1e01e933895..fc4415086f3 100644 --- a/storage/rocksdb/rdb_perf_context.h +++ b/storage/rocksdb/rdb_perf_context.h @@ -25,6 +25,8 @@ #include "./handler.h" #include +#include "rdb_mariadb_port.h" + namespace myrocks { enum { From d3cd64fda985486f845ab08ff5df420db8117ffd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 16:03:39 +0000 Subject: [PATCH 010/233] In MariaDB, KEY::actual_key_parts is named ext_key_parts --- storage/rocksdb/ha_rocksdb.cc | 8 ++++---- storage/rocksdb/rdb_datadic.cc | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index e33d9aa78cb..c35e6937b20 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -5047,7 +5047,7 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, !is_hidden_pk(i, table_arg, tbl_def_arg) && tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) { - for (uint part= 0; part < table_arg->key_info[i].actual_key_parts; part++) + for (uint part= 0; part < table_arg->key_info[i].ext_key_parts; part++) { if (!rdb_is_index_collation_supported( table_arg->key_info[i].key_part[part].field) && @@ -7124,7 +7124,7 @@ bool ha_rocksdb::can_use_single_delete(uint index) { return (index != pk_index(table, m_tbl_def) || (!has_hidden_pk(table) && - table->key_info[index].actual_key_parts == table->s->fields)); + table->key_info[index].ext_key_parts == table->s->fields)); } bool ha_rocksdb::skip_unique_check() @@ -8351,7 +8351,7 @@ int ha_rocksdb::info(uint flag) continue; } KEY* k= &table->key_info[i]; - for (uint j = 0; j < k->actual_key_parts; j++) + for (uint j = 0; j < k->ext_key_parts; j++) { const Rdb_index_stats& k_stats= m_key_descr_arr[i]->m_stats; uint x = k_stats.m_distinct_keys_per_prefix.size() > j && @@ -8367,7 +8367,7 @@ int ha_rocksdb::info(uint flag) // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1. 
// rec_per_key for the whole index is 1, and multiplied by 2^n if // n suffix columns of the index are not used. - x = 1 << (k->actual_key_parts-j-1); + x = 1 << (k->ext_key_parts-j-1); } k->rec_per_key[j]= x; } diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 0272d13efa4..fbb9f584c66 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -160,7 +160,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) } if (secondary_key) - m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->actual_key_parts; + m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->ext_key_parts; else { pk_info= nullptr; @@ -168,7 +168,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) } // "unique" secondary keys support: - m_key_parts= is_hidden_pk ? 1 : key_info->actual_key_parts; + m_key_parts= is_hidden_pk ? 1 : key_info->ext_key_parts; if (secondary_key) { @@ -229,7 +229,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) { /* Check if this field is already present in the key definition */ bool found= false; - for (uint j= 0; j < key_info->actual_key_parts; j++) + for (uint j= 0; j < key_info->ext_key_parts; j++) { if (field->field_index == key_info->key_part[j].field->field_index) { @@ -285,7 +285,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) For "unique" secondary indexes, pretend they have "index extensions" */ - if (secondary_key && src_i+1 == key_info->actual_key_parts) + if (secondary_key && src_i+1 == key_info->ext_key_parts) { simulating_extkey= true; if (!hidden_pk_exists) From f4994c78721a7c0da38cbc867c08aa34f458a09d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 17:03:31 +0000 Subject: [PATCH 011/233] MDEV-10975: Merging of @@rocksdb_skip_unique_check: Part #1 Leave the code in, but #ifdef it away to avoid compilation error --- storage/rocksdb/ha_rocksdb.cc | 2 ++ storage/rocksdb/ha_rocksdb.h | 2 ++ 2 files 
changed, 4 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c35e6937b20..c767d7a1f4d 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -7142,10 +7142,12 @@ bool ha_rocksdb::skip_unique_check() THDVAR(table->in_use, skip_unique_check); } +#ifdef MARIAROCKS_NOT_YET // MDEV-10975 void ha_rocksdb::set_force_skip_unique_check(bool skip) { m_force_skip_unique_check= skip; } +#endif bool ha_rocksdb::commit_in_the_middle() { diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index c78b2e5cd2b..3a47db96679 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -459,7 +459,9 @@ class ha_rocksdb: public my_core::handler bool can_use_single_delete(uint index) __attribute__((__warn_unused_result__)); bool skip_unique_check() __attribute__((__warn_unused_result__)); +#ifdef MARIAROCKS_NOT_YET // MDEV-10975 void set_force_skip_unique_check(bool skip) override; +#endif bool commit_in_the_middle() __attribute__((__warn_unused_result__)); bool do_bulk_commit(Rdb_transaction *tx) __attribute__((__nonnull__, __warn_unused_result__)); From 50f323683ad067bdc0c6114e4a99e8c4960516aa Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 17:20:11 +0000 Subject: [PATCH 012/233] Add a temporary stand-in for abort_with_stack_traces() call --- storage/rocksdb/rdb_mariadb_port.h | 17 +++++++++++++++++ storage/rocksdb/rdb_utils.h | 2 ++ 2 files changed, 19 insertions(+) diff --git a/storage/rocksdb/rdb_mariadb_port.h b/storage/rocksdb/rdb_mariadb_port.h index 0e529acdf7c..3a1ab72fa7d 100644 --- a/storage/rocksdb/rdb_mariadb_port.h +++ b/storage/rocksdb/rdb_mariadb_port.h @@ -1,3 +1,9 @@ +/* + A temporary header to resolve WebScaleSQL vs MariaDB differences + when porting MyRocks to MariaDB. 
+*/ +#ifndef RDB_MARIADB_PORT_H +#define RDB_MARIADB_PORT_H #include "atomic_stat.h" @@ -13,4 +19,15 @@ struct my_io_perf_atomic_struct { }; typedef struct my_io_perf_atomic_struct my_io_perf_atomic_t; +//////////////////////////////////////////////////////////////////////////// +/* + Temporary stand-in for + fae59683dc116be2cc78b0b30d61c84659c33bd3 + Print stack traces before committing suicide + +*/ +#define abort_with_stack_traces() { abort(); } + + +#endif diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 5630dc1c20f..138d4bb661b 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -15,6 +15,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #pragma once +#include "rdb_mariadb_port.h" + /* C++ standard header files */ #include From a06faac2485810ae46251e32302a182b2db585e2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 17:24:01 +0000 Subject: [PATCH 013/233] Temporarily disable the fix for WebScaleSQL Issue #108. 
Issue #108: Index-only scans do not work for partitioned tables and extended keys c1f1f0b64eaecd132fc6ec21663faf0510afacaf --- storage/rocksdb/ha_rocksdb.cc | 3 ++- storage/rocksdb/ha_rocksdb.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c767d7a1f4d..81a3b267e1e 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4008,6 +4008,7 @@ bool ha_rocksdb::same_table(const ha_rocksdb& other) const return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); } +#ifdef MARIAROCKS_NOT_YET bool ha_rocksdb::init_with_fields() { uint pk= table_share->primary_key; @@ -4022,7 +4023,7 @@ bool ha_rocksdb::init_with_fields() cached_table_flags= table_flags(); return false; /* Ok */ } - +#endif /** Convert record from table->record[0] form into a form that can be written diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 3a47db96679..0db8ee40c76 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -591,9 +591,9 @@ public: HA_NULL_IN_KEY | HA_PARTIAL_COLUMN_READ; } - +#ifdef MARIAROCKS_NOT_YET bool init_with_fields() override; - +#endif /** @brief This is a bitmap of flags that indicates how the storage engine implements indexes. 
The current index flags are documented in From 3af9986289db05249834769d259d8676bc0e495f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 17:30:16 +0000 Subject: [PATCH 014/233] MariaRocks port: dir_entry has number_of_files with one 'f' in MariaDB --- storage/rocksdb/rdb_datadic.cc | 4 ++-- storage/rocksdb/rdb_sst_info.cc | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index fbb9f584c66..847343180ee 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -3128,7 +3128,7 @@ bool Rdb_validate_tbls::scan_for_frms( /* Scan through the files in the directory */ struct fileinfo* file_info = dir_info->dir_entry; - for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++) + for (uint ii = 0; ii < dir_info->number_of_files; ii++, file_info++) { /* Find .frm files that are not temp files (those that start with '#') */ const char* ext = strrchr(file_info->name, '.'); @@ -3179,7 +3179,7 @@ bool Rdb_validate_tbls::compare_to_actual_tables( } file_info = dir_info->dir_entry; - for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++) + for (uint ii = 0; ii < dir_info->number_of_files; ii++, file_info++) { /* Ignore files/dirs starting with '.' */ if (file_info->name[0] == '.') diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index a68c2b699a9..dbab4b50068 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -398,7 +398,7 @@ void Rdb_sst_info::init(rocksdb::DB* db) // Scan through the files in the directory struct fileinfo* file_info= dir_info->dir_entry; - for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++) + for (uint ii= 0; ii < dir_info->number_of_files; ii++, file_info++) { // find any files ending with m_suffix ... 
std::string name= file_info->name; From 2770e8004d53e70fe428471ed7cfb7ed296ae7f8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 17:52:01 +0000 Subject: [PATCH 015/233] MariaRocks: remove ha_statistic_increment calls They have been replaced with increment_statistics() calls made by handler::ha_XXX() methods. --- storage/rocksdb/ha_rocksdb.cc | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 81a3b267e1e..3fb40587dd4 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6246,7 +6246,6 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, int rc= 0; DBUG_ENTER("ha_rocksdb::index_read_map"); - ha_statistic_increment(&SSV::ha_read_key_count); const std::shared_ptr& kd= m_key_descr_arr[active_index]; uint actual_key_parts= kd->get_key_parts(); bool using_full_key= is_using_full_key(keypart_map, actual_key_parts); @@ -6791,7 +6790,6 @@ int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, int ha_rocksdb::index_next(uchar *buf) { bool moves_forward= true; - ha_statistic_increment(&SSV::ha_read_next_count); if (m_key_descr_arr[active_index]->m_is_reverse_cf) { moves_forward= false; @@ -6807,7 +6805,6 @@ int ha_rocksdb::index_next(uchar *buf) int ha_rocksdb::index_prev(uchar *buf) { bool moves_forward= false; - ha_statistic_increment(&SSV::ha_read_prev_count); if (m_key_descr_arr[active_index]->m_is_reverse_cf) { moves_forward= true; @@ -6855,7 +6852,6 @@ int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward) int ha_rocksdb::index_first(uchar *buf) { m_sk_match_prefix= nullptr; - ha_statistic_increment(&SSV::ha_read_first_count); int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? 
index_last_intern(buf) : index_first_intern(buf); if (rc == HA_ERR_KEY_NOT_FOUND) @@ -6867,7 +6863,6 @@ int ha_rocksdb::index_first(uchar *buf) int ha_rocksdb::index_last(uchar *buf) { m_sk_match_prefix= nullptr; - ha_statistic_increment(&SSV::ha_read_last_count); int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? index_first_intern(buf) : index_last_intern(buf); if (rc == HA_ERR_KEY_NOT_FOUND) @@ -7253,7 +7248,6 @@ int ha_rocksdb::write_row(uchar *buf) DBUG_ENTER("ha_rocksdb::write_row"); DBUG_ASSERT(buf == table->record[0]); DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); - ha_statistic_increment(&SSV::ha_write_count); /* Note: "buf == table->record[0]" is copied from innodb. I am not aware of any use cases where this condition is not true. @@ -8022,7 +8016,6 @@ int ha_rocksdb::rnd_init(bool scan) int ha_rocksdb::rnd_next(uchar *buf) { int rc; - ha_statistic_increment(&SSV::ha_read_rnd_next_count); for (;;) { rc= rnd_next_with_direction(buf, true); @@ -8205,7 +8198,6 @@ int ha_rocksdb::delete_row(const uchar *buf) { DBUG_ENTER("ha_rocksdb::delete_row"); - ha_statistic_increment(&SSV::ha_delete_count); set_last_rowkey(buf); rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); @@ -8436,7 +8428,6 @@ int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos) size_t len; DBUG_ENTER("ha_rocksdb::rnd_pos"); - ha_statistic_increment(&SSV::ha_read_rnd_count); stats.rows_requested++; len= m_pk_descr->key_length(table, rocksdb::Slice((const char*)pos, ref_length)); @@ -8507,7 +8498,6 @@ int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) */ DBUG_ASSERT(new_data == table->record[0]); - ha_statistic_increment(&SSV::ha_update_count); int rv; rv= update_write_row(old_data, new_data, false); From 08f96d21fdc6a6e98870759aac11eab9cfc6cab3 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 18:01:39 +0000 Subject: [PATCH 016/233] MariaRocks: add DB_TYPE_ROCKSDB into legacy_db_type enum --- sql/handler.h | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/sql/handler.h b/sql/handler.h index c1f6cb5b162..401d6ad9440 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -441,7 +441,8 @@ enum legacy_db_type DB_TYPE_PERFORMANCE_SCHEMA=28, DB_TYPE_ARIA=42, DB_TYPE_TOKUDB=43, - DB_TYPE_FIRST_DYNAMIC=44, + DB_TYPE_ROCKSDB=44, + DB_TYPE_FIRST_DYNAMIC=45, DB_TYPE_DEFAULT=127 // Must be last }; /* From 06b4962fc4d3eb12bb34dbb7cca6578076bb9fcd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 18:11:00 +0000 Subject: [PATCH 017/233] MariaRocks port: db_low_byte_first is gone in MariaDB --- storage/rocksdb/ha_rocksdb.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 3fb40587dd4..d442251a3c0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4386,8 +4386,7 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, memcpy(blob->ptr, data_len_str, length_bytes); - uint32 data_len= blob->get_length((uchar*)data_len_str, length_bytes, - table->s->db_low_byte_first); + uint32 data_len= blob->get_length((uchar*)data_len_str, length_bytes); const char *blob_ptr; if (!(blob_ptr= reader.read(data_len))) { From 0c5823ab9e3666da056cd29c569ea61ef4fea3c0 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 9 Oct 2016 18:22:23 +0000 Subject: [PATCH 018/233] MariaRocks port: mysql_bin_log_is_open -> mysql_bin_log.is_open --- storage/rocksdb/ha_rocksdb.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index d442251a3c0..905604215d4 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3064,7 +3064,7 @@ static int rocksdb_start_tx_and_assign_read_view( if (binlog_file) { - if (binlog_pos && mysql_bin_log_is_open()) + if (binlog_pos && mysql_bin_log.is_open()) mysql_bin_log_lock_commits(); else return 1; From 
746f35d6f383af92d8c82bc61407e80dd860c633 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 10 Oct 2016 18:54:43 +0000 Subject: [PATCH 019/233] MariaRocks: Disable FlashCache support Flashcache is available only in WebScaleSQL (not in MySQL or MariaDB). --- storage/rocksdb/ha_rocksdb.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 905604215d4..8c17b7fd24c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3471,6 +3471,7 @@ static int rocksdb_init_func(void *p) rocksdb::Options main_opts(rocksdb_db_options, rocksdb_cf_options_map.get_defaults()); +#ifdef MARIAROCKS_NOT_YET /* Flashcache configuration: When running on Flashcache, mysqld opens Flashcache device before @@ -3497,6 +3498,7 @@ static int rocksdb_init_func(void *p) "writer threads, fd %d", cachedev_fd); main_opts.env= flashcache_aware_env.get(); } +#endif main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes, rocksdb::Env::Priority::HIGH); From 01a5216b9c086655b1127123b7219dbb1799b029 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 15 Oct 2016 20:58:15 +0000 Subject: [PATCH 020/233] MariaRocks port: temporarily? disable Per-table Extra Stats (why are table reads/writes/updates collected in ha_statistics? this structure was intended for table statistics that the engine reports.. and now it is also used for engine-agnostic statistics like rows_read, rows_requested, etc?) 
--- storage/rocksdb/ha_rocksdb.cc | 34 +++++++++++++++++++++++++---- storage/rocksdb/ha_rocksdb.h | 4 ++-- storage/rocksdb/rdb_mariadb_port.h | 3 +++ storage/rocksdb/rdb_perf_context.cc | 5 +++++ 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 8c17b7fd24c..47cd72e1853 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -97,6 +97,7 @@ namespace myrocks { static st_global_stats global_stats; static st_export_stats export_stats; +#ifdef MARIAROCKS_NOT_YET /** Updates row counters based on the table type and operation type. */ @@ -108,6 +109,7 @@ void ha_rocksdb::update_row_stats(operation_type type) { else global_stats.rows[type].inc(); } +#endif void dbug_dump_database(rocksdb::DB *db); static handler *rocksdb_create_handler(my_core::handlerton *hton, @@ -6111,9 +6113,9 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) { DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(table != nullptr); - +#ifdef MARIAROCKS_NOT_YET stats.rows_requested++; - +#endif /* Use STATUS_NOT_FOUND when record not found or some error occurred */ table->status= STATUS_NOT_FOUND; @@ -6150,9 +6152,11 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) if (!rc) { table->status= 0; +#ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_next++; update_row_stats(ROWS_READ); +#endif } return rc; } @@ -6256,8 +6260,9 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, /* By default, we don't need the retrieved records to match the prefix */ m_sk_match_prefix= nullptr; +#ifdef MARIAROCKS_NOT_YET stats.rows_requested++; - +#endif if (active_index == table->s->primary_key && find_flag == HA_READ_KEY_EXACT && using_full_key) { @@ -6270,9 +6275,11 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); if (!rc) { +#ifdef MARIAROCKS_NOT_YET stats.rows_read++; 
stats.rows_index_first++; update_row_stats(ROWS_READ); +#endif } DBUG_RETURN(rc); } @@ -6397,9 +6404,11 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, else { table->status= 0; +#ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_first++; update_row_stats(ROWS_READ); +#endif } DBUG_RETURN(rc); } @@ -6962,8 +6971,10 @@ int ha_rocksdb::index_first_intern(uchar *buf) index_next is always incremented on success, so decrement if it is index_first instead */ +#ifdef MARIAROCKS_NOT_YET stats.rows_index_first++; stats.rows_index_next--; +#endif } DBUG_RETURN(rc); } @@ -7094,8 +7105,10 @@ int ha_rocksdb::index_last_intern(uchar *buf) index_next is always incremented on success, so decrement if it is index_first instead */ +#ifdef MARIAROCKS_NOT_YET stats.rows_index_first++; stats.rows_index_next--; +#endif } DBUG_RETURN(rc); } @@ -7266,8 +7279,10 @@ int ha_rocksdb::write_row(uchar *buf) if (rv == 0) { +#ifdef MARIAROCKS_NOT_YET stats.rows_inserted++; update_row_stats(ROWS_INSERTED); +#endif } DBUG_RETURN(rv); } @@ -8047,8 +8062,9 @@ int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward) DBUG_ENTER("ha_rocksdb::rnd_next"); table->status= STATUS_NOT_FOUND; +#ifdef MARIAROCKS_NOT_YET stats.rows_requested++; - +#endif if (!m_scan_it || !m_scan_it->Valid()) { /* @@ -8123,9 +8139,11 @@ int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward) rc= HA_ERR_END_OF_FILE; if (!rc) { +#ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_next++; update_row_stats(ROWS_READ); +#endif } DBUG_RETURN(rc); @@ -8239,8 +8257,10 @@ int ha_rocksdb::delete_row(const uchar *buf) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } +#ifdef MARIAROCKS_NOT_YET stats.rows_deleted++; update_row_stats(ROWS_DELETED); +#endif DBUG_RETURN(0); } @@ -8429,7 +8449,9 @@ int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos) size_t len; DBUG_ENTER("ha_rocksdb::rnd_pos"); +#ifdef MARIAROCKS_NOT_YET stats.rows_requested++; +#endif len= m_pk_descr->key_length(table, 
rocksdb::Slice((const char*)pos, ref_length)); if (len == size_t(-1)) @@ -8441,8 +8463,10 @@ int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos) if (!rc) { +#ifdef MARIAROCKS_NOT_YET stats.rows_read++; update_row_stats(ROWS_READ); +#endif } DBUG_RETURN(rc); @@ -8504,8 +8528,10 @@ int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) if (rv == 0) { +#ifdef MARIAROCKS_NOT_YET stats.rows_updated++; update_row_stats(ROWS_UPDATED); +#endif } DBUG_RETURN(rv); } diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 0db8ee40c76..bb41766f96d 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -467,9 +467,9 @@ class ha_rocksdb: public my_core::handler __attribute__((__nonnull__, __warn_unused_result__)); bool has_hidden_pk(const TABLE* table) __attribute__((__nonnull__, __warn_unused_result__)); - +#ifdef MARIAROCKS_NOT_YET void update_row_stats(operation_type type); - +#endif void set_last_rowkey(const uchar *old_data); /* diff --git a/storage/rocksdb/rdb_mariadb_port.h b/storage/rocksdb/rdb_mariadb_port.h index 3a1ab72fa7d..fc82fc4c3b9 100644 --- a/storage/rocksdb/rdb_mariadb_port.h +++ b/storage/rocksdb/rdb_mariadb_port.h @@ -5,6 +5,7 @@ #ifndef RDB_MARIADB_PORT_H #define RDB_MARIADB_PORT_H +#include "my_global.h" /* ulonglong */ #include "atomic_stat.h" /* Struct used for IO performance counters, shared among multiple threads */ @@ -29,5 +30,7 @@ typedef struct my_io_perf_atomic_struct my_io_perf_atomic_t; */ #define abort_with_stack_traces() { abort(); } +//////////////////////////////////////////////////////////////////////////// +typedef struct my_io_perf_struct my_io_perf_t; #endif diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index 2375b742b3a..c9c8eba78f4 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -17,6 +17,7 @@ #include +#include "rdb_mariadb_port.h" /* This C++ file's header file */ #include 
"./rdb_perf_context.h" @@ -222,9 +223,12 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) rocksdb::perf_context.block_read_time; my_io_perf_sum_atomic_helper(m_shared_io_perf_read, &io_perf_read); +#ifdef MARIAROCKS_NOT_YET my_io_perf_sum(&m_stats->table_io_perf_read, &io_perf_read); +#endif } +#ifdef MARIAROCKS_NOT_YET if (m_stats) { if (rocksdb::perf_context.internal_key_skipped_count != 0) { @@ -237,6 +241,7 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) rocksdb::perf_context.internal_delete_skipped_count; } } +#endif } } // namespace myrocks From f456532c62b227832c0a0c4107e4ee542d1e5b30 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 15 Oct 2016 21:20:52 +0000 Subject: [PATCH 021/233] MariaRocks port: compile RocksDB with -frtti. Probably this is not the right way to do it, see the comment --- storage/rocksdb/CMakeLists.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index e9300884987..f881096780e 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -25,6 +25,19 @@ INCLUDE_DIRECTORIES( ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX -DZLIB) +# MARIAROCKS_NOT_YET: Add -frtti flag when compiling RocksDB files. +# TODO: is this the right way to do this? +# - SQL layer and storage/rocksdb/*.cc are compiled with -fnortti +# - RocksDB files are compiled with "-fnortti ... 
-frtti" +# - This causes RocksDB headers to be compiled with different settings: +# = with RTTI when compiling RocksDB +# = without RTTI when compiling storage/rocksdb/*.cc +# +# (facebook/mysql-5.6 just compiles everything without -f*rtti, which means +# everything is compiled with -frtti) +# +set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) + SET(ROCKSDB_SOURCES ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h logger.h From 274e5be1942aa0b6533d78b43454a7ed940b6628 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 11:00:07 +0000 Subject: [PATCH 022/233] MariaRocks: port my_hash_const_element "Add new function my_hash_const_element(), the const equivalent of my_hash_element()" - comes from facebook/mysql-5.6, 7c869d34b9fa2262b941efd6363a260b7c37948f --- include/hash.h | 1 + mysys/hash.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/hash.h b/include/hash.h index fde7fc30d38..4d575fe1736 100644 --- a/include/hash.h +++ b/include/hash.h @@ -74,6 +74,7 @@ my_bool my_hash_init2(HASH *hash, uint growth_size, CHARSET_INFO *charset, void my_hash_free(HASH *tree); void my_hash_reset(HASH *hash); uchar *my_hash_element(HASH *hash, ulong idx); +const uchar *my_hash_const_element(const HASH *hash, ulong idx); uchar *my_hash_search(const HASH *info, const uchar *key, size_t length); uchar *my_hash_search_using_hash_value(const HASH *info, my_hash_value_type hash_value, diff --git a/mysys/hash.c b/mysys/hash.c index dc03ea9a4dc..580e21b77d0 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -721,6 +721,14 @@ uchar *my_hash_element(HASH *hash, ulong idx) } +const uchar *my_hash_const_element(const HASH *hash, ulong idx) +{ + if (idx < hash->records) + return dynamic_element(&hash->array,idx,const HASH_LINK*)->data; + return 0; +} + + /* Replace old row with new row. 
This should only be used when key isn't changed From d26283458c71a1a333962bf7ed075e54bbc8f15c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 11:01:01 +0000 Subject: [PATCH 023/233] MariaRocks port: make key_restore() parameter const --- sql/key.cc | 2 +- sql/key.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/key.cc b/sql/key.cc index 31b65adabe9..77660c0fc8c 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -176,7 +176,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, @param key_length specifies length of all keyparts that will be restored */ -void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, +void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, uint key_length) { uint length; diff --git a/sql/key.h b/sql/key.h index 47b981f5298..f2521e4a665 100644 --- a/sql/key.h +++ b/sql/key.h @@ -29,7 +29,7 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field, uint *key_length, uint *keypart); void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, uint key_length, bool with_zerofill= FALSE); -void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, +void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, uint key_length); bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length); void key_unpack(String *to, TABLE *table, KEY *key); From a7091b0679bed9f58609c173e4b036a51c69b730 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 11:17:19 +0000 Subject: [PATCH 024/233] Temporarily disable Read-Free replication in MariaRocks, part#2 (Squash with 4be8cae56afd7a31eb2477e9e4fe1c814748b06d) --- storage/rocksdb/ha_rocksdb.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 47cd72e1853..949e3017a4f 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -7299,6 +7299,7 @@ int 
ha_rocksdb::write_row(uchar *buf) */ void ha_rocksdb::set_last_rowkey(const uchar *old_data) { +#ifdef MARIAROCKS_NOT_YET if (old_data && use_read_free_rpl()) { int old_pk_size= m_pk_descr->pack_record( @@ -7306,6 +7307,7 @@ void ha_rocksdb::set_last_rowkey(const uchar *old_data) m_last_rowkey.copy((const char*)m_pk_packed_tuple, old_pk_size, &my_charset_bin); } +#endif } int ha_rocksdb::get_pk_for_update(struct update_row_info* row_info) From b42248d2f2583d29142cf9b0c81c1994249edbcb Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 11:39:55 +0000 Subject: [PATCH 025/233] MyRocks port: make Field::char_length() a const function --- sql/field.cc | 2 +- sql/field.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/field.cc b/sql/field.cc index ba0ebb253f4..846bb1fe674 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -10598,7 +10598,7 @@ Column_definition::Column_definition(THD *thd, Field *old_field, length */ -uint32 Field_blob::char_length() +uint32 Field_blob::char_length() const { switch (packlength) { diff --git a/sql/field.h b/sql/field.h index f550dad1c6c..0bb9da83827 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1335,7 +1335,7 @@ public: longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag, int *err); /* The max. number of characters */ - virtual uint32 char_length() + virtual uint32 char_length() const { return field_length / charset()->mbmaxlen; } @@ -3316,7 +3316,7 @@ public: bool has_charset(void) const { return charset() == &my_charset_bin ? 
FALSE : TRUE; } uint32 max_display_length(); - uint32 char_length(); + uint32 char_length() const; uint is_equal(Create_field *new_field); private: int do_save_field_metadata(uchar *first_byte); From 7eef3de1ae00ce70919bd9196c5a64cd3f8b2e2d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 11:56:14 +0000 Subject: [PATCH 026/233] MariaRocks port: In MariaDB, there's no Field_longlong::PACK_LENGTH And the code in ha_rocksdb.cc makes assumptions about hidden pk length being 8, anyway. --- storage/rocksdb/ha_rocksdb.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 949e3017a4f..7991b2f25cb 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3931,7 +3931,7 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id) if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE))) return 1; - const int length= Field_longlong::PACK_LENGTH; + const int length= 8; /* was Field_longlong::PACK_LENGTH in FB MySQL tree */ const uchar *from= reinterpret_cast(reader.read(length)); if (from == nullptr) { From 27adea907b634519a96cc24fc590c9eb78ae114e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 12:30:51 +0000 Subject: [PATCH 027/233] MariaRocks port: temporarily disable rdb_collation_exceptions Then Regex_list_handler is not needed and we don't have to backport it right now. --- storage/rocksdb/ha_rocksdb.cc | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 7991b2f25cb..0d6ee7c2737 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -155,10 +155,11 @@ Rdb_binlog_manager binlog_manager; static Rdb_background_thread rdb_bg_thread; - +#ifdef MARIAROCKS_NOT_YET // List of table names (using regex) that are exceptions to the strict // collation check requirement. 
Regex_list_handler *rdb_collation_exceptions; +#endif static const char* const ERRSTR_ROLLBACK_ONLY = "This transaction was rolled back and cannot be " @@ -3288,11 +3289,13 @@ static int rocksdb_init_func(void *p) mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex, MY_MUTEX_INIT_FAST); +#ifdef MARIAROCKS_NOT_YET #if defined(HAVE_PSI_INTERFACE) rdb_collation_exceptions = new Regex_list_handler( key_rwlock_collation_exception_list); #else rdb_collation_exceptions = new Regex_list_handler(); +#endif #endif mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex, @@ -3672,7 +3675,9 @@ static int rocksdb_done_func(void *p) mysql_mutex_destroy(&rdb_open_tables.m_mutex); mysql_mutex_destroy(&rdb_sysvars_mutex); +#ifdef MARIAROCKS_NOT_YET delete rdb_collation_exceptions; +#endif mysql_mutex_destroy(&rdb_collation_data_mutex); mysql_mutex_destroy(&rdb_mem_cmp_space_mutex); @@ -4751,6 +4756,7 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) { DBUG_ASSERT(whitelist != nullptr); +#ifdef MARIAROCKS_NOT_YET // regex_handler #if defined(HAVE_PSI_INTERFACE) Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables); #else @@ -4763,6 +4769,7 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) } m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename()); +#endif } int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) @@ -5055,7 +5062,11 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, { if (!rdb_is_index_collation_supported( table_arg->key_info[i].key_part[part].field) && +#ifdef MARIAROCKS_NOT_YET !rdb_collation_exceptions->matches(tablename_sys)) +#else + true) +#endif { std::string collation_err; for (auto coll : RDB_INDEX_COLLATIONS) @@ -10602,6 +10613,7 @@ rocksdb_set_rate_limiter_bytes_per_sec( void rdb_set_collation_exception_list(const char *exception_list) { +#ifdef MARIAROCKS_NOT_YET DBUG_ASSERT(rdb_collation_exceptions != nullptr); 
if (!rdb_collation_exceptions->set_patterns(exception_list)) @@ -10609,6 +10621,7 @@ void rdb_set_collation_exception_list(const char *exception_list) my_core::warn_about_bad_patterns(rdb_collation_exceptions, "strict_collation_exceptions"); } +#endif } void From e43ce1859761185cd24e3bf380c8c1d0072cc3c2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 12:45:53 +0000 Subject: [PATCH 028/233] MariaRocks port: use correct path in CMakeLists.txt --- storage/rocksdb/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index f881096780e..77c0dfe9c2e 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -19,7 +19,7 @@ STRING(REGEX MATCHALL "[^\n]+" ROCKSDB_LIB_SOURCES ${SCRIPT_OUTPUT}) INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/include - ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src + ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/third-party/gtest-1.7.0/fused-src ) ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX From 1dead2d213a2323eaf0f15a6a072f381bc6debea Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 14:06:25 +0000 Subject: [PATCH 029/233] MariaRocks port: MariaDB has no "ASYNC_COMMIT" feature adjust a few function definitions accordingly --- storage/rocksdb/ha_rocksdb.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 0d6ee7c2737..c17da537f7d 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -2543,9 +2543,15 @@ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)), For a slave, prepare() updates the slave_gtid_info table which tracks the replication progress. 
*/ -static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, +static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) +#ifdef MARIAROCKS_NOT_YET bool async) +// This is "ASYNC_COMMIT" feature which is only in webscalesql +// for now, define async=false below: +#endif { + bool async=false; + Rdb_transaction*& tx= get_tx_from_thd(thd); if (!tx->can_prepare()) { @@ -2693,7 +2699,7 @@ static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, return count; } -static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx, bool) +static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx) { DBUG_ENTER("rocksdb_commit"); From 8cde1d449a3ad6c3e294b9cbc0a1494863433e16 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 14:16:58 +0000 Subject: [PATCH 030/233] MariaRocks port: disable thd_store_lsn() call It is from "write/sync redo log before flushing binlog cache to file" feature that MariaDB doesn't have (or need?) --- storage/rocksdb/ha_rocksdb.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c17da537f7d..fe782d7199c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -2578,10 +2578,15 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) return 1; } if (thd->durability_property == HA_IGNORE_DURABILITY) { +#ifdef MARIAROCKS_NOT_YET + // MariaRocks: disable the + // "write/sync redo log before flushing binlog cache to file" + // feature. 
See a869c56d361bb44f46c0efeb11a8f03561676247 /** we set the log sequence as '1' just to trigger hton->flush_logs */ thd_store_lsn(thd, 1, DB_TYPE_ROCKSDB); +#endif } } From 54295beee293e75b33080cfc9a6cee9463ed77be Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 14:28:22 +0000 Subject: [PATCH 031/233] MariaRocks port: compare_key_icp() is called compare_key2() in MariaDB --- storage/rocksdb/ha_rocksdb.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index fe782d7199c..f6849d2889b 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -9503,8 +9503,10 @@ enum icp_result ha_rocksdb::check_index_cond() { DBUG_ASSERT(pushed_idx_cond); DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY); - - if (end_range && compare_key_icp(end_range) > 0) + + // MARIAROCKS_NOT_YET: MariaRocks todo: switch to using + // handler_index_cond_check() call? + if (end_range && compare_key2(end_range) > 0) { /* caller should return HA_ERR_END_OF_FILE already */ return ICP_OUT_OF_RANGE; From c6dee75759f529431468a98891f608ebc27ddb18 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 14:34:46 +0000 Subject: [PATCH 032/233] MariaRocks: MariaDB doesnt support ICP over backwards index scans yet --- storage/rocksdb/ha_rocksdb.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index f6849d2889b..c3ce4c32adc 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6201,7 +6201,12 @@ int ha_rocksdb::read_range_first(const key_range *start_key, DBUG_ENTER("ha_rocksdb::read_range_first"); eq_range= eq_range_arg; +#ifdef MARIAROCKS_NOT_YET + // Range scan direction is used to get ICP to work for backwards scans set_end_range(end_key, RANGE_SCAN_ASC); +#else + set_end_range(end_key); +#endif range_key_part= table->key_info[active_index].key_part; From 
9b78cd3cac9fa16217918cb72b8036681504fdc2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 18:02:28 +0000 Subject: [PATCH 033/233] MariaRocks port: MariaDB doesn't support SHOW ENGINE ROCKSDB TRANSACTION STATUS Should we start supporting it? --- storage/rocksdb/ha_rocksdb.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c3ce4c32adc..d1c2e930253 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3012,12 +3012,13 @@ static bool rocksdb_show_status(handlerton* hton, str.append(buf); res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); } +#ifdef MARIAROCKS_NOT_YET else if (stat_type == HA_ENGINE_TRX) { /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */ res |= rocksdb_show_snapshot_status(hton, thd, stat_print); } - +#endif return res; } From fdf026a4add67b099e691835d765f0edd0f51ba6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 18:07:35 +0000 Subject: [PATCH 034/233] MariaRocks port: s/my_core::thd_killed/thd_killed/ --- storage/rocksdb/rdb_mutex_wrapper.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index ae000a398e5..9b42a4f8145 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -115,7 +115,7 @@ Rdb_cond_var::WaitFor(std::shared_ptr mutex_arg, #ifndef STANDALONE_UNITTEST if (current_thd) - killed= my_core::thd_killed(current_thd); + killed= thd_killed(current_thd); #endif } while (!killed && res == EINTR); From 0a132ae7a29d21397ff7cf031f7fdc42d218a01f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 16 Oct 2016 18:38:34 +0000 Subject: [PATCH 035/233] MariaRocks: disable more of my_io_perf_t usage --- storage/rocksdb/ha_rocksdb.cc | 2 ++ storage/rocksdb/rdb_perf_context.cc | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) 
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index d1c2e930253..563bd5e199d 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3774,7 +3774,9 @@ Rdb_table_handler* Rdb_open_tables_map::get_table_handler( } thr_lock_init(&table_handler->m_thr_lock); +#ifdef MARIAROCKS_NOT_YET my_io_perf_atomic_init(&table_handler->m_io_perf_read); +#endif } DBUG_ASSERT(table_handler->m_ref_count >= 0); table_handler->m_ref_count++; diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index c9c8eba78f4..cd1ac6eca93 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -209,6 +209,7 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) rocksdb::perf_context.block_read_count != 0 || rocksdb::perf_context.block_read_time != 0)) { +#ifdef MARIAROCKS_NOT_YET my_io_perf_t io_perf_read; my_io_perf_init(&io_perf_read); @@ -223,7 +224,6 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) rocksdb::perf_context.block_read_time; my_io_perf_sum_atomic_helper(m_shared_io_perf_read, &io_perf_read); -#ifdef MARIAROCKS_NOT_YET my_io_perf_sum(&m_stats->table_io_perf_read, &io_perf_read); #endif } From 960fbc38d6444c3056827f2cc176864609a0de5c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 17 Oct 2016 16:53:20 +0000 Subject: [PATCH 036/233] MariaRocks port: temporarily? 
disable "CONSISTENT INNODB|ROCKSDB SNAPSHOT" feature cset c2b8ced16e255ffdbcc1e638ac8dd4f398eb1a93 Implement START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT --- storage/rocksdb/ha_rocksdb.cc | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 563bd5e199d..a261e78a251 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3056,13 +3056,15 @@ static inline void rocksdb_register_tx(handlerton *hton, THD *thd, */ static int rocksdb_start_tx_and_assign_read_view( handlerton* hton, /*!< in: RocksDB handlerton */ - THD* thd, /*!< in: MySQL thread handle of the + THD* thd) /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ +#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog char* binlog_file, /* out: binlog file for last commit */ ulonglong* binlog_pos, /* out: binlog pos for last commit */ char** gtid_executed, /* out: Gtids logged until last commit */ int* gtid_executed_length) /*out: Length of gtid_executed string */ +#endif { Rdb_perf_context_guard guard(thd); @@ -3076,7 +3078,7 @@ static int rocksdb_start_tx_and_assign_read_view( "in RocksDB Storage Engine.", MYF(0)); return 1; } - +#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog if (binlog_file) { if (binlog_pos && mysql_bin_log.is_open()) @@ -3084,17 +3086,18 @@ static int rocksdb_start_tx_and_assign_read_view( else return 1; } - +#endif Rdb_transaction* tx= get_or_create_tx(thd); DBUG_ASSERT(!tx->has_snapshot()); tx->set_tx_read_only(true); rocksdb_register_tx(hton, thd, tx); tx->acquire_snapshot(true); +#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog if (binlog_file) mysql_bin_log_unlock_commits(binlog_file, binlog_pos, gtid_executed, gtid_executed_length); - +#endif return 0; } From e951fd17e81d4524e8090fe0f268bdcf7952b519 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 19 Oct 2016 12:00:22 +0000 Subject: 
[PATCH 037/233] MariaRocks port: make rocksdb_flush_wal() match handlerton::flush_logs definition The original parameter comes from commit a869c56d361bb44f46c0efeb11a8f03561676247 write/sync redo log before flushing binlog cache to file we've already disabled some of that patch in 8cde1d449a3ad6c3e294b9cbc0a1494863433e16 --- storage/rocksdb/ha_rocksdb.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index a261e78a251..d1c289e2bb0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -2528,8 +2528,10 @@ static std::string rdb_xid_to_string(const XID& src) Called by hton->flush_logs after MySQL group commit prepares a set of transactions. */ -static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)), +static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) +#ifdef MARIAROCKS_NOT_YET ulonglong target_lsn __attribute__((__unused__))) +#else { DBUG_ASSERT(rdb != nullptr); rocksdb::Status s= rdb->SyncWAL(); From 085fa3e4bc82acd7215a1de1a076bc0894f20f51 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 19 Oct 2016 12:15:59 +0000 Subject: [PATCH 038/233] MariaRocks port: rli_slave is called rgi_slave in MariaDB. --- storage/rocksdb/ha_rocksdb.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index d1c289e2bb0..87cd43f84b8 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -2453,7 +2453,7 @@ static Rdb_transaction *get_or_create_tx(THD *thd) // TODO: this is called too many times.. 
O(#rows) if (tx == nullptr) { - if (rpl_skip_tx_api_var && thd->rli_slave) + if (rpl_skip_tx_api_var && thd->rgi_slave) tx= new Rdb_writebatch_impl(thd); else tx= new Rdb_transaction_impl(thd); @@ -2531,7 +2531,7 @@ static std::string rdb_xid_to_string(const XID& src) static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) #ifdef MARIAROCKS_NOT_YET ulonglong target_lsn __attribute__((__unused__))) -#else +#endif { DBUG_ASSERT(rdb != nullptr); rocksdb::Status s= rdb->SyncWAL(); @@ -8675,7 +8675,7 @@ int ha_rocksdb::external_lock(THD *thd, int lock_type) int binlog_format= my_core::thd_binlog_format(thd); bool unsafe_for_binlog= THDVAR(ha_thd(), unsafe_for_binlog); - if (lock_type == F_WRLCK && !thd->rli_slave && + if (lock_type == F_WRLCK && !thd->rgi_slave && !unsafe_for_binlog && binlog_format != BINLOG_FORMAT_ROW && binlog_format != BINLOG_FORMAT_UNSPEC && From fe0b57dfbead28895771d81f2fa92129869c3548 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 19 Oct 2016 13:16:51 +0000 Subject: [PATCH 039/233] MariaRocks port: disable more code that synchronizes with the binlog --- storage/rocksdb/ha_rocksdb.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 87cd43f84b8..1e672cad542 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -1632,11 +1632,13 @@ public: } else { +#ifdef MARIAROCKS_NOT_YET my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name, &m_mysql_log_offset, &m_mysql_gtid); binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset, m_mysql_gtid, get_write_batch()); +#endif return commit_no_binlog(); } } @@ -2559,6 +2561,7 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) { return 1; } +#ifdef MARIAROCKS_NOT_YET // disable prepare/commit if (prepare_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to prepare the whole 
transaction, or @@ -2594,7 +2597,7 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) DEBUG_SYNC(thd, "rocksdb.prepared"); } - +#endif return 0; } @@ -2660,9 +2663,12 @@ static void rdb_xid_from_string(const std::string& src, XID *dst) Reading last committed binary log info from RocksDB system row. The info is needed for crash safe slave/master to work. */ -static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, +static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len) +#ifdef MARIAROCKS_NOT_YET char* binlog_file, my_off_t* binlog_pos) +#endif { +#ifdef MARIAROCKS_NOT_YET if (binlog_file && binlog_pos) { char file_buf[FN_REFLEN+1]= {0}; @@ -2683,6 +2689,7 @@ static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, } } } +#endif if (len == 0 || xid_list == nullptr) { From 8c5912e9ee721c32349a778531b8fd7836b8bd40 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 19 Oct 2016 14:27:43 +0000 Subject: [PATCH 040/233] MariaRocks port: temporarily disable gap lock checking Also provide handler::is_using_full_key --- sql/handler.cc | 8 ++++++++ sql/handler.h | 1 + storage/rocksdb/ha_rocksdb.cc | 6 ++++++ 3 files changed, 15 insertions(+) diff --git a/sql/handler.cc b/sql/handler.cc index d7481f8e8ea..e21b7a94fa1 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2704,6 +2704,14 @@ int handler::ha_index_first(uchar * buf) return result; } +bool handler::is_using_full_key(key_part_map keypart_map, + uint actual_key_parts) +{ + return (keypart_map == HA_WHOLE_KEY) || + (keypart_map == ((key_part_map(1) << actual_key_parts) + - 1)); +} + int handler::ha_index_last(uchar * buf) { int result; diff --git a/sql/handler.h b/sql/handler.h index 401d6ad9440..775d21ed3ff 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3159,6 +3159,7 @@ public: size_t size) { return 0; } + bool is_using_full_key(key_part_map keypart_map, uint actual_key_parts); virtual int read_range_first(const key_range *start_key, const 
key_range *end_key, bool eq_range, bool sorted); diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 1e672cad542..7f377073bd4 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6229,6 +6229,7 @@ int ha_rocksdb::read_range_first(const key_range *start_key, result= ha_index_first(table->record[0]); else { +#ifdef MARIAROCKS_NOT_YET if (is_using_prohibited_gap_locks(table, is_using_full_unique_key( active_index, @@ -6237,6 +6238,7 @@ int ha_rocksdb::read_range_first(const key_range *start_key, { DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); } +#endif MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, { result= index_read_map_impl(table->record[0], @@ -8613,7 +8615,11 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, uint sql_command = my_core::thd_sql_command(thd); if ((lock_type == TL_READ && in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || +#ifdef MARIAROCKS_NOT_YET can_hold_read_locks_on_select(thd, lock_type)) +#else + false) +#endif { ulong tx_isolation = my_core::thd_tx_isolation(thd); if (sql_command != SQLCOM_CHECKSUM && From 86d963eb0888cb7f608a9185116c38de8310f7a6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 21 Oct 2016 20:05:40 +0000 Subject: [PATCH 041/233] MariaRocks port: provide timeout_message() Also, ER_LOCK_WAIT_TIMEOUT error doesn't have a string parameter in MariaDB. 
--- storage/rocksdb/ha_rocksdb.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 7f377073bd4..6f35d0f75b0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -1430,6 +1430,27 @@ class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier } }; + +#ifdef MARIAROCKS_NOT_YET +// ER_LOCK_WAIT_TIMEOUT error also has a reason in facebook/mysql-5.6 +#endif +String timeout_message(const char *command, const char *name1, + const char *name2) +{ + String msg; + msg.append("Timeout on "); + msg.append(command); + msg.append(": "); + msg.append(name1); + if (name2 && name2[0]) + { + msg.append("."); + msg.append(name2); + } + return msg; +} + + /* This is the base class for transactions when interacting with rocksdb. */ class Rdb_transaction From e22b271b39f5fc8de4ea4018f4642831338ac5d5 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 21 Oct 2016 21:11:47 +0000 Subject: [PATCH 042/233] MariaRocks port: compilation fixes --- sql/field.h | 2 +- storage/rocksdb/CMakeLists.txt | 5 ++++- storage/rocksdb/ha_rocksdb.cc | 8 ++++---- storage/rocksdb/rdb_i_s.cc | 16 ++++++++-------- storage/rocksdb/rdb_i_s.h | 16 ++++++++-------- storage/rocksdb/rdb_mutex_wrapper.cc | 13 ++++++++++--- storage/rocksdb/tools/mysql_ldb.cc | 1 + 7 files changed, 36 insertions(+), 25 deletions(-) diff --git a/sql/field.h b/sql/field.h index 0bb9da83827..f349f357244 100644 --- a/sql/field.h +++ b/sql/field.h @@ -3275,7 +3275,7 @@ public: memcpy(ptr,length,packlength); memcpy(ptr+packlength, &data,sizeof(char*)); } - void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, uchar *data) + void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, const uchar *data) { uchar *ptr_ofs= ADD_TO_PTR(ptr,ptr_diff,uchar*); store_length(ptr_ofs, packlength, length); diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 77c0dfe9c2e..1c8d0690611 100644 --- 
a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -4,7 +4,7 @@ IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".") ENDIF() -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -frtti") # get a list of rocksdb library source files # run with env -i to avoid passing variables @@ -36,6 +36,9 @@ ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX # (facebook/mysql-5.6 just compiles everything without -f*rtti, which means # everything is compiled with -frtti) # +# (also had to add -frtti above, because something that event_listener.cc +# includes requires it. So, now everything in MariaRocks is compiled with +# -frtti) set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) SET(ROCKSDB_SOURCES diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 6f35d0f75b0..153eb258357 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -10780,7 +10780,7 @@ double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) struct st_mysql_storage_engine rocksdb_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; -mysql_declare_plugin(rocksdb_se) +maria_declare_plugin(rocksdb_se) { MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ &rocksdb_storage_engine, /* Plugin Descriptor */ @@ -10793,8 +10793,8 @@ mysql_declare_plugin(rocksdb_se) 0x0001, /* version number (0.1) */ myrocks::rocksdb_status_vars, /* status variables */ myrocks::rocksdb_system_variables, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ + "1.0", /* string version */ + MariaDB_PLUGIN_MATURITY_ALPHA /* maturity */ }, myrocks::rdb_i_s_cfstats, myrocks::rdb_i_s_dbstats, @@ -10804,4 +10804,4 @@ myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, myrocks::rdb_i_s_index_file_map 
-mysql_declare_plugin_end; +maria_declare_plugin_end; diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index b251c3d00c4..a15371fea73 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -1052,7 +1052,7 @@ static int rdb_i_s_deinit(void *p __attribute__((__unused__))) static struct st_mysql_information_schema rdb_i_s_info= { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION }; -struct st_mysql_plugin rdb_i_s_cfstats= +struct st_maria_plugin rdb_i_s_cfstats= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1069,7 +1069,7 @@ struct st_mysql_plugin rdb_i_s_cfstats= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_dbstats= +struct st_maria_plugin rdb_i_s_dbstats= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1086,7 +1086,7 @@ struct st_mysql_plugin rdb_i_s_dbstats= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_perf_context= +struct st_maria_plugin rdb_i_s_perf_context= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1103,7 +1103,7 @@ struct st_mysql_plugin rdb_i_s_perf_context= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_perf_context_global= +struct st_maria_plugin rdb_i_s_perf_context_global= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1120,7 +1120,7 @@ struct st_mysql_plugin rdb_i_s_perf_context_global= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_cfoptions= +struct st_maria_plugin rdb_i_s_cfoptions= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1137,7 +1137,7 @@ struct st_mysql_plugin rdb_i_s_cfoptions= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_global_info= +struct st_maria_plugin rdb_i_s_global_info= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1154,7 +1154,7 @@ struct st_mysql_plugin rdb_i_s_global_info= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_ddl= +struct st_maria_plugin rdb_i_s_ddl= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1171,7 +1171,7 @@ struct st_mysql_plugin rdb_i_s_ddl= 0, /* flags */ }; -struct st_mysql_plugin 
rdb_i_s_index_file_map= +struct st_maria_plugin rdb_i_s_index_file_map= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h index 846defab961..4d47c0beda9 100644 --- a/storage/rocksdb/rdb_i_s.h +++ b/storage/rocksdb/rdb_i_s.h @@ -21,14 +21,14 @@ namespace myrocks { Declare INFORMATION_SCHEMA (I_S) plugins needed by MyRocks storage engine. */ -extern struct st_mysql_plugin rdb_i_s_cfstats; -extern struct st_mysql_plugin rdb_i_s_dbstats; -extern struct st_mysql_plugin rdb_i_s_perf_context; -extern struct st_mysql_plugin rdb_i_s_perf_context_global; -extern struct st_mysql_plugin rdb_i_s_cfoptions; -extern struct st_mysql_plugin rdb_i_s_global_info; -extern struct st_mysql_plugin rdb_i_s_ddl; -extern struct st_mysql_plugin rdb_i_s_index_file_map; +extern struct st_maria_plugin rdb_i_s_cfstats; +extern struct st_maria_plugin rdb_i_s_dbstats; +extern struct st_maria_plugin rdb_i_s_perf_context; +extern struct st_maria_plugin rdb_i_s_perf_context_global; +extern struct st_maria_plugin rdb_i_s_cfoptions; +extern struct st_maria_plugin rdb_i_s_global_info; +extern struct st_maria_plugin rdb_i_s_ddl; +extern struct st_maria_plugin rdb_i_s_index_file_map; } // namespace myrocks diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index 9b42a4f8145..278e4ed017b 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -19,10 +19,17 @@ /* This C++ file's header file */ #include "./rdb_mutex_wrapper.h" +/* The following are for THD_ENTER_COND: */ +#define MYSQL_SERVER 1 +#include "sql_priv.h" +#include "my_decimal.h" +#include "sql_class.h" + /* MyRocks header files */ #include "./ha_rocksdb.h" #include "./rdb_utils.h" + // Internal MySQL APIs not exposed in any header. 
extern "C" { @@ -92,8 +99,8 @@ Rdb_cond_var::WaitFor(std::shared_ptr mutex_arg, if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0) { - my_core::thd_enter_cond(current_thd, &m_cond, mutex_ptr, - &stage_waiting_on_row_lock2, &old_stage); + THD_ENTER_COND(current_thd, &m_cond, mutex_ptr, + &stage_waiting_on_row_lock2, &old_stage); /* After the mysql_cond_timedwait we need make this call @@ -226,7 +233,7 @@ void Rdb_mutex::UnLock() { std::shared_ptr old_stage = m_old_stage_info[current_thd]; m_old_stage_info.erase(current_thd); /* The following will call mysql_mutex_unlock */ - my_core::thd_exit_cond(current_thd, old_stage.get()); + THD_EXIT_COND(current_thd, old_stage.get()); return; } #endif diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc index ce51481690b..4664aa3f3d1 100644 --- a/storage/rocksdb/tools/mysql_ldb.cc +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
// +#include #include "rocksdb/ldb_tool.h" #include "../rdb_comparator.h" From fd4e83eb325c810d5838cf610eb29a1548d3ccec Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 21 Oct 2016 21:27:23 +0000 Subject: [PATCH 043/233] MariaRocks port: update ha_rocksdb::delete_all_rows() to match the definition in class handler --- storage/rocksdb/ha_rocksdb.cc | 2 +- storage/rocksdb/ha_rocksdb.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 153eb258357..73e4e907401 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -8253,7 +8253,7 @@ int ha_rocksdb::index_end() } -int ha_rocksdb::delete_all_rows(ha_rows* nrows) +int ha_rocksdb::delete_all_rows() { DBUG_ENTER("ha_rocksdb::delete_all_rows"); /* diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index bb41766f96d..04544ddce74 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -911,7 +911,7 @@ public: __attribute__((__warn_unused_result__)); int external_lock(THD *thd, int lock_type) __attribute__((__warn_unused_result__)); - int delete_all_rows(ha_rows* nrows) __attribute__((__warn_unused_result__)); + int delete_all_rows() __attribute__((__warn_unused_result__)); int truncate() __attribute__((__warn_unused_result__)); int reset() override From 680a206b133b19da35f2290440612d28c954fd8e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 23 Oct 2016 18:28:07 +0000 Subject: [PATCH 044/233] MariaRocks port: compile the needed files with -frtti - RocksDB uses RTTI (e.g. it uses dynamic_cast) - mysql-5.6 has RTTI enabled for the whole server, so there's no conflict. - MariaDB doesn't have RTTI enabled So the solution is to enable RTTI for - RocksDB files - Parts of MyRocks that interface with RocksDB features that require RTTI. 
(If one just enables RTTI for all of MyRocks, they will get a "undefined reference to `typeinfo for handler'" link error) --- storage/rocksdb/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 1c8d0690611..059bf61ac59 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -4,7 +4,7 @@ IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".") ENDIF() -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -frtti") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") # get a list of rocksdb library source files # run with env -i to avoid passing variables @@ -40,6 +40,8 @@ ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX # includes requires it. So, now everything in MariaRocks is compiled with # -frtti) set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) +set_source_files_properties(event_listener.cc PROPERTIES COMPILE_FLAGS -frtti) + SET(ROCKSDB_SOURCES ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h @@ -116,6 +118,7 @@ IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb_cmd.cc ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump_tool.cc ) + set_source_files_properties(${ROCKSDB_TOOL_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) TARGET_LINK_LIBRARIES(sst_dump rocksdb_se) From 015617879a7144657cbbdc356d2681f52dddb197 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 24 Oct 2016 10:35:56 +0000 Subject: [PATCH 045/233] MariaRocks port: fix a typo in test_if_order_by_key() - Off-by-one error in handler->index_flags() parameter caused it to be called for the keypart that's after the last key part. 
--- sql/sql_select.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 1f79010e993..899fdab8229 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -20569,7 +20569,7 @@ static int test_if_order_by_key(JOIN *join, key_parts= (uint) (key_part - table->key_info[idx].key_part); if (reverse == -1 && - !(table->file->index_flags(idx, user_defined_kp, 1) & HA_READ_PREV)) + !(table->file->index_flags(idx, user_defined_kp-1, 1) & HA_READ_PREV)) reverse= 0; // Index can't be used if (have_pk_suffix && reverse == -1) From 8d8858c10aae562868218509aea4a01b1777e8cd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 24 Oct 2016 10:38:18 +0000 Subject: [PATCH 046/233] MariaRocks: trying to get the MTR tests to work - Add include/have_rocksdb.inc (TODO: is there any way to have this file somewhere under storage/rocksdb/mysql-test ?) - Make rocksdb.test require have_partition.inc because it uses partitioned tables --- mysql-test/include/have_rocksdb.inc | 17 +++++++++++++++++ .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 1 + 2 files changed, 18 insertions(+) create mode 100644 mysql-test/include/have_rocksdb.inc diff --git a/mysql-test/include/have_rocksdb.inc b/mysql-test/include/have_rocksdb.inc new file mode 100644 index 00000000000..c76d851e339 --- /dev/null +++ b/mysql-test/include/have_rocksdb.inc @@ -0,0 +1,17 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('DEFAULT')`) +{ + --skip Test requires default engine RocksDB +} + +# MARIAROCKS_NOT_YET: replication doesn't work yet: +#if (`select count(*) = 0 from information_schema.tables where engine='rocksdb' and table_name='slave_gtid_info'`) +#{ +# --skip Test requires default engine RocksDB +#} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. 
+set global rocksdb_force_flush_memtable_now = true; +--enable_query_log + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index f612cb0997b..9808d50a092 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # RocksDB Storage Engine tests From 3ac33f8cdb57148c2a4c1b46656a256e4e1e2556 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 24 Oct 2016 12:04:01 +0000 Subject: [PATCH 047/233] MariaRocks: fix a compilation problem rdb_cf_options.cc must be compiled with RTTI --- storage/rocksdb/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 059bf61ac59..247f6b06ad4 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -41,6 +41,7 @@ ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX # -frtti) set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) set_source_files_properties(event_listener.cc PROPERTIES COMPILE_FLAGS -frtti) +set_source_files_properties(rdb_cf_options.cc PROPERTIES COMPILE_FLAGS -frtti) SET(ROCKSDB_SOURCES From 1d1b10e93cff9d2db796ceac2d6be25ee8a8309c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 24 Oct 2016 20:51:44 +0000 Subject: [PATCH 048/233] MariaRocks: rocksdb.rocksdb fails with a duplicate key error In MariaDB, Field::make_sort_key stores NULL-indicator byte for the field. In MySQL, it doesn't, so MyRocks stores the NULL-indicator itself. Switch to using Field::sort_string, which is the same as Field::make_sort_key in MySQL. 
--- storage/rocksdb/rdb_datadic.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 847343180ee..472af8141a7 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -741,7 +741,7 @@ void rdb_pack_with_make_sort_key(Rdb_field_packing *fpi, Field *field, DBUG_ASSERT(*dst != nullptr); const int max_len= fpi->m_max_image_len; - field->make_sort_key(*dst, max_len); + field->sort_string(*dst, max_len); *dst += max_len; } From bc646ee8810ae2a2342c61e2ca37320c7d8fb071 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 24 Oct 2016 20:55:26 +0000 Subject: [PATCH 049/233] MariaRocks: fix a bug in MariaDB: SHOW STATUS LIKE shows extra rows SHOW STATUS LIKE 'pattern' returned Rocksdb_XXX status variables that had SHOW_FUNC type but didn't match the pattern (for example Rocksdb_block_cache_add). Among other things, this caused MTR to assume that each testcase has damaged the execution environment. The issue was a unitialized variable and then a typo in the condition that checks if variable name matches the pattern. 
--- sql/sql_show.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index de284cbe39d..d48316e4047 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3394,7 +3394,7 @@ static bool show_status_array(THD *thd, const char *wild, for (; variables->name; variables++) { - bool wild_checked; + bool wild_checked= false; strnmov(prefix_end, variables->name, len); name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ @@ -3460,8 +3460,8 @@ static bool show_status_array(THD *thd, const char *wild, else { if ((wild_checked || - (wild && wild[0] && wild_case_compare(system_charset_info, - name_buffer, wild))) && + !(wild && wild[0] && wild_case_compare(system_charset_info, + name_buffer, wild))) && (!cond || cond->val_int())) { const char *pos; // We assign a lot of const's From e9ee999e776819d42be999b5efb7585c499a8591 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 25 Oct 2016 15:01:27 +0000 Subject: [PATCH 050/233] MariaRocks port: "get rid of Invalid (old?) table or database name" error Running MTR tests produces an error like this after the test run: ... 140562896741120 [ERROR] Invalid (old?) 
table or database name '.rocksdb' There seems to be no way to have the MariaRocks to prevent these, so the current way to fix them is to have --ignore-db-dirs in my.cnf --- storage/rocksdb/mysql-test/rocksdb/suite.opt | 1 + 1 file changed, 1 insertion(+) create mode 100644 storage/rocksdb/mysql-test/rocksdb/suite.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt new file mode 100644 index 00000000000..4942baf3b85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -0,0 +1 @@ +--ignore-db-dirs=.rocksdb From f23a0093e1a2069010acd72c53fba99dd71e0ff1 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 26 Oct 2016 23:56:59 +0300 Subject: [PATCH 051/233] MariaRocks port: fix a few test result differences - MariaDB has different wording for a few error messages - MySQL changed Extra='' to Extra=NULL for EXPLAIN outputs, MariaDB didnt - The testsuite in storage/rocksdb/mysql-test needs paths to include files adjusted - In SHOW COLUMNS output, Extra column is "NULL" in MariaDB vs '' in MySQL --- .../mysql-test/rocksdb/{t => include}/rocksdb_icp.inc | 0 storage/rocksdb/mysql-test/rocksdb/r/issue111.result | 5 +++++ storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result | 6 +++--- storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/type_blob.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/type_bool.result | 4 ++-- storage/rocksdb/mysql-test/rocksdb/r/type_char.result | 4 ++-- storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result | 6 +++--- storage/rocksdb/mysql-test/rocksdb/r/type_int.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/type_text.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result | 6 +++--- storage/rocksdb/mysql-test/rocksdb/t/partition.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test | 2 +- 
storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test | 2 +- storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test | 2 +- storage/rocksdb/mysql-test/rocksdb/t/type_char.test | 2 +- 17 files changed, 28 insertions(+), 22 deletions(-) rename storage/rocksdb/mysql-test/rocksdb/{t => include}/rocksdb_icp.inc (100%) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc similarity index 100% rename from storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc rename to storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result index 315d2d2b50b..e15519c3d7a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result @@ -1,3 +1,5 @@ +connect con2,localhost,root,,; +connection default; create table t1 ( pk int not null primary key, col1 int not null, @@ -24,9 +26,12 @@ pk col1 col2 8 8 8 9 9 9 # Connect with another connection and make a conflicting change +connection con2; begin; update t1 set col2=123456 where pk=0; commit; +connection default; update t1 set col2=col2+1 where col1 < 10 limit 5; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +disconnect con2; drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result index acf62d0bb70..fd41048d253 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -83,7 +83,7 @@ partition by list (b*a) (partition x1 values in (1) tablespace ts1, partition x2 values in (3,11,5,7) tablespace ts2, partition x3 values in (16,8,5+19,70-43) tablespace ts3); create table t2(b binary(2)); -set session optimizer_switch=5; +set session optimizer_switch='materialization=off'; 
insert into t1(a,b) values(1,7); select a from t1 where a in (select a from t1 where a in (select b from t2)); a diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result index d20bbc9b775..8cf38fd207d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result @@ -21,7 +21,7 @@ insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; explain select * from t2 force index (a) where a=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ref a a 4 const # NULL +1 SIMPLE t2 ref a a 4 const # select * from t2 force index (a) where a=0; pk a b 0 0 0 @@ -38,7 +38,7 @@ pk a b explain select * from t2 force index (a) where a=2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ref a a 4 const # NULL +1 SIMPLE t2 ref a a 4 const # select * from t2 force index (a) where a=2; pk a b 20 2 20 @@ -54,7 +54,7 @@ pk a b explain select * from t2 force index (a) where a=3 and pk=33; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 const a a 8 const,const # NULL +1 SIMPLE t2 const a a 8 const,const # select * from t2 force index (a) where a=3 and pk=33; pk a b 33 3 33 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result index 1544256f194..e6ff6e1ca32 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result @@ -29,5 +29,5 @@ DROP TABLE t1; CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); HANDLER t1 OPEN AS h1; -ERROR HY000: Table storage engine for 'h1' doesn't have this option +ERROR HY000: Storage engine ROCKSDB of the table `test`.`t1` doesn't have this option DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result index e36c91658fd..3b2bee74b6a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result @@ -53,5 +53,5 @@ LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); -ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result index dd9dc6d1f9f..4abfdb49f37 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result @@ -67,7 +67,7 @@ b1 b2 127 -128 2 3 ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNSIGNED' at line 1 ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL ; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL' at line 1 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result index 
1e5ac0d44f5..1786dfae1e7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_char.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result @@ -13,7 +13,7 @@ c char(1) YES NULL c0 char(0) YES NULL c1 char(1) YES NULL c20 char(20) YES NULL -c255 char(255) NO PRI +c255 char(255) NO PRI NULL INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); SELECT c,c0,c1,c20,c255 FROM t1; @@ -66,7 +66,7 @@ c1 DROP TABLE t1; CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five'); -SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1; +(SELECT * FROM t1 LIMIT 1) UNION (SELECT * FROM t1); a five four diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result index 45fd402f9b3..055952ea55f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result @@ -123,9 +123,9 @@ d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ; -ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66'. 
Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ; -ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ; -ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result index 06866e9b5bb..306042912d0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_int.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result @@ -208,5 +208,5 @@ i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 ALTER TABLE t1 ADD COLUMN i257 INT(257) ; -ERROR 42000: Display width out of range for column 'i257' (max = 255) +ERROR 42000: Display width out of range for 'i257' (max = 255) DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result index bd10e3888c9..4e118bb9bc5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_text.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result @@ -53,5 +53,5 @@ LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) ; -ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) 
+ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result index 06afbb92802..6de8c0331cd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result @@ -10,12 +10,12 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varbinary(0) YES NULL v1 varbinary(1) YES NULL -v64 varbinary(64) NO PRI +v64 varbinary(64) NO PRI NULL v65000 varbinary(65000) YES NULL CREATE TABLE t2 (v VARBINARY(65532) , PRIMARY KEY(v(255))) ENGINE=rocksdb; SHOW COLUMNS IN t2; Field Type Null Key Default Extra -v varbinary(65532) NO PRI +v varbinary(65532) NO PRI NULL INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. @@ -87,7 +87,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varbinary(0) YES NULL v1 varbinary(1) YES NULL -v64 varbinary(64) NO PRI +v64 varbinary(64) NO PRI NULL v65000 varbinary(65000) YES NULL v65536 mediumblob YES NULL DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test index 9f3ccb9dd6c..d5e13fea0a7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # Create tables with partitions and try to update/select from them. 
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test index 175476974df..8bd93845e86 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test @@ -3,7 +3,7 @@ let $cf_name=cf1; ---source suite/rocksdb/t/rocksdb_icp.inc +--source include/rocksdb_icp.inc --echo # --echo # Issue #67: Inefficient index condition pushdown diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test index c3fd43e5b7e..33914a4eac6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test @@ -3,5 +3,5 @@ let $cf_name=rev:cf1; ---source suite/rocksdb/t/rocksdb_icp.inc +--source include/rocksdb_icp.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test index 82fb70b0596..53ca05c7fdc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test @@ -88,7 +88,7 @@ create TABLE t1(a int,b int,c int,primary key(a,b)) partition x2 values in (3,11,5,7) tablespace ts2, partition x3 values in (16,8,5+19,70-43) tablespace ts3); create table t2(b binary(2)); -set session optimizer_switch=5; +set session optimizer_switch='materialization=off'; insert into t1(a,b) values(1,7); select a from t1 where a in (select a from t1 where a in (select b from t2)); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test index 5bcf23b39ee..347f83cad33 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_char.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test @@ -15,5 +15,5 @@ DROP TABLE t1; # Issue #259 CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); INSERT INTO t1 VALUES 
('one'),('two'),('three'),('four'),('five'); -SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1; +(SELECT * FROM t1 LIMIT 1) UNION (SELECT * FROM t1); DROP TABLE t1; From 9826edb6b896a6fafedcebcf37e1b844f827d94b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 27 Oct 2016 00:32:59 +0300 Subject: [PATCH 052/233] MariaRocks port: fix a few test result differences, part#2. --- .../rocksdb/mysql-test/rocksdb/r/1st.result | 2 +- .../r/corrupted_data_reads_debug.result | 30 +++++++++---------- .../rocksdb/r/delete_before_lock.result | 7 +++++ .../mysql-test/rocksdb/r/describe.result | 2 +- .../mysql-test/rocksdb/r/handler_basic.result | 12 ++++++++ .../rocksdb/r/negative_stats.result | 4 +-- .../rocksdb/r/type_enum_indexes.result | 2 +- .../mysql-test/rocksdb/r/type_set.result | 4 +-- .../rocksdb/t/corrupted_data_reads_debug.test | 20 ++++++------- .../mysql-test/rocksdb/t/dup_key_update.test | 4 +-- .../rocksdb/t/get_error_message.test | 1 + .../mysql-test/rocksdb/t/negative_stats.test | 4 +-- .../rocksdb/t/rocksdb_concurrent_delete.test | 8 ++--- .../rocksdb/t/validate_datadic.test | 1 + 14 files changed, 61 insertions(+), 40 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/1st.result b/storage/rocksdb/mysql-test/rocksdb/r/1st.result index 7d1e8607645..323b614ea36 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/1st.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/1st.result @@ -3,7 +3,7 @@ CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=ro SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `pk` int(11) NOT NULL DEFAULT '0', + `pk` int(11) NOT NULL DEFAULT 0, `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL, PRIMARY KEY (`pk`) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result index 28c200ebf30..f9f64a66f05 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -18,22 +18,22 @@ pk col1 3 3 set @tmp1=@@rocksdb_verify_checksums; set rocksdb_verify_checksums=1; -set session debug= "+d,myrocks_simulate_bad_row_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine -set session debug= "-d,myrocks_simulate_bad_row_read1"; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; set rocksdb_verify_checksums=@tmp1; select * from t1 where pk=1; pk col1 1 1 -set session debug= "+d,myrocks_simulate_bad_row_read2"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read2"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine -set session debug= "-d,myrocks_simulate_bad_row_read2"; -set session debug= "+d,myrocks_simulate_bad_row_read3"; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read2"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read3"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine -set session debug= "-d,myrocks_simulate_bad_row_read3"; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read3"; insert into t1 values(4,'0123456789'); select * from t1; pk col1 @@ -54,10 +54,10 @@ insert into t2 values ('ABCD',1); select * from t2; pk col1 ABCD 1 -set session debug= "+d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 122 from storage engine -set session debug= "-d,myrocks_simulate_bad_pk_read1"; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session 
debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; create table t2 ( pk varchar(4) not null primary key, @@ -67,8 +67,8 @@ insert into t2 values ('ABCD',1); select * from t2; pk col1 ABCD 1 -set session debug= "+d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 122 from storage engine -set session debug= "-d,myrocks_simulate_bad_pk_read1"; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result index 402ef539ffd..a8ea5e1677f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result @@ -1,15 +1,22 @@ +connect con, localhost, root,,; +connection default; set debug_sync='RESET'; drop table if exists t1; create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb; insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2); +connection con; set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go'; update t1 set value=100 where id1=1; +connection default; set debug_sync='now WAIT_FOR parked'; delete from t1 where id1=1 and id2=1; set debug_sync='now SIGNAL go'; +connection con; select * from t1 where id1=1 for update; id1 id2 value 1 2 100 1 3 100 +connection default; +disconnect con; set debug_sync='RESET'; drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/describe.result b/storage/rocksdb/mysql-test/rocksdb/r/describe.result index ec828577ae5..6d43f89c9bd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/describe.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/describe.result @@ -6,7 +6,7 @@ INSERT INTO t2 (a,b) VALUES (1, 'bar'); CREATE TABLE t3 (a INT, b CHAR(8), pk INT 
AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; DESCRIBE t1; Field Type Null Key Default Extra -a int(11) NO PRI 0 +a int(11) NO PRI NULL b char(8) YES NULL DESC t2 a; Field Type Null Key Default Extra diff --git a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result index 1ab8bd7678c..b21764ed3a7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result @@ -29,7 +29,9 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 FLUSH STATUS; SELECT * FROM t1 WHERE b=6; @@ -42,7 +44,9 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 1 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 FLUSH STATUS; SELECT * FROM t1; @@ -63,7 +67,9 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 10 FLUSH STATUS; SELECT * FROM t1 WHERE b <=5 ORDER BY b; @@ -77,7 +83,9 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 FLUSH STATUS; SELECT * FROM t1 WHERE id >=8 ORDER BY id; @@ -92,7 +100,9 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 FLUSH STATUS; SELECT * FROM t1 WHERE id < 8 ORDER BY id; @@ -110,6 +120,8 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 6 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 +Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result 
b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result index e45c5d6efc7..61c1d7e9bdb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; -set session debug= "+d,myrocks_simulate_negative_stats"; +set session debug_dbug= "+d,myrocks_simulate_negative_stats"; SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END true -set session debug= "-d,myrocks_simulate_negative_stats"; +set session debug_dbug= "-d,myrocks_simulate_negative_stats"; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result index 37d005485d6..70bbc840454 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result @@ -49,7 +49,7 @@ t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE t1 1 b 1 b A 500 NULL NULL YES LSMTREE EXPLAIN SELECT DISTINCT b FROM t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL b 2 NULL # NULL +1 SIMPLE t1 index NULL b 2 NULL # SELECT DISTINCT b FROM t1; b test1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result index f401af46536..5f875d147ec 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_set.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result @@ -9,7 +9,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a set('') YES NULL b set('test1','test2','test3','test4','test5') YES NULL -c 
set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL INSERT INTO t1 (a,b,c) VALUES ('','test2,test3','01,34,44,,23'), ('',5,2), @@ -38,7 +38,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a set('') YES NULL b set('test1','test2','test3','test4','test5') YES NULL -c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL e set('a','A') YES NULL ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') ; ERROR HY000: Too many strings for column f and SET diff --git a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test index a9ee98dfda5..34f1e0f2270 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test @@ -23,23 +23,23 @@ select * from t1; set @tmp1=@@rocksdb_verify_checksums; set rocksdb_verify_checksums=1; -set session debug= "+d,myrocks_simulate_bad_row_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; --error ER_GET_ERRNO select * from t1 where pk=1; -set session debug= "-d,myrocks_simulate_bad_row_read1"; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; set rocksdb_verify_checksums=@tmp1; select * from t1 where pk=1; -set session debug= "+d,myrocks_simulate_bad_row_read2"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read2"; --error ER_GET_ERRNO select * from t1 where pk=1; -set session debug= "-d,myrocks_simulate_bad_row_read2"; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read2"; -set session debug= "+d,myrocks_simulate_bad_row_read3"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read3"; --error ER_GET_ERRNO select * from t1 where pk=1; -set session debug= "-d,myrocks_simulate_bad_row_read3"; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read3"; insert into t1 values(4,'0123456789'); select * from t1; @@ -57,10 +57,10 @@ create table t2 ( insert into t2 
values ('ABCD',1); select * from t2; -set session debug= "+d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; --error ER_GET_ERRNO select * from t2; -set session debug= "-d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; @@ -72,9 +72,9 @@ create table t2 ( insert into t2 values ('ABCD',1); select * from t2; -set session debug= "+d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; --error ER_GET_ERRNO select * from t2; -set session debug= "-d,myrocks_simulate_bad_pk_read1"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test index ebe18ab2e95..a9a09d44e01 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test @@ -16,7 +16,7 @@ CREATE TABLE t2 (id1 INT, id2 INT, id3 INT, UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; ---source suite/rocksdb/include/dup_key_update.inc +--source include/dup_key_update.inc # Cleanup DROP TABLE t1; @@ -34,7 +34,7 @@ CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, PRIMARY KEY (id1, id2, id3), UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; ---source suite/rocksdb/include/dup_key_update.inc +--source include/dup_key_update.inc # Cleanup DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test index 4f07bf206f2..31013b75f63 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # Create tables with partitions and try to generate an error while creating diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test index e3e0cf898a1..9de41d17976 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test @@ -19,8 +19,8 @@ while ($i <= $max) { SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; -set session debug= "+d,myrocks_simulate_negative_stats"; +set session debug_dbug= "+d,myrocks_simulate_negative_stats"; SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; -set session debug= "-d,myrocks_simulate_negative_stats"; +set session debug_dbug= "-d,myrocks_simulate_negative_stats"; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test index ecb4b2a3609..52f9485e6b7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test @@ -9,16 +9,16 @@ let $order=ASC; let $comment=""; ---source suite/rocksdb/include/rocksdb_concurrent_delete.inc +--source include/rocksdb_concurrent_delete.inc let $order=DESC; let $comment=""; ---source suite/rocksdb/include/rocksdb_concurrent_delete.inc +--source include/rocksdb_concurrent_delete.inc let $order=ASC; let $comment="rev:cf2"; ---source suite/rocksdb/include/rocksdb_concurrent_delete.inc +--source include/rocksdb_concurrent_delete.inc let $order=DESC; let $comment="rev:cf2"; ---source suite/rocksdb/include/rocksdb_concurrent_delete.inc +--source include/rocksdb_concurrent_delete.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test index b34c85eb4c4..183b62f1e80 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test @@ 
-1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # Validate that the server starts when everything is okay, but detects errors From 1d1211ab1a70f2e16cb9ca4ce2182e5712a97383 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Oct 2016 00:23:58 +0000 Subject: [PATCH 053/233] MariaRocks port: trivial Maria-fication of test results Updated test results for obvious MariaDB vs MySQL differences --- .../mysql-test/rocksdb/r/alter_table.result | 2 +- .../mysql-test/rocksdb/r/col_opt_null.result | 4 ++-- .../rocksdb/r/col_opt_unsigned.result | 8 ++++---- .../rocksdb/r/col_opt_zerofill.result | 6 +++--- .../rocksdb/mysql-test/rocksdb/r/misc.result | 2 +- .../mysql-test/rocksdb/r/rpl_statement.result | 6 ++++++ .../rocksdb/r/rpl_statement_not_found.result | 12 ++++++++++++ .../mysql-test/rocksdb/r/type_bit.result | 6 +++--- .../mysql-test/rocksdb/r/type_float.result | 6 ++---- .../mysql-test/rocksdb/r/type_varchar.result | 18 +++++++++--------- .../mysql-test/rocksdb/t/type_float.inc | 6 ++++-- 11 files changed, 47 insertions(+), 29 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result index b37bf17e1ac..a4e00626122 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result @@ -16,7 +16,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `pk` int(11) NOT NULL, - `a` int(11) DEFAULT '0', + `a` int(11) DEFAULT 0, `c` char(8) DEFAULT NULL, `b` int(11) DEFAULT NULL, PRIMARY KEY (`pk`) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result index 051784528b1..51f75c1004b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result @@ -254,7 +254,7 @@ a+0 b+0 c+0 d+0 1 0 18446744073709551615 0 DROP TABLE t1; CREATE TABLE 
t1 (pk INT PRIMARY KEY, a BIT(65) NULL) ENGINE=rocksdb; -ERROR 42000: Display width out of range for column 'a' (max = 64) +ERROR 42000: Display width out of range for 'a' (max = 64) DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( c BIT NULL, @@ -340,7 +340,7 @@ LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); -ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result index 66b6a5fe799..f5d87b26d57 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result @@ -148,11 +148,11 @@ d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) UNSIGNED; -ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) UNSIGNED; -ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66_6'. 
Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) UNSIGNED; -ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 DROP TABLE t1; CREATE TABLE t1 ( a DECIMAL UNSIGNED, @@ -709,7 +709,7 @@ i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 ALTER TABLE t1 ADD COLUMN i257 INT(257) UNSIGNED; -ERROR 42000: Display width out of range for column 'i257' (max = 255) +ERROR 42000: Display width out of range for 'i257' (max = 255) DROP TABLE t1; CREATE TABLE t1 ( t TINYINT UNSIGNED, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result index 823ad2f2fc4..a846e2925b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result @@ -148,11 +148,11 @@ d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ZEROFILL; -ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66'. 
Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ZEROFILL; -ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ZEROFILL; -ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 DROP TABLE t1; CREATE TABLE t1 ( a DECIMAL ZEROFILL, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result index 70c270d5538..a7ee3f9a199 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/misc.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result @@ -23,7 +23,7 @@ user(), 'utf8_general_ci', 'select 1'); SHOW EVENTS; -ERROR 42000: This version of MySQL doesn't yet support 'MICROSECOND' +ERROR 42000: This version of MariaDB doesn't yet support 'MICROSECOND' DROP EVENT ev1; SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result index 315f040899e..6f9a2be0f4e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result @@ -3,7 +3,9 @@ Warnings: Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
[connection master] +connection master; drop table if exists t1; +connection master; select @@binlog_format; @@binlog_format STATEMENT @@ -24,11 +26,13 @@ ERROR HY000: Can't execute updates on master with binlog_format != ROW. set binlog_format=row; insert into t1 values (1),(2),(3); include/sync_slave_sql_with_master.inc +connection slave; select * from t1; pk 1 2 3 +connection master; drop table t1; create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; insert into t1 values (1,1,1); @@ -40,6 +44,7 @@ update t1 set value2=100 where id=1; update t1 set value2=200 where id=2; update t1 set value2=300 where id=3; include/sync_slave_sql_with_master.inc +connection slave; select * from t1 where id=1; id value value2 1 1 100 @@ -49,6 +54,7 @@ id value value2 select * from t1 where id=3; id value value2 3 1 300 +connection master; drop table t1; set binlog_format=row; include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result index 8cdfa910739..8d7f3460e86 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result @@ -3,7 +3,9 @@ Warnings: Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
[connection master] +connection master; drop table if exists t1; +connection master; create table t0 (a int) engine=myisam; insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); create table t1(a int) engine=myisam; @@ -19,26 +21,34 @@ insert into t2 select a,a,a,a from t1; create table t3 like t2; insert into t3 select * from t2; include/sync_slave_sql_with_master.inc +connection slave; set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; include/stop_slave.inc include/start_slave.inc +connection master; update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; +connection slave; set debug_sync= 'now WAIT_FOR Reached'; set global debug = ''; set sql_log_bin=0; delete from t2 where pk=2; delete from t2 where pk=3; set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +connection master; include/sync_slave_sql_with_master.inc +connection slave; select * from t2 where pk < 5; pk kp1 kp2 col1 0 0 0 0 1 1 1 1 4 4 4 4 +connection slave; set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; include/stop_slave.inc include/start_slave.inc +connection master; update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; +connection slave; call mtr.add_suppression("Deadlock found when trying to get lock"); set debug_sync= 'now WAIT_FOR Reached'; set global debug = ''; @@ -46,7 +56,9 @@ set sql_log_bin=0; delete from t3 where pk=2; delete from t3 where pk=3; set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +connection master; include/sync_slave_sql_with_master.inc +connection slave; select * from t3 where pk < 5; pk kp1 kp2 col1 0 0 0 0 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result index d385c0d4670..fa84cbde8c8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result @@ -10,7 +10,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a bit(1) YES NULL b bit(20) YES 
NULL -c bit(64) NO PRI b'0' +c bit(64) NO PRI NULL d bit(1) YES NULL ALTER TABLE t1 DROP COLUMN d; ALTER TABLE t1 ADD COLUMN d BIT(0) ; @@ -18,7 +18,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a bit(1) YES NULL b bit(20) YES NULL -c bit(64) NO PRI b'0' +c bit(64) NO PRI NULL d bit(1) YES NULL INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; @@ -50,4 +50,4 @@ a+0 b+0 c+0 d+0 1 0 18446744073709551615 0 DROP TABLE t1; CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) ) ENGINE=rocksdb; -ERROR 42000: Display width out of range for column 'a' (max = 64) +ERROR 42000: Display width out of range for 'a' (max = 64) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result index 0f78926c89a..fbb44d1552c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_float.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result @@ -226,7 +226,7 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 6 ); Warnings: -Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated Warning 1264 Out of range value for column 'f' at row 1 Warning 1264 Out of range value for column 'f0' at row 1 Warning 1264 Out of range value for column 'r1_1' at row 1 @@ -298,9 +298,7 @@ r1_1 0.9 r1_1 0.9 r1_1 0.9 ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ; -ERROR 42000: Display width out of range for column 'd0_0' (max = 255) ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ; -ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ; -ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. 
DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index 5c449da2b8f..afb4d5013f2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -21,12 +21,12 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varchar(0) YES NULL v1 varchar(1) YES NULL -v64 varchar(64) NO PRI +v64 varchar(64) NO PRI NULL v65000 varchar(65000) YES NULL CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; SHOW COLUMNS IN t2; Field Type Null Key Default Extra -v varchar(65532) NO PRI +v varchar(65532) NO PRI NULL INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. 
@@ -133,7 +133,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varchar(0) YES NULL v1 varchar(1) YES NULL -v64 varchar(64) NO PRI +v64 varchar(64) NO PRI NULL v65000 varchar(65000) YES NULL v65536 mediumtext YES NULL DROP TABLE t1, t2; @@ -418,7 +418,7 @@ insert into t1 values (3, 'a \t', 'a-tab'); explain select col1, hex(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +1 SIMPLE t1 ALL NULL NULL NULL NULL # select col1, hex(col1) from t1; col1 hex(col1) ab 00610062 @@ -448,7 +448,7 @@ insert into t1 values(23, repeat(' ',18), '18x-space'); explain select pk, col1, hex(col1), length(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 # NULL NULL NULL NULL # NULL +1 SIMPLE t1 # NULL NULL NULL NULL # select pk, col1, hex(col1), length(col1) from t1; pk col1 hex(col1) length(col1) 10 0 @@ -530,7 +530,7 @@ insert into t1 values (3, 'a \t', 'a-tab'); explain select col1, hex(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +1 SIMPLE t1 ALL NULL NULL NULL NULL # select col1, hex(col1) from t1; col1 hex(col1) ab 6162 @@ -560,7 +560,7 @@ insert into t1 values(23, repeat(' ',18), '18x-space'); explain select pk, col1, hex(col1), length(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 # NULL NULL NULL NULL # NULL +1 SIMPLE t1 # NULL NULL NULL NULL # select pk, col1, hex(col1), length(col1) from t1; pk col1 hex(col1) length(col1) 10 0 @@ -642,7 +642,7 @@ insert into t1 values (3, 'a \t', 'a-tab'); explain select col1, hex(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL +1 SIMPLE t1 ALL NULL NULL NULL NULL # select col1, hex(col1) from t1; col1 hex(col1) ab 00610062 @@ -672,7 +672,7 @@ insert into t1 values(23, repeat(' ',18), '18x-space'); 
explain select pk, col1, hex(col1), length(col1) from t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 # NULL NULL NULL NULL # NULL +1 SIMPLE t1 # NULL NULL NULL NULL # select pk, col1, hex(col1), length(col1) from t1; pk col1 hex(col1) length(col1) 10 0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc index 2f37e55b8d6..8403b088f9d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc @@ -95,13 +95,15 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( --sorted_result --query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 ---error ER_TOO_BIG_DISPLAYWIDTH +# Error occurs on MySQL but not on MariaDB: +# --error ER_TOO_BIG_DISPLAYWIDTH eval ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) $extra_col_opts; --error ER_TOO_BIG_PRECISION eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) $extra_col_opts; ---error ER_TOO_BIG_SCALE +# Error occurs on MySQL but not on MariaDB: +# --error ER_TOO_BIG_SCALE eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) $extra_col_opts; DROP TABLE t1; From 15f2bcfa94d82e779de490bd7002b82cb4a17022 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Oct 2016 00:30:01 +0000 Subject: [PATCH 054/233] MariaRocks port: get rid of "invalid table name" in the error log filename_to_tablename() in MariaDB produces warnings when its is called for temporary file names. 
Don't call this function for temporary filename (as we don't need its result anyway) --- storage/rocksdb/ha_rocksdb.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 73e4e907401..06609e4836a 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -5089,9 +5089,7 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, DBUG_ENTER("ha_rocksdb::create_cfs"); char tablename_sys[NAME_LEN + 1]; - - my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(), - tablename_sys, sizeof(tablename_sys)); + bool tsys_set= false; /* The first loop checks the index parameters and creates @@ -5105,6 +5103,13 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, !is_hidden_pk(i, table_arg, tbl_def_arg) && tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) { + if (!tsys_set) + { + tsys_set= true; + my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(), + tablename_sys, sizeof(tablename_sys)); + } + for (uint part= 0; part < table_arg->key_info[i].ext_key_parts; part++) { if (!rdb_is_index_collation_supported( From 4462e77afad40f32dbb6dbcdb34eef0225a7e149 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Oct 2016 13:23:42 +0000 Subject: [PATCH 055/233] MariaRocks port: update result for rocksdb.type_char_indexes (see MDEV-11172) --- storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result index e8b913288c5..413c3f69f23 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result @@ -45,7 +45,7 @@ t1 1 v16 1 v16 A 500 NULL NULL YES LSMTREE INSERT INTO t1 (c,c20,v16,v128,pk) VALUES 
('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4'); EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index v16 v16 19 NULL # Using where; Using index +1 SIMPLE t1 index v16 v16 21 NULL # Using where; Using index SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%'; SUBSTRING(v16,7,3) r1a From 997c86c76c3a5e92f297b9740dbc9b6a65bb771d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Oct 2016 21:07:16 +0000 Subject: [PATCH 056/233] MariaRocks port: fix rocksdb.handler_basic test MySQL has each storage engine to increment Handler_XXX counters, while MariaDB has handler::ha_XXX() methods to do the increments. MariaDB's solution doesn't work for storage engines that implement handler::read_range_first(), though. Make ha_rocksdb::read_range_first increment the counter (when it is calling handler::ha_XXX() function that will) --- sql/handler.h | 8 +++++++- storage/rocksdb/ha_rocksdb.cc | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/sql/handler.h b/sql/handler.h index 775d21ed3ff..dca052b0ac9 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3928,10 +3928,16 @@ private: void mark_trx_read_write_internal(); bool check_table_binlog_row_based_internal(bool binlog_row); - /* Private helpers */ +protected: + /* + These are intended to be used only by handler::ha_xxxx() functions + However, engines that implement read_range_XXX() (like MariaRocks) + or embed other engines (like ha_partition) may need to call these also + */ inline void increment_statistics(ulong SSV::*offset) const; inline void decrement_statistics(ulong SSV::*offset) const; +private: /* Low-level primitives for storage engines. These should be overridden by the storage engine class. 
To call these methods, use diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 06609e4836a..fb43b8ce849 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6265,6 +6265,7 @@ int ha_rocksdb::read_range_first(const key_range *start_key, DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); } #endif + increment_statistics(&SSV::ha_read_key_count); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, { result= index_read_map_impl(table->record[0], From ece3ab37027aa63ce7ac6e1b85b94aaafda4d51a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 5 Nov 2016 17:20:33 +0000 Subject: [PATCH 057/233] MariaRocks port: add have_rocksdb.opt which enables MyRocks' plugins A number of tests used to fail due to just not being able to access MyRocks' I_S plugins: cons_snapshot_repeatable_read -drop_table3 -index_file_map -index_key_block_size -issue100_delete -truncate_table3 --- mysql-test/include/have_rocksdb.opt | 1 + 1 file changed, 1 insertion(+) create mode 100644 mysql-test/include/have_rocksdb.opt diff --git a/mysql-test/include/have_rocksdb.opt b/mysql-test/include/have_rocksdb.opt new file mode 100644 index 00000000000..fe043d7b703 --- /dev/null +++ b/mysql-test/include/have_rocksdb.opt @@ -0,0 +1 @@ +--loose-enable-rocksdb --loose-enable-rocksdb_global_info --loose-enable-rocksdb_ddl --loose-enable-rocksdb_cf_options --loose-enable_rocksdb_perf_context --loose-enable-rocksdb_index_file_map --loose-enable-rocksdb_dbstats From a42b9003f40c59e5d4c25da2c1e883d1660cdc87 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 5 Nov 2016 19:23:18 +0000 Subject: [PATCH 058/233] MariaRocks port: more of testcase Maria-fication - Adjust paths to MyRocks tests - s/@@dbug/@@debug_dbug/ - etc --- .../rocksdb/r/rocksdb_checksums.result | 16 ++++++++-------- .../rocksdb/r/unique_sec_rev_cf.result | 15 +++++++++++++++ .../mysql-test/rocksdb/t/bloomfilter.inc | 8 ++++---- .../rocksdb/t/bloomfilter_load_select.inc | 4 ++-- 
.../rocksdb/t/compact_deletes_test.inc | 3 ++- .../mysql-test/rocksdb/t/drop_table2.test | 2 +- .../rocksdb/t/rocksdb_checksums.test | 18 +++++++++--------- .../rocksdb/t/unique_sec_rev_cf.test | 2 +- 8 files changed, 42 insertions(+), 26 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result index a8908edada5..2b30501aa0f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result @@ -64,7 +64,7 @@ insert into mtr.test_suppressions values ('Checksum mismatch in value of key-value pair for index'), ('Data with incorrect checksum'); # 1. Start with mismatch in key checksum of the PK. -set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; set session rocksdb_verify_checksums=off; select * from t3; pk a b @@ -76,9 +76,9 @@ select * from t3; ERROR HY000: Internal error: Record checksum mismatch select * from t4; ERROR HY000: Internal error: Record checksum mismatch -set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1"; # 2. Continue with mismatch in pk value checksum. -set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; set session rocksdb_verify_checksums=off; select * from t3; pk a b @@ -90,7 +90,7 @@ select * from t3; ERROR HY000: Internal error: Record checksum mismatch select * from t4; ERROR HY000: Internal error: Record checksum mismatch -set session debug= "-d,myrocks_simulate_bad_pk_checksum2"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum2"; # 3. 
Check if we catch checksum mismatches for secondary indexes explain select * from t3 force index(a) where a<4; @@ -101,12 +101,12 @@ pk a b 1 1 1 2 2 3 3 3 3 -set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; select * from t3 force index(a) where a<4; ERROR HY000: Internal error: Record checksum mismatch select * from t4 force index(a) where a<1000000; ERROR HY000: Internal error: Record checksum mismatch -set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; # 4. The same for index-only reads? explain select a from t3 force index(a) where a<4; @@ -117,12 +117,12 @@ a 1 2 3 -set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; select a from t3 force index(a) where a<4; ERROR HY000: Internal error: Record checksum mismatch select a from t4 force index(a) where a<1000000; ERROR HY000: Internal error: Record checksum mismatch -set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result index 0ff55ac8d10..5392c9acf90 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result @@ -1,4 +1,7 @@ DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), id4 INT, id5 VARCHAR(32), value1 INT, value2 INT, value3 VARCHAR(32), @@ -67,8 +70,10 @@ ERROR 23000: 
Duplicate entry '10-10-10' for key 'id2_2' SELECT COUNT(*) FROM t1; COUNT(*) 13 +connection con1; BEGIN; INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +connection con2; BEGIN; SELECT COUNT(*) FROM t1; COUNT(*) @@ -116,14 +121,19 @@ UPDATE t1 SET id5=34 WHERE id1=38; ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 # NULL values are unique UPDATE t1 SET id5=NULL WHERE value1 > 37; +connection con1; COMMIT; +connection con2; COMMIT; +connection con2; BEGIN; SELECT COUNT(*) FROM t1; COUNT(*) 17 +connection con1; BEGIN; INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +connection con2; # When transaction is pending, fail on lock acquisition INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY @@ -132,7 +142,9 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on SELECT COUNT(*) FROM t1; COUNT(*) 17 +connection con1; COMMIT; +connection con2; # When transaction is committed, fail on duplicate key INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); Got one of the listed errors @@ -159,4 +171,7 @@ id1 id2 id3 id4 id5 value1 value2 value3 3 3 3 3 3 4 1 1 2 2 2 2 2 3 1 1 1 1 1 1 1 2 0 0 +disconnect con1; +disconnect con2; +connection default; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc index acc1a9f2365..14b69c93e5b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -1,6 +1,6 @@ --source include/have_rocksdb.inc -let tmpl_ddl= suite/rocksdb/t/bloomfilter_table_def.tmpl; +let tmpl_ddl= ../storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl; let ddl= $MYSQL_TMP_DIR/bloomfilter_create.sql; DELIMITER //; @@ -19,18 +19,18 @@ DELIMITER ;// #BF is sometimes invoked and useful --exec sed s/##CF##//g 
$tmpl_ddl > $ddl --source $ddl ---source suite/rocksdb/t/bloomfilter_load_select.inc +--source t/bloomfilter_load_select.inc #BF is always invoked but not useful at all --exec sed s/##CF##/" COMMENT 'cf_short_prefix'"/g $tmpl_ddl > $ddl --source $ddl ---source suite/rocksdb/t/bloomfilter_load_select.inc +--source t/bloomfilter_load_select.inc #BF is most of the time invoked and useful --exec sed s/##CF##/" COMMENT 'cf_long_prefix'"/g $tmpl_ddl > $ddl --source $ddl ---source suite/rocksdb/t/bloomfilter_load_select.inc +--source t/bloomfilter_load_select.inc # BUG: Prev() with prefix lookup should not use prefix bloom filter create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc index 1f1a4b9810f..a4a60d18bec 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc @@ -1,6 +1,6 @@ # loading some data (larger than write buf size) to cause compaction ---exec perl suite/rocksdb/t/gen_insert.pl t1 > $MYSQL_TMP_DIR/insert_t1.sql ---exec perl suite/rocksdb/t/gen_insert.pl t2 > $MYSQL_TMP_DIR/insert_t2.sql +--exec perl ../storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl t1 > $MYSQL_TMP_DIR/insert_t1.sql +--exec perl ../storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl t2 > $MYSQL_TMP_DIR/insert_t2.sql --disable_query_log --source $MYSQL_TMP_DIR/insert_t1.sql --source $MYSQL_TMP_DIR/insert_t2.sql diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc index 15a611c8dbb..dcceb38cf99 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -37,7 +37,8 @@ let 
$wait_condition = select count(*) = 0 --source include/wait_condition.inc --enable_query_log ---exec bash suite/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes +let $MYSQL_SST_DUMP=../storage/rocksdb/sst_dump +--exec bash ../storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes; eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size; eval SET GLOBAL rocksdb_compaction_sequential_deletes_window= $save_rocksdb_compaction_sequential_deletes_window; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test index 3742ab0e444..d259114dbda 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -105,6 +105,6 @@ let $wait_condition = select count(*) = 0 # Check that space is reclaimed --exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ after/' >> $output ---exec perl suite/rocksdb/t/drop_table2_check.pl $output +--exec perl ../storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl $output # Cleanup diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index 1a0364ebaee..4bff091d698 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -57,7 +57,7 @@ while ($i<10000) --enable_query_log check table t4; --exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log ---exec perl suite/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5 +--exec perl 
../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5 --remove_file $MYSQL_TMP_DIR/rocksdb_checksums.log set session rocksdb_checksums_pct=100; @@ -70,7 +70,7 @@ insert into mtr.test_suppressions values ('Data with incorrect checksum'); --echo # 1. Start with mismatch in key checksum of the PK. -set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; set session rocksdb_verify_checksums=off; select * from t3; set session rocksdb_verify_checksums=on; @@ -78,10 +78,10 @@ set session rocksdb_verify_checksums=on; select * from t3; --error ER_INTERNAL_ERROR select * from t4; -set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1"; --echo # 2. Continue with mismatch in pk value checksum. -set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; set session rocksdb_verify_checksums=off; select * from t3; set session rocksdb_verify_checksums=on; @@ -89,7 +89,7 @@ set session rocksdb_verify_checksums=on; select * from t3; --error ER_INTERNAL_ERROR select * from t4; -set session debug= "-d,myrocks_simulate_bad_pk_checksum2"; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum2"; --echo # 3. Check if we catch checksum mismatches for secondary indexes --replace_column 9 # @@ -97,12 +97,12 @@ explain select * from t3 force index(a) where a<4; select * from t3 force index(a) where a<4; -set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; --error ER_INTERNAL_ERROR select * from t3 force index(a) where a<4; --error ER_INTERNAL_ERROR select * from t4 force index(a) where a<1000000; -set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; --echo # 4. The same for index-only reads? 
--replace_column 9 # @@ -110,12 +110,12 @@ explain select a from t3 force index(a) where a<4; select a from t3 force index(a) where a<4; -set session debug= "+d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; --error ER_INTERNAL_ERROR select a from t3 force index(a) where a<4; --error ER_INTERNAL_ERROR select a from t4 force index(a) where a<1000000; -set session debug= "-d,myrocks_simulate_bad_key_checksum1"; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test index d6a8e3d5a1b..724281b73c8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test @@ -1,5 +1,5 @@ --source include/have_rocksdb.inc let ddl= $MYSQL_TMP_DIR/unique_sec_rev_cf.sql; ---exec sed s/##CF##/" COMMENT 'rev:cf'"/g suite/rocksdb/t/unique_sec.inc > $ddl +--exec sed s/##CF##/" COMMENT 'rev:cf'"/g ../storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc > $ddl --source $ddl From 0ab7cb236e2e3cac6a5c2bee618be1337c2698a2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 5 Nov 2016 22:29:02 +0000 Subject: [PATCH 059/233] MariaRocks port: More of testcase Maria-fication --- mysql-test/include/have_rocksdb.opt | 9 +- .../r/innodb_i_s_tables_disabled.result | 327 ++++++++++++++++-- .../mysql-test/rocksdb/r/type_decimal.result | 4 +- .../rocksdb/r/type_varchar_debug.result | 20 +- .../mysql-test/rocksdb/r/unique_sec.result | 15 + .../mysql-test/rocksdb/t/bulk_load.test | 1 + .../mysql-test/rocksdb/t/drop_table.test | 2 +- .../t/innodb_i_s_tables_disabled-master.opt | 30 ++ .../rocksdb/t/innodb_i_s_tables_disabled.test | 4 +- .../mysql-test/rocksdb/t/type_decimal.test | 4 +- 
.../rocksdb/t/type_varchar_debug.test | 20 +- .../mysql-test/rocksdb/t/unique_sec.test | 3 +- 12 files changed, 383 insertions(+), 56 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt diff --git a/mysql-test/include/have_rocksdb.opt b/mysql-test/include/have_rocksdb.opt index fe043d7b703..ce8cf45e549 100644 --- a/mysql-test/include/have_rocksdb.opt +++ b/mysql-test/include/have_rocksdb.opt @@ -1 +1,8 @@ ---loose-enable-rocksdb --loose-enable-rocksdb_global_info --loose-enable-rocksdb_ddl --loose-enable-rocksdb_cf_options --loose-enable_rocksdb_perf_context --loose-enable-rocksdb_index_file_map --loose-enable-rocksdb_dbstats +--loose-enable-rocksdb +--loose-enable-rocksdb_global_info +--loose-enable-rocksdb_ddl +--loose-enable-rocksdb_cf_options +--loose-enable_rocksdb_perf_context +--loose-enable_rocksdb_perf_context_global +--loose-enable-rocksdb_index_file_map +--loose-enable-rocksdb_dbstats diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result index 6d8d9685a79..0490086e77d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -2,10 +2,6 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_adaptive_hash_timeout trx_is_read_only trx_autocommit_non_locking Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_TRX but the InnoDB storage engine is not installed -SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; 
-FILE OPERATION REQUESTS SLOW BYTES BYTES/R SVC:SECS SVC:MSECS/R SVC:MAX_MSECS WAIT:SECS WAIT:MSECS/R WAIT:MAX_MSECS -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FILE_STATUS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; lock_id lock_trx_id lock_mode lock_type lock_table lock_index lock_space lock_page lock_rec lock_data Warnings: @@ -15,11 +11,11 @@ requesting_trx_id requested_lock_id blocking_trx_id blocking_lock_id Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCK_WAITS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; -page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time +page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; -page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time +page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_RESET but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; @@ -40,32 
+36,313 @@ Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMPMEM_RESET but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; NAME SUBSYSTEM COUNT MAX_COUNT MIN_COUNT AVG_COUNT COUNT_RESET MAX_COUNT_RESET MIN_COUNT_RESET AVG_COUNT_RESET TIME_ENABLED TIME_DISABLED TIME_ELAPSED TIME_RESET STATUS TYPE COMMENT -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_METRICS but the InnoDB storage engine is not installed +metadata_table_handles_opened metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table handles opened +metadata_table_handles_closed metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table handles closed +metadata_table_reference_count metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Table reference counter +lock_deadlocks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of deadlocks +lock_timeouts lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of lock timeouts +lock_rec_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times enqueued into record lock wait queue +lock_table_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times enqueued into table lock wait queue +lock_rec_lock_requests lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks requested +lock_rec_lock_created lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks created +lock_rec_lock_removed lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks removed from the lock queue +lock_rec_locks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current number of record locks on tables 
+lock_table_lock_created lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table locks created +lock_table_lock_removed lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table locks removed from the lock queue +lock_table_locks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current number of table locks on tables +lock_row_lock_current_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of row locks currently being waited for (innodb_row_lock_current_waits) +lock_row_lock_time lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Time spent in acquiring row locks, in milliseconds (innodb_row_lock_time) +lock_row_lock_time_max lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The maximum time to acquire a row lock, in milliseconds (innodb_row_lock_time_max) +lock_row_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of times a row lock had to be waited for (innodb_row_lock_waits) +lock_row_lock_time_avg lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The average time to acquire a row lock, in milliseconds (innodb_row_lock_time_avg) +buffer_pool_size server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Server buffer pool size (all buffer pools) in bytes +buffer_pool_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of reads directly from disk (innodb_buffer_pool_reads) +buffer_pool_read_requests buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of logical read requests (innodb_buffer_pool_read_requests) +buffer_pool_write_requests buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of write requests (innodb_buffer_pool_write_requests) 
+buffer_pool_wait_free buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of times waited for free buffer (innodb_buffer_pool_wait_free) +buffer_pool_read_ahead buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages read as read ahead (innodb_buffer_pool_read_ahead) +buffer_pool_read_ahead_evicted buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Read-ahead pages evicted without being accessed (innodb_buffer_pool_read_ahead_evicted) +buffer_pool_pages_total buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Total buffer pool size in pages (innodb_buffer_pool_pages_total) +buffer_pool_pages_misc buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages for misc use such as row locks or the adaptive hash index (innodb_buffer_pool_pages_misc) +buffer_pool_pages_data buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages containing data (innodb_buffer_pool_pages_data) +buffer_pool_bytes_data buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer bytes containing data (innodb_buffer_pool_bytes_data) +buffer_pool_pages_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages currently dirty (innodb_buffer_pool_pages_dirty) +buffer_pool_bytes_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer bytes currently dirty (innodb_buffer_pool_bytes_dirty) +buffer_pool_pages_free buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages currently free (innodb_buffer_pool_pages_free) +buffer_pages_created buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages created (innodb_pages_created) +buffer_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter 
Number of pages written (innodb_pages_written) +buffer_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of index pages written (innodb_index_pages_written) +buffer_non_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of non index pages written (innodb_non_index_pages_written) +buffer_pages_read buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages read (innodb_pages_read) +buffer_index_sec_rec_cluster_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads triggered cluster read +buffer_index_sec_rec_cluster_reads_avoided buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads avoided triggering cluster read +buffer_data_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Amount of data read in bytes (innodb_data_reads) +buffer_data_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Amount of data written in bytes (innodb_data_written) +buffer_flush_batch_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of flush batch +buffer_flush_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times buffer flush list flush is called +buffer_flush_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages scanned per flush batch scan +buffer_flush_batch_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of flush batch +buffer_flush_batches buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of flush batches 
+buffer_flush_batch_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a flush batch +buffer_flush_neighbor_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total neighbors flushed as part of neighbor flush +buffer_flush_neighbor buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times neighbors flushing is invoked +buffer_flush_neighbor_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a neighbor batch +buffer_flush_n_to_flush_requested buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages requested for flushing. +buffer_flush_n_to_flush_by_age buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages target by LSN Age for flushing. +buffer_flush_adaptive_avg_time_slot buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for adaptive flushing recently per slot. +buffer_LRU_batch_flush_avg_time_slot buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for LRU batch flushing recently per slot. +buffer_flush_adaptive_avg_time_thread buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for adaptive flushing recently per thread. +buffer_LRU_batch_flush_avg_time_thread buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for LRU batch flushing recently per thread. +buffer_flush_adaptive_avg_time_est buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Estimated time (ms) spent for adaptive flushing recently. +buffer_LRU_batch_flush_avg_time_est buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Estimated time (ms) spent for LRU batch flushing recently. 
+buffer_flush_avg_time buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for flushing recently. +buffer_flush_adaptive_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Numner of adaptive flushes passed during the recent Avg period. +buffer_LRU_batch_flush_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of LRU batch flushes passed during the recent Avg period. +buffer_flush_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of flushes passed during the recent Avg period. +buffer_LRU_get_free_loops buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Total loops in LRU get free. +buffer_LRU_get_free_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Total sleep waits in LRU get free. +buffer_flush_avg_page_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Average number of pages at which flushing is happening +buffer_flush_lsn_avg_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Average redo generation rate +buffer_flush_pct_for_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Percent of IO capacity used to avoid max dirty page limit +buffer_flush_pct_for_lsn buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Percent of IO capacity used to avoid reusable redo space limit +buffer_flush_sync_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times a wait happens due to sync flushing +buffer_flush_adaptive_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of adaptive flushing +buffer_flush_adaptive buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of adaptive batches 
+buffer_flush_adaptive_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an adaptive batch +buffer_flush_sync_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of sync batches +buffer_flush_sync buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of sync batches +buffer_flush_sync_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a sync batch +buffer_flush_background_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of background batches +buffer_flush_background buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of background batches +buffer_flush_background_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a background batch +buffer_LRU_batch_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU batch +buffer_LRU_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU batch is called +buffer_LRU_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages scanned per LRU batch call +buffer_LRU_batch_flush_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of LRU batches +buffer_LRU_batches_flush buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of LRU batches +buffer_LRU_batch_flush_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an LRU batch +buffer_LRU_batch_evict_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total 
pages evicted as part of LRU batches +buffer_LRU_batches_evict buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of LRU batches +buffer_LRU_batch_evict_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an LRU batch +buffer_LRU_single_flush_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of single page LRU flush +buffer_LRU_single_flush_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times single page LRU flush is called +buffer_LRU_single_flush_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Page scanned per single LRU flush +buffer_LRU_single_flush_failure_count Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times attempt to flush a single page from LRU failed +buffer_LRU_get_free_search Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of searches performed for a clean page +buffer_LRU_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU search +buffer_LRU_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU search is performed +buffer_LRU_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Page scanned per single LRU search +buffer_LRU_unzip_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU unzip search +buffer_LRU_unzip_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU unzip search is performed +buffer_LRU_unzip_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 
disabled set_member Page scanned per single LRU unzip search +buffer_page_read_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Leaf Pages read +buffer_page_read_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Non-leaf Pages read +buffer_page_read_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Leaf Pages read +buffer_page_read_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Non-Leaf Pages read +buffer_page_read_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Undo Log Pages read +buffer_page_read_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Inode Pages read +buffer_page_read_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Free List Pages read +buffer_page_read_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Bitmap Pages read +buffer_page_read_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of System Pages read +buffer_page_read_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Transaction System Pages read +buffer_page_read_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of File Space Header Pages read +buffer_page_read_xdes buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Extent Descriptor Pages read +buffer_page_read_blob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL 
NULL NULL disabled counter Number of Uncompressed BLOB Pages read +buffer_page_read_zblob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of First Compressed BLOB Pages read +buffer_page_read_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Subsequent Compressed BLOB Pages read +buffer_page_read_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of other/unknown (old version of InnoDB) Pages read +buffer_page_written_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Leaf Pages written +buffer_page_written_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Non-leaf Pages written +buffer_page_written_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Leaf Pages written +buffer_page_written_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Non-Leaf Pages written +buffer_page_written_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Undo Log Pages written +buffer_page_written_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Inode Pages written +buffer_page_written_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Free List Pages written +buffer_page_written_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Bitmap Pages written +buffer_page_written_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of System Pages 
written +buffer_page_written_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Transaction System Pages written +buffer_page_written_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of File Space Header Pages written +buffer_page_written_xdes buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Extent Descriptor Pages written +buffer_page_written_blob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Uncompressed BLOB Pages written +buffer_page_written_zblob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of First Compressed BLOB Pages written +buffer_page_written_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Subsequent Compressed BLOB Pages written +buffer_page_written_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of other/unknown (old version InnoDB) Pages written +os_data_reads os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of reads initiated (innodb_data_reads) +os_data_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of writes initiated (innodb_data_writes) +os_data_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of fsync() calls (innodb_data_fsyncs) +os_pending_reads os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of reads pending +os_pending_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of writes pending +os_log_bytes_written os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Bytes of log written (innodb_os_log_written) +os_log_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL 
NULL NULL NULL disabled status_counter Number of fsync log writes (innodb_os_log_fsyncs) +os_log_pending_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pending fsync write (innodb_os_log_pending_fsyncs) +os_log_pending_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pending log file writes (innodb_os_log_pending_writes) +trx_rw_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of read-write transactions committed +trx_ro_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of read-only transactions committed +trx_nl_ro_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of non-locking auto-commit read-only transactions committed +trx_commits_insert_update transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions committed with inserts and updates +trx_rollbacks transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back +trx_rollbacks_savepoint transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back to savepoint +trx_rollback_active transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of resurrected active transactions rolled back +trx_active_transactions transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of active transactions +trx_rseg_history_len transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Length of the TRX_RSEG_HISTORY list +trx_undo_slots_used transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo slots used +trx_undo_slots_cached transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled 
counter Number of undo slots cached +trx_rseg_current_size transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Current rollback segment size in pages +purge_del_mark_records purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of delete-marked rows purged +purge_upd_exist_or_extern_records purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of purges on updates of existing records and updates on delete marked record with externally stored field +purge_invoked purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times purge was invoked +purge_undo_log_pages purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo log pages handled by the purge +purge_dml_delay_usec purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Microseconds DML to be delayed due to purge lagging +purge_stop_count purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of times purge was stopped +purge_resume_count purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of times purge was resumed +log_checkpoints recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of checkpoints +log_lsn_last_flush recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value LSN of Last flush +log_lsn_last_checkpoint recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value LSN at last checkpoint +log_lsn_current recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Current LSN value +log_lsn_checkpoint_age recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current LSN value minus LSN at last checkpoint +log_lsn_buf_pool_oldest recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The oldest modified block LSN in the buffer pool 
+log_max_modified_age_async recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Maximum LSN difference; when exceeded, start asynchronous preflush +log_max_modified_age_sync recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Maximum LSN difference; when exceeded, start synchronous preflush +log_pending_log_flushes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Pending log flushes +log_pending_checkpoint_writes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Pending checkpoints +log_num_log_io recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of log I/Os +log_waits recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log waits due to small log buffer (innodb_log_waits) +log_write_requests recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log write requests (innodb_log_write_requests) +log_writes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log writes (innodb_log_writes) +log_padded recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Bytes of log padded for log write ahead +compress_pages_compressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages compressed +compress_pages_decompressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decompressed +compression_pad_increments compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is incremented to avoid compression failures +compression_pad_decrements compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is decremented due to good compressibility +compress_saved compression 0 NULL NULL NULL 
0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of bytes saved by page compression +compress_trim_sect512 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-512 TRIMed by page compression +compress_trim_sect1024 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-1024 TRIMed by page compression +compress_trim_sect2048 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-2048 TRIMed by page compression +compress_trim_sect4096 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-4K TRIMed by page compression +compress_trim_sect8192 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-8K TRIMed by page compression +compress_trim_sect16384 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-16K TRIMed by page compression +compress_trim_sect32768 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-32K TRIMed by page compression +compress_pages_page_compressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages compressed by page compression +compress_page_compressed_trim_op compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of TRIM operation performed by page compression +compress_page_compressed_trim_op_saved compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of TRIM operation saved by page compression +compress_pages_page_decompressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decompressed by page compression +compress_pages_page_compression_error compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of page compression errors 
+compress_pages_encrypted compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages encrypted +compress_pages_decrypted compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decrypted +index_page_splits index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page splits +index_page_merge_attempts index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page merge attempts +index_page_merge_successful index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of successful index page merges +index_page_reorg_attempts index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page reorganization attempts +index_page_reorg_successful index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of successful index page reorganizations +index_page_discards index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages discarded +adaptive_hash_searches adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of successful searches using Adaptive Hash Index +adaptive_hash_searches_btree adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of searches using B-tree on an index search +adaptive_hash_pages_added adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages on which the Adaptive Hash Index is built +adaptive_hash_pages_removed adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages whose corresponding Adaptive Hash Index entries were removed +adaptive_hash_rows_added adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash 
Index rows added +adaptive_hash_rows_removed adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash Index rows removed +adaptive_hash_rows_deleted_no_hash_entry adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of rows deleted that did not have corresponding Adaptive Hash Index entries +adaptive_hash_rows_updated adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash Index rows updated +file_num_open_files file_system 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of files currently open (innodb_num_open_files) +ibuf_merges_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of inserted records merged by change buffering +ibuf_merges_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of deleted records merged by change buffering +ibuf_merges_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of purge records merged by change buffering +ibuf_merges_discard_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of insert merged operations discarded +ibuf_merges_discard_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of deleted merged operations discarded +ibuf_merges_discard_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of purge merged operations discarded +ibuf_merges change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of change buffer merges +ibuf_size change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Change buffer size in pages 
+innodb_master_thread_sleeps server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times (seconds) master thread sleeps +innodb_activity_count server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Current server activity count +innodb_master_active_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times master thread performs its tasks when server is active +innodb_master_idle_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times master thread performs its tasks when server is idle +innodb_background_drop_table_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process drop table list +innodb_ibuf_merge_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process change buffer merge +innodb_log_flush_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to flush log records +innodb_mem_validate_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to do memory validation +innodb_master_purge_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent by master thread to purge records +innodb_dict_lru_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process DICT LRU list +innodb_dict_lru_count_active server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables evicted from DICT LRU list in the active loop +innodb_dict_lru_count_idle server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables evicted from DICT LRU list in the idle loop +innodb_checkpoint_usec server 0 NULL NULL NULL 0 NULL NULL 
NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent by master thread to do checkpoint +innodb_dblwr_writes server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of doublewrite operations that have been performed (innodb_dblwr_writes) +innodb_dblwr_pages_written server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages that have been written for doublewrite operations (innodb_dblwr_pages_written) +innodb_page_size server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value InnoDB page size in bytes (innodb_page_size) +innodb_rwlock_s_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to shared latch request +innodb_rwlock_x_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to exclusive latch request +innodb_rwlock_sx_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to sx latch request +innodb_rwlock_s_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to shared latch request +innodb_rwlock_x_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to exclusive latch request +innodb_rwlock_sx_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to sx latch request +innodb_rwlock_s_os_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to shared latch request +innodb_rwlock_x_os_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to exclusive latch request 
+innodb_rwlock_sx_os_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to sx latch request +dml_reads dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows read +dml_inserts dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows inserted +dml_deletes dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows deleted +dml_updates dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows updated +dml_system_reads dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows read +dml_system_inserts dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows inserted +dml_system_deletes dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows deleted +dml_system_updates dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows updated +ddl_background_drop_indexes ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of indexes waiting to be dropped after failed index creation +ddl_background_drop_tables ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables in background drop table list +ddl_online_create_index ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of indexes being created online +ddl_pending_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of ALTER TABLE, CREATE INDEX, DROP INDEX in progress +ddl_sort_file_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sort files created during alter table +ddl_log_file_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL 
NULL disabled counter Number of log files created during alter table +icp_attempts icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of attempts for index push-down condition checks +icp_no_match icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition does not match +icp_out_of_range icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition out of range +icp_match icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition matches SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD; value -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD but the InnoDB storage engine is not installed +a +about +an +are +as +at +be +by +com +de +en +for +from +how +i +in +is +it +la +of +on +or +that +the +this +to +was +what +when +where +who +will +with +und +the +www SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED; DOC_ID -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DELETED but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED; DOC_ID -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE; WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE; WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; KEY VALUE -Warnings: -Warning 1012 InnoDB: SELECTing from 
INFORMATION_SCHEMA.INNODB_FT_CONFIG but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; POOL_ID POOL_SIZE FREE_BUFFERS DATABASE_PAGES OLD_DATABASE_PAGES MODIFIED_DATABASE_PAGES PENDING_DECOMPRESS PENDING_READS PENDING_FLUSH_LRU PENDING_FLUSH_LIST PAGES_MADE_YOUNG PAGES_NOT_MADE_YOUNG PAGES_MADE_YOUNG_RATE PAGES_MADE_NOT_YOUNG_RATE NUMBER_PAGES_READ NUMBER_PAGES_CREATED NUMBER_PAGES_WRITTEN PAGES_READ_RATE PAGES_CREATE_RATE PAGES_WRITTEN_RATE NUMBER_PAGES_GET HIT_RATE YOUNG_MAKE_PER_THOUSAND_GETS NOT_YOUNG_MAKE_PER_THOUSAND_GETS NUMBER_PAGES_READ_AHEAD NUMBER_READ_AHEAD_EVICTED READ_AHEAD_RATE READ_AHEAD_EVICTED_RATE LRU_IO_TOTAL LRU_IO_CURRENT UNCOMPRESS_TOTAL UNCOMPRESS_CURRENT Warnings: @@ -79,7 +356,7 @@ POOL_ID LRU_POSITION SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; -TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE +TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; @@ -87,7 +364,7 @@ TABLE_ID NAME STATS_INITIALIZED NUM_ROWS CLUST_INDEX_SIZE OTHER_INDEX_SIZE MODIF Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES; -INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE +INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE MERGE_THRESHOLD Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_INDEXES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; @@ -107,14 +384,10 @@ ID 
FOR_COL_NAME REF_COL_NAME POS Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; -SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE ZIP_PAGE_SIZE +SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE ZIP_PAGE_SIZE SPACE_TYPE FS_BLOCK_SIZE FILE_SIZE ALLOCATED_SIZE Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; SPACE PATH Warnings: Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DATAFILES but the InnoDB storage engine is not installed -SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; -INDEX_ID POS DOCUMENT_PATH DOCUMENT_TYPE -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS but the InnoDB storage engine is not installed diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result index 7a7a5c7638c..677808261ee 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result @@ -5,7 +5,7 @@ drop table if exists t1, t2; create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); # First, make the server to create a dataset in the old format: -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t1 ( pk1 decimal(32,16), pk2 decimal(32,16), @@ -29,7 +29,7 @@ where TABLE_SCHEMA=database() AND table_name='t1'; table_name index_name kv_format_version t1 PRIMARY 10 flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; # Check that the new server reads the data in the old format: select * from t1 order by 
pk1,pk2,pk3 limit 5; pk1 pk2 pk3 a diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result index de7608ebb1c..eace25b2ccf 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result @@ -1,5 +1,5 @@ drop table if exists t1,t2; -set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; # # Issue 257: Sort order for varchars is different between # MyISAM/InnoDB vs MyRocks @@ -112,11 +112,11 @@ pk length(a) rtrim(a) 1 301 a 2 301 b drop table t1; -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; # # Check backwards compatibility: # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; # Create the tables in the old format create table t1 ( pk varchar(64) collate latin1_bin, @@ -154,7 +154,7 @@ t1 PRIMARY 10 t2 PRIMARY 10 t2 col1 10 flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; select pk, hex(pk), col1 from t1; pk hex(pk) col1 a 61 a @@ -177,13 +177,13 @@ drop table t1,t2; # # General upgrade tests to see that they work. 
# -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( id int primary key, col1 varchar(64) collate latin1_swedish_ci, unique key (col1) ) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; insert into t2 values (1, 'a'); insert into t2 values (2, 'b'); insert into t2 values (3, 'c'); @@ -204,13 +204,13 @@ c insert into t2 values (4, 'c '); ERROR 23000: Duplicate entry 'c ' for key 'col1' drop table t2; -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( id int primary key, col1 varchar(64) collate latin1_bin, unique key (col1) ) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; insert into t2 values (1, 'a'); insert into t2 values (2, 'b'); insert into t2 values (3, 'c'); @@ -235,7 +235,7 @@ drop table t2; # Check what happens when one tries to 'upgrade' to the new data format # and causes a unique key violation: # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( pk int not null primary key, col1 varchar(64) collate latin1_bin, @@ -248,7 +248,7 @@ select * from t2; pk col1 col2 1 a a-space 2 a a -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; alter table t2 engine=rocksdb; ERROR 23000: Duplicate entry 'a' for key 'col1' drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result index 59ad709a595..63743bbf792 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -1,4 +1,7 @@ DROP TABLE IF EXISTS t1; +connect 
con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), id4 INT, id5 VARCHAR(32), value1 INT, value2 INT, value3 VARCHAR(32), @@ -67,8 +70,10 @@ ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' SELECT COUNT(*) FROM t1; COUNT(*) 13 +connection con1; BEGIN; INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +connection con2; BEGIN; SELECT COUNT(*) FROM t1; COUNT(*) @@ -116,14 +121,19 @@ UPDATE t1 SET id5=34 WHERE id1=38; ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 # NULL values are unique UPDATE t1 SET id5=NULL WHERE value1 > 37; +connection con1; COMMIT; +connection con2; COMMIT; +connection con2; BEGIN; SELECT COUNT(*) FROM t1; COUNT(*) 17 +connection con1; BEGIN; INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +connection con2; # When transaction is pending, fail on lock acquisition INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY @@ -132,7 +142,9 @@ ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on SELECT COUNT(*) FROM t1; COUNT(*) 17 +connection con1; COMMIT; +connection con2; # When transaction is committed, fail on duplicate key INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); Got one of the listed errors @@ -159,6 +171,9 @@ id1 id2 id3 id4 id5 value1 value2 value3 38 31 32 NULL 38 37 37 37 39 31 32 NULL 39 37 37 37 40 40 40 40 40 40 40 40 +disconnect con1; +disconnect con2; +connection default; DROP TABLE t1; # # Issue #88: Creating unique index over column with duplicate values succeeds diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index cfa2f6ff747..c9b0022d7dd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test 
@@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc --disable_warnings DROP TABLE IF EXISTS t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index 7b28474d9f2..32d8133fab1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -109,7 +109,7 @@ let $wait_condition = select count(*) = 0 # Get list of all indices needing to be dropped # Check total compacted-away rows for all indices # Check that all indices have been successfully dropped ---exec perl suite/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err +--exec perl ../storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err # Cleanup drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt new file mode 100644 index 00000000000..b3565b5fa82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt @@ -0,0 +1,30 @@ +--loose-enable-innodb_trx +--loose-enable-innodb_file_status +--loose-enable-innodb_locks +--loose-enable-innodb_lock_waits +--loose-enable-innodb_cmp +--loose-enable-innodb_cmp_reset +--loose-enable-innodb_cmp_per_index +--loose-enable-innodb_cmp_per_index_reset +--loose-enable-innodb_cmpmem +--loose-enable-innodb_cmpmem_reset +--loose-enable-innodb_metrics +--loose-enable-innodb_ft_default_stopword +--loose-enable-innodb_ft_deleted +--loose-enable-innodb_ft_being_deleted +--loose-enable-innodb_ft_index_cache +--loose-enable-innodb_ft_index_table +--loose-enable-innodb_ft_config +--loose-enable-innodb_buffer_pool_stats +--loose-enable-innodb_buffer_page +--loose-enable-innodb_buffer_page_lru +--loose-enable-innodb_sys_tables +--loose-enable-innodb_sys_tablestats +--loose-enable-innodb_sys_indexes 
+--loose-enable-innodb_sys_columns +--loose-enable-innodb_sys_fields +--loose-enable-innodb_sys_foreign +--loose-enable-innodb_sys_foreign_cols +--loose-enable-innodb_sys_tablespaces +--loose-enable-innodb_sys_datafiles +--loose-enable-innodb_sys_docstore_fields diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test index 99ad24f09fc..b50cf08b227 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test @@ -4,7 +4,7 @@ # is turned off and attempting to access them doesn't crash. SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; -SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; +#Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; @@ -32,4 +32,4 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; -SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; +#Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test index d5ee75686df..f0cb2e2af27 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test @@ -12,7 +12,7 @@ create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); --echo # First, make the server to create a dataset in the old format: -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t1 ( pk1 decimal(32,16), pk2 
decimal(32,16), @@ -40,7 +40,7 @@ where TABLE_SCHEMA=database() AND table_name='t1'; flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; --source include/restart_mysqld.inc --echo # Check that the new server reads the data in the old format: diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test index d61e85ed204..9ffeaede45d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test @@ -9,18 +9,18 @@ drop table if exists t1,t2; --enable_warnings -set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; --let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_general_ci --source type_varchar_endspace.inc -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; --echo # --echo # Check backwards compatibility: --echo # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; --echo # Create the tables in the old format @@ -53,7 +53,7 @@ from information_schema.ROCKSDB_DDL where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; select pk, hex(pk), col1 from t1; select pk, col1, hex(col1), col2 from t2; @@ -69,13 +69,13 @@ drop table t1,t2; --echo # --echo # General upgrade tests to see that they work. 
--echo # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( id int primary key, col1 varchar(64) collate latin1_swedish_ci, unique key (col1) ) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; insert into t2 values (1, 'a'); insert into t2 values (2, 'b'); @@ -91,13 +91,13 @@ select col1 from t2; insert into t2 values (4, 'c '); drop table t2; -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( id int primary key, col1 varchar(64) collate latin1_bin, unique key (col1) ) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; insert into t2 values (1, 'a'); insert into t2 values (2, 'b'); @@ -117,7 +117,7 @@ drop table t2; --echo # Check what happens when one tries to 'upgrade' to the new data format --echo # and causes a unique key violation: --echo # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; create table t2 ( pk int not null primary key, col1 varchar(64) collate latin1_bin, @@ -129,7 +129,7 @@ insert into t2 values (1, 'a ', 'a-space'); insert into t2 values (2, 'a', 'a'); select * from t2; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; +set session debug_dbug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; --error ER_DUP_ENTRY alter table t2 engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test index 28b52f262cc..4483f48b4ca 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -1,7 +1,8 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc let ddl= $MYSQL_TMP_DIR/unique_sec.sql; 
---exec sed s/##CF##//g suite/rocksdb/t/unique_sec.inc > $ddl +--exec sed s/##CF##//g ../storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc > $ddl --source $ddl --echo # From df407fca0b5385b1a4134b71f47e2dcb29d09ad8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 5 Nov 2016 23:19:09 +0000 Subject: [PATCH 060/233] MariaRocks port: fix a few more testcases --- storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result | 4 ++-- storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test | 2 +- storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result index a0fd7a13780..b37b0d0b72d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -8,10 +8,10 @@ PRIMARY KEY (z, y) COMMENT 'zy_cf', KEY (x)) ENGINE = ROCKSDB; SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF -test is_ddl_t2 NULL PRIMARY 1 11 zy_cf -test is_ddl_t2 NULL x 2 11 default test is_ddl_t1 NULL PRIMARY 1 11 default test is_ddl_t1 NULL j 2 11 default test is_ddl_t1 NULL k 2 11 kl_cf +test is_ddl_t2 NULL PRIMARY 1 11 zy_cf +test is_ddl_t2 NULL x 2 11 default DROP TABLE is_ddl_t1; DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test index 9ee23a88bbe..7dc3c207ecc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test @@ -16,7 +16,7 @@ CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, PRIMARY KEY (z, y) COMMENT 'zy_cf', KEY (x)) ENGINE = ROCKSDB; - +--sorted_result SELECT 
TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; # cleanup diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc index a41bd046455..08a465e7244 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc @@ -64,7 +64,7 @@ select count(*) from t5; select count(*) from t6; # run a check script to verify sst files reduced enough during each optimize table ---exec perl suite/rocksdb/optimize_table_check_sst.pl $MYSQL_TMP_DIR/sst_size.dat +--exec perl ../storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl $MYSQL_TMP_DIR/sst_size.dat #cleanup optimize table t2; From f246829e2f1199059955be283d5c87bfa1b90a0c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 7 Nov 2016 01:05:55 +0300 Subject: [PATCH 061/233] MariaRocks port: Support --force-restart "pseudo-argument" testcases can specify it in *.opt files, it causes the server(s) to be restarted before running the testcase. 
MTRv1 supported this, this patch adds support for MTRv2 --- mysql-test/lib/mtr_cases.pm | 8 ++++++++ mysql-test/mysql-test-run.pl | 7 +++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/mysql-test/lib/mtr_cases.pm b/mysql-test/lib/mtr_cases.pm index 2be903abf42..3c478d94838 100644 --- a/mysql-test/lib/mtr_cases.pm +++ b/mysql-test/lib/mtr_cases.pm @@ -604,6 +604,14 @@ sub process_opts { # Fallthrough, add the --default-time-zone option } + # --force-restart is a "fake" option which just signals MTR that + # it should restart the mysqld server even if it was started with + # a matching set of options + if ($opt eq "--force-restart") { + $tinfo->{'force_restart'}= 1; + next; + } + # Ok, this was a real option, add it push(@{$tinfo->{$opt_name}}, $opt); } diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 81291789a20..e711e611ed3 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5164,7 +5164,6 @@ sub stop_all_servers () { # Find out if server should be restarted for this test sub server_need_restart { my ($tinfo, $server)= @_; - if ( using_extern() ) { mtr_verbose_restart($server, "no restart for --extern server"); @@ -5257,7 +5256,11 @@ sub server_need_restart { sub servers_need_restart($) { my ($tinfo)= @_; - return grep { server_need_restart($tinfo, $_); } all_servers(); + if ($tinfo->{'force_restart'} == 1) { + return all_servers(); + } else { + return grep { server_need_restart($tinfo, $_); } all_servers(); + } } From 600a2075b5b18f94f1a47acb12d3d764ef3196b7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 6 Nov 2016 22:18:11 +0000 Subject: [PATCH 062/233] MariaRocks port: more of testcase Maria-fication --- mysql-test/include/have_rocksdb.opt | 1 + .../rocksdb/r/records_in_range.result | 60 +++++++++---------- .../mysql-test/rocksdb/t/show_engine.test | 1 + 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/mysql-test/include/have_rocksdb.opt 
b/mysql-test/include/have_rocksdb.opt index ce8cf45e549..c7de6da2bd5 100644 --- a/mysql-test/include/have_rocksdb.opt +++ b/mysql-test/include/have_rocksdb.opt @@ -6,3 +6,4 @@ --loose-enable_rocksdb_perf_context_global --loose-enable-rocksdb_index_file_map --loose-enable-rocksdb_dbstats +--loose-enable-rocksdb_cfstats diff --git a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result index e165e117a99..7176b4b733a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result @@ -11,142 +11,142 @@ explain extended select * from t1 where a> 500 and a< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750)) explain extended select * from t1 where a< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) explain extended select * from t1 where a> 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from 
`test`.`t1` where (`test`.`t1`.`a` > 500) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500) explain extended select * from t1 where a>=0 and a<=1000; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) explain extended select * from t1 where b> 500 and b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) explain extended select * from t1 where b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) explain extended select * from t1 where b> 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select 
`test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) explain extended select * from t1 where b>=0 and b<=1000; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range; set rocksdb_records_in_range = 15000; explain extended select a from t1 where a < 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 15000 100.00 Using where; Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` < 750) +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` < 750) explain extended select a, b from t1 where a < 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL ka NULL NULL NULL 20000 75.00 Using where Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) explain extended select a from t1 where a = 700; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ref ka ka 5 const 15000 100.00 Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from 
`test`.`t1` where (`test`.`t1`.`a` = 700) +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` = 700) explain extended select a,b from t1 where a = 700; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref ka ka 5 const 15000 100.00 NULL +1 SIMPLE t1 ref ka ka 5 const 15000 100.00 Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` = 700) +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` = 700) explain extended select a from t1 where a in (700, 800); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 index ka ka 5 NULL 20000 100.00 Using where; Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) explain extended select a,b from t1 where a in (700, 800); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL ka NULL NULL NULL 20000 100.00 Using where Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) set rocksdb_records_in_range=8000; explain extended select a from t1 where a in (700, 800); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 16000 100.00 Using where; Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) explain extended select a,b from t1 where a in (700, 800); id select_type 
table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL ka NULL NULL NULL 20000 80.00 Using where Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800)) set rocksdb_records_in_range = @save_rocksdb_records_in_range; set global rocksdb_force_flush_memtable_now = true; explain extended select * from t1 where a> 500 and a< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750)) explain extended select * from t1 where a< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750) explain extended select * from t1 where a> 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where 
(`test`.`t1`.`a` > 500) explain extended select * from t1 where a>=0 and a<=1000; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000)) explain extended select * from t1 where b> 500 and b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) explain extended select * from t1 where b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750) explain extended select * from t1 where b> 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) +Note 1003 select `test`.`t1`.`i` AS 
`i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500) explain extended select * from t1 where b>=0 and b<=1000; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000)) explain extended select * from t1 where a>= 500 and a<= 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 500) and (`test`.`t1`.`a` <= 500)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 500) and (`test`.`t1`.`a` <= 500)) explain extended select * from t1 where b>= 500 and b<= 500; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 500) and (`test`.`t1`.`b` <= 500)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 500) and (`test`.`t1`.`b` <= 500)) explain extended select * from t1 where a< 750 and b> 500 and b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range ka,kb ka 5 NULL 1000 100.00 Using index condition; Using where 
Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) drop index ka on t1; drop index kb on t1; create index kab on t1(a,b); @@ -155,13 +155,13 @@ explain extended select * from t1 where a< 750 and b> 500 and b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kab kab 5 NULL 1000 100.00 Using where; Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) set rocksdb_records_in_range=444; explain extended select * from t1 where a< 750 and b> 500 and b< 750; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range kab kab 5 NULL 444 100.00 Using where; Using index Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750)) set rocksdb_records_in_range=0; CREATE TABLE `linktable` ( `id1` bigint(20) unsigned NOT NULL DEFAULT '0', diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test 
b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test index 0cb32d95d8a..847d4d948f6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # SHOW ENGINE STATUS command From ce9aeb888ed75773595a19ae3d784cedcbc97dbe Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 7 Nov 2016 02:49:49 +0300 Subject: [PATCH 063/233] MariaRocks port: Support --force-restart "pseudo-argument", part#2 --- mysql-test/mysql-test-run.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index e711e611ed3..4b37212c8cc 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5256,7 +5256,7 @@ sub server_need_restart { sub servers_need_restart($) { my ($tinfo)= @_; - if ($tinfo->{'force_restart'} == 1) { + if (defined($tinfo->{'force_restart'})) { return all_servers(); } else { return grep { server_need_restart($tinfo, $_); } all_servers(); From 8014a942b78a44e294983f9a39d296f56303e143 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 10 Nov 2016 20:36:24 +0000 Subject: [PATCH 064/233] MariaRocks port: update results for rocksdb.col_opt_null test. The differences are due: - MariaDB 10.1+ assigns different DEFAULT values = in some cases, MariaDB's table has no default value, while MySQL's has '' as default. = BLOB/TEXT can have a default value. - MariaDB's MTR runs tests with diffferent timezone (affects result of FROM_UNIXTIME()) - MySQL has removed YEAR(2). MariaDB produces warning but still creates the column of this type. - Different wording in warning/error messages. 
--- .../mysql-test/rocksdb/r/col_opt_null.result | 156 ++++++++---------- 1 file changed, 69 insertions(+), 87 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result index 51f75c1004b..3a59302bf58 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result @@ -91,12 +91,12 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varbinary(0) YES NULL v1 varbinary(1) YES NULL -v64 varbinary(64) NO PRI +v64 varbinary(64) NO PRI NULL v65000 varbinary(65000) YES NULL CREATE TABLE t2 (v VARBINARY(65532) NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb; SHOW COLUMNS IN t2; Field Type Null Key Default Extra -v varbinary(65532) NO PRI +v varbinary(65532) NO PRI NULL INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. 
@@ -168,7 +168,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varbinary(0) YES NULL v1 varbinary(1) YES NULL -v64 varbinary(64) NO PRI +v64 varbinary(64) NO PRI NULL v65000 varbinary(65000) YES NULL v65536 mediumblob YES NULL DROP TABLE t1, t2; @@ -214,7 +214,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a bit(1) YES NULL b bit(20) YES NULL -c bit(64) NO PRI b'0' +c bit(64) NO PRI NULL d bit(1) YES NULL ALTER TABLE t1 DROP COLUMN d; ALTER TABLE t1 ADD COLUMN d BIT(0) NULL; @@ -222,7 +222,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a bit(1) YES NULL b bit(20) YES NULL -c bit(64) NO PRI b'0' +c bit(64) NO PRI NULL d bit(1) YES NULL INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; @@ -349,13 +349,11 @@ c1 BLOB NULL DEFAULT NULL, c2 BLOB NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c blob YES NULL c1 blob YES NULL -c2 blob YES NULL +c2 blob YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -364,12 +362,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -378,13 +376,11 @@ c1 TINYBLOB NULL DEFAULT NULL, c2 TINYBLOB NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c tinyblob YES NULL c1 tinyblob YES NULL -c2 tinyblob YES NULL +c2 tinyblob YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES 
(NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -393,12 +389,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -407,13 +403,11 @@ c1 MEDIUMBLOB NULL DEFAULT NULL, c2 MEDIUMBLOB NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c mediumblob YES NULL c1 mediumblob YES NULL -c2 mediumblob YES NULL +c2 mediumblob YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -422,12 +416,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -436,13 +430,11 @@ c1 LONGBLOB NULL DEFAULT NULL, c2 LONGBLOB NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c longblob YES NULL c1 longblob YES NULL -c2 longblob YES NULL +c2 longblob YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -451,12 +443,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; ######################## # BOOL columns @@ -530,9 +522,9 @@ b1 b2 127 -128 2 3 ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NULL; -ERROR 42000: You have an error 
in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NULL' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNSIGNED NULL' at line 1 ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NULL; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NULL' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NULL' at line 1 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -579,7 +571,7 @@ c char(1) YES NULL c0 char(0) YES NULL c1 char(1) YES NULL c20 char(20) YES NULL -c255 char(255) NO PRI +c255 char(255) NO PRI NULL INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); SELECT c,c0,c1,c20,c255 FROM t1; @@ -664,12 +656,12 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varchar(0) YES NULL v1 varchar(1) YES NULL -v64 varchar(64) NO PRI +v64 varchar(64) NO PRI NULL v65000 varchar(65000) YES NULL CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; SHOW COLUMNS IN t2; Field Type Null Key Default Extra -v varchar(65532) NO PRI +v varchar(65532) NO PRI NULL INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. 
We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. @@ -776,7 +768,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra v0 varchar(0) YES NULL v1 varchar(1) YES NULL -v64 varchar(64) NO PRI +v64 varchar(64) NO PRI NULL v65000 varchar(65000) YES NULL v65536 mediumtext YES NULL DROP TABLE t1, t2; @@ -822,7 +814,7 @@ y2 YEAR(2) NULL, pk DATETIME PRIMARY KEY ) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra d date YES NULL @@ -831,7 +823,7 @@ ts timestamp YES NULL t time YES NULL y year(4) YES NULL y4 year(4) YES NULL -y2 year(4) YES NULL +y2 year(2) YES NULL pk datetime NO PRI NULL SET @tm = '2012-04-09 05:27:00'; INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES @@ -841,27 +833,27 @@ INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES (DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES ('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); Warnings: Warning 1265 Data truncated for column 'd' at row 1 -Warning 
1264 Out of range value for column 'dt' at row 1 -Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 Warning 1264 Out of range value for column 't' at row 1 Warning 1264 Out of range value for column 'y' at row 1 Warning 1264 Out of range value for column 'y4' at row 1 Warning 1264 Out of range value for column 'y2' at row 1 SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -1006,14 +998,14 @@ c2 YEAR(2) NULL DEFAULT '12', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra -c year(4) YES NULL -c1 year(4) YES NULL -c2 year(4) YES 2012 +c year(2) YES NULL +c1 year(2) YES NULL +c2 year(2) YES 12 pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('12','12','12'); @@ -1021,13 +1013,13 @@ INSERT INTO t1 () VALUES (); SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL -2 7DC 7DC 7DC -3 NULL NULL 7DC +2 C C C +3 NULL NULL C SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL -2 7DC -3 7DC +2 C +3 C DROP TABLE t1; ######################## # ENUM columns @@ -1042,7 +1034,7 @@ PRIMARY KEY (b) SHOW COLUMNS IN t1; Field Type Null Key Default Extra a enum('') YES NULL -b enum('test1','test2','test3','test4','test5') NO PRI test1 +b enum('test1','test2','test3','test4','test5') NO PRI NULL c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66
','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); SELECT a,b,c FROM t1; @@ -1065,7 +1057,7 @@ Note 1291 Column 'e' has duplicated value 'a' in ENUM SHOW COLUMNS IN t1; Field Type Null Key Default Extra a enum('') YES NULL -b enum('test1','test2','test3','test4','test5') NO PRI test1 +b enum('test1','test2','test3','test4','test5') NO PRI NULL c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL e enum('a','A') YES NULL INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); @@ -1234,11 +1226,11 @@ d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 9999999999 9999999999 0.9 99999999.99 
99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NULL; -ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NULL; -ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NULL; -ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -1531,7 +1523,7 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 6 ); Warnings: -Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated Warning 1264 Out of range value for column 'f' at row 1 Warning 1264 Out of range value for column 'f0' at row 1 Warning 1264 Out of range value for column 'r1_1' at row 1 @@ -1603,11 +1595,9 @@ r1_1 0.9 r1_1 0.9 r1_1 0.9 ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NULL; -ERROR 42000: Display width out of range for column 'd0_0' (max = 255) ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NULL; -ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NULL; -ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. 
DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -1876,7 +1866,7 @@ i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 ALTER TABLE t1 ADD COLUMN i257 INT(257) NULL; -ERROR 42000: Display width out of range for column 'i257' (max = 255) +ERROR 42000: Display width out of range for 'i257' (max = 255) DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -2027,7 +2017,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a set('') YES NULL b set('test1','test2','test3','test4','test5') YES NULL -c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL INSERT INTO t1 (a,b,c) VALUES ('','test2,test3','01,34,44,,23'), ('',5,2), @@ -2056,7 +2046,7 @@ SHOW COLUMNS IN t1; Field Type Null Key Default Extra a set('') YES NULL b set('test1','test2','test3','test4','test5') YES NULL -c 
set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL e set('a','A') YES NULL ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NULL; ERROR HY000: Too many strings for column f and SET @@ -2150,7 +2140,7 @@ LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NULL; -ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) +ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -2159,13 +2149,11 @@ c1 TEXT NULL DEFAULT NULL, c2 TEXT NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; 
-Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c text YES NULL c1 text YES NULL -c2 text YES NULL +c2 text YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -2174,12 +2162,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -2188,13 +2176,11 @@ c1 TINYTEXT NULL DEFAULT NULL, c2 TINYTEXT NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c tinytext YES NULL c1 tinytext YES NULL -c2 tinytext YES NULL +c2 tinytext YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -2203,12 +2189,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -2217,13 +2203,11 @@ c1 MEDIUMTEXT NULL DEFAULT NULL, c2 MEDIUMTEXT NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c mediumtext YES NULL c1 mediumtext YES NULL -c2 mediumtext YES NULL +c2 mediumtext YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -2232,12 +2216,12 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 
NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -2246,13 +2230,11 @@ c1 LONGTEXT NULL DEFAULT NULL, c2 LONGTEXT NULL DEFAULT '', pk INT AUTO_INCREMENT PRIMARY KEY ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c2' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra c longtext YES NULL c1 longtext YES NULL -c2 longtext YES NULL +c2 longtext YES '' pk int(11) NO PRI NULL auto_increment INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); INSERT INTO t1 (c,c1,c2) VALUES ('','',''); @@ -2261,10 +2243,10 @@ SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; pk HEX(c) HEX(c1) HEX(c2) 1 NULL NULL NULL 2 -3 NULL NULL NULL +3 NULL NULL SELECT pk, HEX(c2) FROM t1 ORDER BY pk; pk HEX(c2) 1 NULL 2 -3 NULL +3 DROP TABLE t1; From 792aaedb424e287b771059d19076573be557cdda Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 10 Nov 2016 21:32:31 +0000 Subject: [PATCH 065/233] MariaRocks port: Use another way to handle --force-restart Based on discussion at maria-developers@: - Remove 'Support --force-restart "pseudo-argument"' that was added a few csets before - Instead, use "source include/restart_mysqld.inc" in the testcases that need a freshly-started server --- mysql-test/lib/mtr_cases.pm | 8 -------- mysql-test/mysql-test-run.pl | 7 ++----- .../rocksdb/mysql-test/rocksdb/t/cardinality-master.opt | 1 - storage/rocksdb/mysql-test/rocksdb/t/cardinality.test | 2 ++ .../mysql-test/rocksdb/t/information_schema-master.opt | 2 +- .../rocksdb/mysql-test/rocksdb/t/information_schema.test | 2 ++ storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt | 2 +- storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test | 2 ++ .../mysql-test/rocksdb/t/records_in_range-master.opt | 1 - .../rocksdb/mysql-test/rocksdb/t/records_in_range.test | 2 ++ .../rocksdb/mysql-test/rocksdb/t/show_engine-master.opt | 1 - 
storage/rocksdb/mysql-test/rocksdb/t/show_engine.test | 2 ++ 12 files changed, 14 insertions(+), 18 deletions(-) delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt diff --git a/mysql-test/lib/mtr_cases.pm b/mysql-test/lib/mtr_cases.pm index 3c478d94838..2be903abf42 100644 --- a/mysql-test/lib/mtr_cases.pm +++ b/mysql-test/lib/mtr_cases.pm @@ -604,14 +604,6 @@ sub process_opts { # Fallthrough, add the --default-time-zone option } - # --force-restart is a "fake" option which just signals MTR that - # it should restart the mysqld server even if it was started with - # a matching set of options - if ($opt eq "--force-restart") { - $tinfo->{'force_restart'}= 1; - next; - } - # Ok, this was a real option, add it push(@{$tinfo->{$opt_name}}, $opt); } diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 4b37212c8cc..81291789a20 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5164,6 +5164,7 @@ sub stop_all_servers () { # Find out if server should be restarted for this test sub server_need_restart { my ($tinfo, $server)= @_; + if ( using_extern() ) { mtr_verbose_restart($server, "no restart for --extern server"); @@ -5256,11 +5257,7 @@ sub server_need_restart { sub servers_need_restart($) { my ($tinfo)= @_; - if (defined($tinfo->{'force_restart'})) { - return all_servers(); - } else { - return grep { server_need_restart($tinfo, $_); } all_servers(); - } + return grep { server_need_restart($tinfo, $_); } all_servers(); } diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt index ed6029f9a27..2cd3c8051f8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt @@ -1,4 +1,3 @@ --skip-rocksdb_debug_optimizer_no_zero_cardinality --rocksdb_compaction_sequential_deletes=0 ---force-restart --rocksdb_table_stats_sampling_pct=100 diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test index df2b0673315..0bc0ae4e900 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test @@ -1,5 +1,7 @@ --source include/have_rocksdb.inc +--source include/restart_mysqld.inc + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt index a12f583ef82..40b14167e17 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt @@ -1 +1 @@ ---force-restart --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test index 39bae56bea6..3afc7a0ddae 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_log_bin.inc +--source include/restart_mysqld.inc + --disable_warnings DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt index e41620e94f6..2672d4ff35e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt @@ -1 +1 @@ ---force-restart --binlog_format=row +--binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test index 107790f0c9a..4d7823f390c 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -4,6 +4,8 @@ --enable_connect_log +--source include/restart_mysqld.inc + # Save the initial number of concurrent sessions --source include/count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt index 99929434028..75a17cc157e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt @@ -1,4 +1,3 @@ ---force-restart --rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test index 9c939ef06e4..15db3288084 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test @@ -1,5 +1,7 @@ --source include/have_rocksdb.inc +--source include/restart_mysqld.inc + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt deleted file mode 100644 index cef79bc8585..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt +++ /dev/null @@ -1 +0,0 @@ ---force-restart diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test index 847d4d948f6..1df7e522cbd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/restart_mysqld.inc + # # SHOW ENGINE STATUS command # Checking that the command doesn't produce an error. 
From 223c14e7061e5c0e031b6e87f2ca1de0b13d6155 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 10 Nov 2016 22:50:01 +0000 Subject: [PATCH 066/233] MariaRocks port: more test result updates Make the same changes as with rocksdb.col_opt_null a few csets before: - MariaDB doesn't provide a default value for non-NULL columns - Wording in error messages is different - limits for DECIMAL columns are different - YEAR(2) is deprecated but still supported - Also added a --sorted_result for an I_S query. --- .../rocksdb/r/col_opt_unsigned.result | 12 ++++----- .../rocksdb/r/col_opt_zerofill.result | 14 +++++----- .../rocksdb/r/type_date_time.result | 26 +++++++++---------- .../mysql-test/rocksdb/r/type_enum.result | 4 +-- .../rocksdb/t/type_varchar_debug.test | 1 + 5 files changed, 27 insertions(+), 30 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result index f5d87b26d57..ef85ad1c237 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result @@ -161,7 +161,7 @@ PRIMARY KEY (a) ) ENGINE=rocksdb; SHOW COLUMNS IN t1; Field Type Null Key Default Extra -a decimal(10,0) unsigned NO PRI 0 +a decimal(10,0) unsigned NO PRI NULL b decimal(10,0) unsigned YES NULL INSERT INTO t1 (a,b) VALUES (1.0,-1.0); Warnings: @@ -416,7 +416,7 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 6 ); Warnings: -Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1916 Got overflow when converting '' to DECIMAL. 
Value truncated Warning 1264 Out of range value for column 'f' at row 1 Warning 1264 Out of range value for column 'f0' at row 1 Warning 1264 Out of range value for column 'r1_1' at row 1 @@ -488,11 +488,9 @@ r1_1 0.9 r1_1 0.9 r1_1 0.9 ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) UNSIGNED; -ERROR 42000: Display width out of range for column 'd0_0' (max = 255) ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) UNSIGNED; -ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) UNSIGNED; -ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. DROP TABLE t1; CREATE TABLE t1 ( a DOUBLE UNSIGNED, @@ -502,7 +500,7 @@ PRIMARY KEY (b) SHOW COLUMNS IN t1; Field Type Null Key Default Extra a double unsigned YES NULL -b float unsigned NO PRI 0 +b float unsigned NO PRI NULL INSERT INTO t1 (a,b) VALUES (1.0,-1.0); Warnings: Warning 1264 Out of range value for column 'b' at row 1 @@ -725,7 +723,7 @@ t tinyint(3) unsigned YES NULL s smallint(5) unsigned YES NULL m mediumint(8) unsigned YES NULL i int(10) unsigned YES NULL -b bigint(20) unsigned NO PRI 0 +b bigint(20) unsigned NO PRI NULL INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615); INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1); Warnings: diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result index a846e2925b9..1f9bf16ecfc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result @@ -161,7 +161,7 @@ PRIMARY KEY (a) ) ENGINE=rocksdb; SHOW COLUMNS IN t1; Field Type Null Key Default Extra -a decimal(10,0) unsigned zerofill NO PRI 0000000000 +a decimal(10,0) unsigned zerofill NO PRI NULL b decimal(10,0) unsigned zerofill YES NULL INSERT INTO t1 (a,b) VALUES (1.1,1234); 
Warnings: @@ -412,7 +412,7 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 6 ); Warnings: -Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated Warning 1264 Out of range value for column 'f' at row 1 Warning 1264 Out of range value for column 'f0' at row 1 Warning 1264 Out of range value for column 'r1_1' at row 1 @@ -484,11 +484,9 @@ r1_1 0.9 r1_1 0.9 r1_1 0.9 ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ZEROFILL; -ERROR 42000: Display width out of range for column 'd0_0' (max = 255) ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ZEROFILL; -ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ZEROFILL; -ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. DROP TABLE t1; CREATE TABLE t1 ( a DOUBLE ZEROFILL, @@ -498,7 +496,7 @@ PRIMARY KEY (b) SHOW COLUMNS IN t1; Field Type Null Key Default Extra a double unsigned zerofill YES NULL -b float unsigned zerofill NO PRI 000000000000 +b float unsigned zerofill NO PRI NULL INSERT INTO t1 (a,b) VALUES (1,1234.5); SELECT a,b FROM t1; a b @@ -699,7 +697,7 @@ i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 ALTER TABLE t1 ADD COLUMN i257 INT(257) ZEROFILL; -ERROR 42000: Display width out of range for column 'i257' (max = 255) +ERROR 42000: 
Display width out of range for 'i257' (max = 255) DROP TABLE t1; CREATE TABLE t1 ( t TINYINT ZEROFILL, @@ -715,7 +713,7 @@ t tinyint(3) unsigned zerofill YES NULL s smallint(5) unsigned zerofill YES NULL m mediumint(8) unsigned zerofill YES NULL i int(10) unsigned zerofill YES NULL -b bigint(20) unsigned zerofill NO PRI 00000000000000000000 +b bigint(20) unsigned zerofill NO PRI NULL INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0); SELECT t,s,m,i,b FROM t1; t s m i b diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result index 1a5ec573be7..ab008c34ae6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result @@ -10,7 +10,7 @@ y2 YEAR(2) , pk DATETIME PRIMARY KEY ) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra d date YES NULL @@ -19,7 +19,7 @@ ts timestamp YES NULL t time YES NULL y year(4) YES NULL y4 year(4) YES NULL -y2 year(4) YES NULL +y2 year(2) YES NULL pk datetime NO PRI NULL SET @tm = '2012-04-09 05:27:00'; INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES @@ -29,25 +29,25 @@ INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES (DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES ('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); Warnings: Warning 1265 Data truncated for column 'd' at row 1 -Warning 1264 Out of range value for column 'dt' at row 1 -Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 Warning 1264 Out of range value for column 't' at row 1 Warning 1264 Out of range value for column 'y' at row 1 Warning 1264 Out of range value for column 'y4' at row 1 Warning 1264 Out of range value for column 'y2' at row 1 SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 
838:59:59 2155 2155 1999 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result index 009b342182d..ed66303cc0f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result @@ -8,7 +8,7 @@ PRIMARY KEY (b) SHOW COLUMNS IN t1; Field Type Null Key Default Extra a enum('') YES NULL -b enum('test1','test2','test3','test4','test5') NO PRI test1 +b enum('test1','test2','test3','test4','test5') NO PRI NULL c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); SELECT a,b,c FROM t1; @@ -31,7 +31,7 @@ Note 1291 Column 'e' has duplicated value 'a' in ENUM SHOW COLUMNS IN t1; Field Type Null Key Default Extra a enum('') YES NULL -b enum('test1','test2','test3','test4','test5') NO PRI test1 +b enum('test1','test2','test3','test4','test5') NO PRI NULL c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL e enum('a','A') YES NULL INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test index 9ffeaede45d..4b014e970d4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test @@ -48,6 +48,7 @@ insert into t2 values (2, 'a', 'a'); select pk, col1, hex(col1), col2 from t2; --echo # Check the format version: +--sorted_result select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); From 183ab78bab3a7475b7fe9f539238bac06d95fa0f Mon Sep 
17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 12 Nov 2016 04:48:47 +0000 Subject: [PATCH 067/233] MariaRocks port: more test result updates, again - Make the same changes as in the previous cset and in a cset changing rocksdb.col_opt_null a few csets before. - MySQL 5.6.11 has fixes for Bug #68472, Bug #16394472, MariaDB doesn't. adjust the testcase accordingly. --- .../rocksdb/r/col_opt_not_null.result | 107 ++++++++---------- .../rocksdb/t/col_not_null_timestamp.inc | 13 ++- 2 files changed, 57 insertions(+), 63 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result index 005aa1e2989..f34de57bb0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result @@ -276,7 +276,7 @@ a+0 b+0 c+0 d+0 1 0 18446744073709551615 0 DROP TABLE t1; CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NOT NULL) ENGINE=rocksdb; -ERROR 42000: Display width out of range for column 'a' (max = 64) +ERROR 42000: Display width out of range for 'a' (max = 64) DROP TABLE IF EXISTS t1; #---------------------------------- # BIT NOT NULL columns without a default @@ -373,7 +373,7 @@ LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); -ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295) +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -398,12 +398,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c BLOB NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL 
auto_increment -c blob NO NULL +c blob NO '' ALTER TABLE t1 ADD COLUMN err BLOB NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -438,12 +436,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c TINYBLOB NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c tinyblob NO NULL +c tinyblob NO '' ALTER TABLE t1 ADD COLUMN err TINYBLOB NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -478,12 +474,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMBLOB NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c mediumblob NO NULL +c mediumblob NO '' ALTER TABLE t1 ADD COLUMN err MEDIUMBLOB NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -518,12 +512,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c LONGBLOB NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c longblob NO NULL +c longblob NO '' ALTER TABLE t1 ADD COLUMN err LONGBLOB NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -607,9 +599,9 @@ b1 b2 127 -128 2 3 ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NOT NULL; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NOT NULL' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right 
syntax to use near 'UNSIGNED NOT NULL' at line 1 ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NOT NULL; -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NOT NULL' at line 1 +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NOT NULL' at line 1 DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -768,7 +760,7 @@ v65000 varchar(65000) NO NULL CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; SHOW COLUMNS IN t2; Field Type Null Key Default Extra -v varchar(65532) NO PRI +v varchar(65532) NO PRI NULL INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. @@ -932,7 +924,7 @@ y2 YEAR(2) NOT NULL, pk DATETIME PRIMARY KEY ) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra d date NO NULL @@ -941,7 +933,7 @@ ts timestamp NO NULL t time NO NULL y year(4) NO NULL y4 year(4) NO NULL -y2 year(4) NO NULL +y2 year(2) NO NULL pk datetime NO PRI NULL SET @tm = '2012-04-09 05:27:00'; INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES @@ -951,27 +943,27 @@ INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES (DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES ('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); Warnings: Warning 1265 Data truncated for column 'd' at row 1 -Warning 1264 Out of range value for column 'dt' at row 1 -Warning 1264 Out of range value for column 'ts' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 Warning 1264 Out of range value for column 't' at row 1 Warning 1264 Out of range value for column 'y' at row 1 Warning 1264 Out of range value for column 'y4' at row 1 Warning 1264 Out of range value for column 'y2' at row 1 SELECT d,dt,ts,t,y,y4,y2 FROM t1; d dt ts t y y4 y2 -1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000 -9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 
2155 2155 1999 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000 -2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012 -0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 DROP TABLE t1; SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); DROP TABLE IF EXISTS t1; @@ -1060,10 +1052,10 @@ Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment c timestamp NO NULL INSERT INTO t1 (c) VALUES (NULL); -ERROR 23000: Column 'c' cannot be null INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); SELECT HEX(c) FROM t1; HEX(c) +323031332D31322D31322031323A31323A3132 323031322D31322D32312031323A32313A3132 DROP TABLE t1; #---------------------------------- @@ -1079,14 +1071,17 @@ pk int(11) NO PRI NULL auto_increment c timestamp NO 2012-12-21 12:21:12 ALTER TABLE t1 ADD COLUMN err TIMESTAMP NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' +set @save_ts=@@timestamp; +set timestamp=1478923914; INSERT INTO t1 (c) VALUES (NULL); -ERROR 23000: Column 'c' cannot be null +set timestamp=@save_ts; INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); INSERT INTO t1 () VALUES (); SELECT pk, HEX(c) FROM t1 ORDER BY pk; pk HEX(c) -1 323031322D31322D32312031323A32313A3132 +1 323031362D31312D31322030343A31313A3534 2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -1170,17 +1165,17 @@ DROP TABLE IF EXISTS t1; #---------------------------------- CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR(2) NOT 
NULL) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c year(4) NO NULL +c year(2) NO NULL INSERT INTO t1 (c) VALUES (NULL); ERROR 23000: Column 'c' cannot be null INSERT INTO t1 (c) VALUES ('12'); SELECT HEX(c) FROM t1; HEX(c) -7DC +C DROP TABLE t1; #---------------------------------- # YEAR(2) NOT NULL columns with a default @@ -1190,11 +1185,11 @@ pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR(2) NOT NULL DEFAULT '12' ) ENGINE=rocksdb; Warnings: -Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead. +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c year(4) NO 2012 +c year(2) NO 12 ALTER TABLE t1 ADD COLUMN err YEAR(2) NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -1203,8 +1198,8 @@ INSERT INTO t1 (c) VALUES ('12'); INSERT INTO t1 () VALUES (); SELECT pk, HEX(c) FROM t1 ORDER BY pk; pk HEX(c) -1 7DC -2 7DC +1 C +2 C DROP TABLE t1; ######################## # ENUM columns @@ -1422,11 +1417,11 @@ d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NOT NULL; -ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65. 
+ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NOT NULL; -ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NOT NULL; -ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30. +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -1741,7 +1736,7 @@ INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 6 ); Warnings: -Warning 1292 Truncated incorrect DECIMAL value: '' +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated Warning 1264 Out of range value for column 'f' at row 1 Warning 1264 Out of range value for column 'f0' at row 1 Warning 1264 Out of range value for column 'r1_1' at row 1 @@ -1813,11 +1808,9 @@ r1_1 0.9 r1_1 0.9 r1_1 0.9 ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NOT NULL; -ERROR 42000: Display width out of range for column 'd0_0' (max = 255) ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NOT NULL; -ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65. +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NOT NULL; -ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30. 
DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -2108,7 +2101,7 @@ i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 ALTER TABLE t1 ADD COLUMN i257 INT(257) NOT NULL; -ERROR 42000: Display width out of range for column 'i257' (max = 255) +ERROR 42000: Display width out of range for 'i257' (max = 255) DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -2448,7 +2441,7 @@ LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NOT NULL; -ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295) +ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) DROP TABLE t1; DROP TABLE IF EXISTS t1; #---------------------------------- @@ -2473,12 +2466,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c TEXT NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c text NO NULL +c text NO '' ALTER TABLE t1 ADD COLUMN err TEXT NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -2513,12 +2504,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c TINYTEXT NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; 
Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c tinytext NO NULL +c tinytext NO '' ALTER TABLE t1 ADD COLUMN err TINYTEXT NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -2553,12 +2542,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMTEXT NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c mediumtext NO NULL +c mediumtext NO '' ALTER TABLE t1 ADD COLUMN err MEDIUMTEXT NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); @@ -2593,12 +2580,10 @@ CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, c LONGTEXT NOT NULL DEFAULT '' ) ENGINE=rocksdb; -Warnings: -Warning 1101 BLOB/TEXT column 'c' can't have a default value SHOW COLUMNS IN t1; Field Type Null Key Default Extra pk int(11) NO PRI NULL auto_increment -c longtext NO NULL +c longtext NO '' ALTER TABLE t1 ADD COLUMN err LONGTEXT NOT NULL DEFAULT NULL; ERROR 42000: Invalid default value for 'err' INSERT INTO t1 (c) VALUES (NULL); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc index 4c5b89c9d00..812ada6f486 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc @@ -22,7 +22,8 @@ SHOW COLUMNS IN t1; # instead of an error we are getting the current timestamp # As of mysql-5.6.11, this no longer works, and we get an error: ---error ER_BAD_NULL_ERROR +# (MariaDB doesn't have this patch, so it doesnt produce an error): +# --error ER_BAD_NULL_ERROR INSERT INTO t1 (c) VALUES (NULL); eval INSERT INTO t1 (c) VALUES ($col_default); SELECT HEX(c) FROM t1; @@ -47,8 +48,16 @@ eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL; # 
instead of an error we are getting the current timestamp # As of mysql-5.6.11, this no longer works, and we get an error: ---error ER_BAD_NULL_ERROR +# (MariaDB doesn't have this patch, so it doesnt produce an error): +# --error ER_BAD_NULL_ERROR + +# Since we don't produce an error, the row will get inserted. Make it +# deterministic: +set @save_ts=@@timestamp; +set timestamp=1478923914; + INSERT INTO t1 (c) VALUES (NULL); +set timestamp=@save_ts; eval INSERT INTO t1 (c) VALUES ($col_default); eval INSERT INTO t1 () VALUES (); From 0f1821db19fece826a856529c001d58b33701612 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 12 Nov 2016 05:15:06 +0000 Subject: [PATCH 068/233] MariaRocks port: make rocksdb.rocksdb_cf_options test pass Copy mysql-test/include/restart_mysqld_with_option.inc from facebook/mysql-5.6 --- .../include/restart_mysqld_with_option.inc | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 mysql-test/include/restart_mysqld_with_option.inc diff --git a/mysql-test/include/restart_mysqld_with_option.inc b/mysql-test/include/restart_mysqld_with_option.inc new file mode 100644 index 00000000000..4250b368b1a --- /dev/null +++ b/mysql-test/include/restart_mysqld_with_option.inc @@ -0,0 +1,31 @@ + +if ($rpl_inited) +{ + if (!$allow_rpl_inited) + { + --die ERROR IN TEST: This script does not support replication + } +} + +# Write file to make mysql-test-run.pl expect the "crash", but don't start +# it until it's told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" > $_expect_file_name + +# Send shutdown to the connected server and give +# it 10 seconds to die before zapping it +shutdown_server 10; + +# Write file to make mysql-test-run.pl start up the server again +--exec echo "restart:$_mysqld_option" > $_expect_file_name + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back 
online again +--source include/wait_until_connected_again.inc + +# Turn off reconnect again +--disable_reconnect + From c4270952b7a6790fa3a3b6fe0608f8b0df99a433 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 12 Nov 2016 06:09:13 +0000 Subject: [PATCH 069/233] MariaRocks port: fix rocksdb.rocksdb_checksums to work for MariaDB in the mornings MySQL's log entries start with log entries like YYYY-MM-DD 08:25:04 ... while MariaDB YYYY-MM-DD 8:25:04 ... note that '08' turns into ' 8'. Made grep pattern to account for this. --- .../rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index 4bff091d698..96ec9c93b96 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -22,7 +22,7 @@ show variables like 'rocksdb_%checksum%'; create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t1 values (1,1,1),(2,2,2),(3,3,3); check table t1; ---exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t1" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t1" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 drop table t1; @@ -30,7 +30,7 @@ set session rocksdb_store_checksums=on; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t2 values (1,1,1),(2,2,2),(3,3,3); check table t2; ---exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t2" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t2" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --echo # Now, make a table that has both rows with checksums and without create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; @@ -39,7 
+39,7 @@ set session rocksdb_store_checksums=off; update t3 set b=3 where a=2; set session rocksdb_store_checksums=on; check table t3; ---exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 set session rocksdb_store_checksums=on; set session rocksdb_checksums_pct=5; @@ -56,7 +56,7 @@ while ($i<10000) } --enable_query_log check table t4; ---exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log +--exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log --exec perl ../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5 --remove_file $MYSQL_TMP_DIR/rocksdb_checksums.log set session rocksdb_checksums_pct=100; From a5f72fb3c2af1629787b3caece96afefaa60e8fa Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 12 Nov 2016 06:56:39 +0000 Subject: [PATCH 070/233] MariaRocks port: put MyRocks options into rocksdb/my.cnf - Before this, one had to provide appropriate --mysqld=.... options to mysql-test-run. - Also moved one option from rocksdb/suite.opt to rocksdb/my.cnf. 
--- storage/rocksdb/mysql-test/rocksdb/my.cnf | 6 ++++++ storage/rocksdb/mysql-test/rocksdb/suite.opt | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) delete mode 100644 storage/rocksdb/mysql-test/rocksdb/suite.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index 2ed68088259..b006f63bf0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -1,6 +1,12 @@ !include include/default_my.cnf [server] +rocksdb +skip-innodb +default-storage-engine=rocksdb + +ignore-db-dirs=.rocksdb + sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 rocksdb_lock_wait_timeout=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt deleted file mode 100644 index 4942baf3b85..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/suite.opt +++ /dev/null @@ -1 +0,0 @@ ---ignore-db-dirs=.rocksdb From 826753942fbedba51d97d1eff03e4d03699f6217 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 12 Nov 2016 09:20:36 +0000 Subject: [PATCH 071/233] MariaRocks port: temporarily? update ER_LOCK_WAIT_TIMEOUT error messages facebook/mysql-5.6 has error messages with extra info, MariaDB doesn't have extra info: -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction This patch updates .result files so that differences do not get in the way; if we decide to use new error message text in MariaDB, this patch can be applied in reverse. 
--- .../rocksdb/mysql-test/rocksdb/r/lock.result | 12 +++++----- .../rocksdb/r/lock_rows_not_exist.result | 6 ++--- .../mysql-test/rocksdb/r/rocksdb_locks.result | 2 +- .../rocksdb/r/select_for_update.result | 4 ++-- .../r/select_lock_in_share_mode.result | 4 ++-- .../mysql-test/rocksdb/r/unique_sec.result | 22 +++++++++---------- .../rocksdb/r/unique_sec_rev_cf.result | 22 +++++++++---------- 7 files changed, 36 insertions(+), 36 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock.result b/storage/rocksdb/mysql-test/rocksdb/r/lock.result index 8c89fa1b934..1e047c1823c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/lock.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock.result @@ -15,9 +15,9 @@ id2 COUNT(DISTINCT id) UPDATE t1 SET id=-1 WHERE id=1; connection con1; SELECT id,id2 FROM t1; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction LOCK TABLE t1 READ; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection default; LOCK TABLE t1 READ; UPDATE t1 SET id=1 WHERE id=1; @@ -27,9 +27,9 @@ SELECT COUNT(DISTINCT id) FROM t1; COUNT(DISTINCT id) 1 UPDATE t1 SET id=2 WHERE id=2; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table: test.t1 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction LOCK TABLE t1 WRITE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction LOCK TABLE t1 READ; UNLOCK TABLES; connection default; @@ -83,13 +83,13 @@ UNLOCK TABLES; FLUSH TABLES t1, t2 WITH READ LOCK; connection con1; INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on 
table metadata: test.t1 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection default; UNLOCK TABLES; FLUSH TABLES WITH READ LOCK; connection con1; INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on global read: +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection default; UNLOCK TABLES; INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result index cf764f89581..7898489d98d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result @@ -11,7 +11,7 @@ connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; id1 id2 id3 value connection con1; @@ -22,7 +22,7 @@ connection con2; ROLLBACK; BEGIN; UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; connection con1; ROLLBACK; @@ -32,7 +32,7 @@ connection con2; ROLLBACK; BEGIN; DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; connection default; disconnect con1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result index e4d11960e6e..f5ba3ecb7b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result @@ -30,7 +30,7 @@ set @@rocksdb_lock_wait_timeout=2; set autocommit=0; begin; select * from t1 where pk=1 for update; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection default; rollback; set autocommit=1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result index 713f5e85fe0..2890941a1b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result @@ -14,9 +14,9 @@ a b 1 a 3 a SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t1 SET b='c' WHERE b='a'; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection con1; COMMIT; SELECT a,b FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result index e6433dcbeef..22aec87cd5a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result @@ -17,9 +17,9 @@ a b # Currently, SELECT ... 
LOCK IN SHARE MODE works like # SELECT FOR UPDATE SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t1 SET b='c' WHERE b='a'; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction connection con1; COMMIT; SELECT a,b FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result index 63743bbf792..51acef8173e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -80,32 +80,32 @@ COUNT(*) 13 # Primary key should prevent duplicate on insert INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Primary key should prevent duplicate on update UPDATE t1 SET id1=30, id2=31 WHERE id2=10; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Unique secondary key should prevent duplicate on insert INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Unique secondary key should prevent duplicate on update UPDATE t1 SET id2=31, id3=32, id4=33 WHERE 
id2=8; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t1 SET id5=34 WHERE id2=8; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Adding multiple rows where one of the rows fail the duplicate # check should fail the whole statement INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), (36, 36, 36, 36, 36, 36, 36, 36), (37, 31, 32, 33, 37, 37, 37, 37), (38, 38, 38, 38, 38, 38, 38, 38); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), (36, 36, 36, 36, 36, 36, 36, 36), (37, 37, 37, 37, 34, 37, 37, 37), (38, 38, 38, 38, 38, 38, 38, 38); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # NULL values are unique and duplicates in value fields are ignored INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), (38, 31, 32, NULL, 38, 37, 37, 37), @@ -118,7 +118,7 @@ UPDATE t1 SET id5=37 WHERE id1=38; ERROR 23000: Duplicate entry '37' for key 'id5' # Fail on lock timeout for row modified in another transaction UPDATE t1 SET id5=34 WHERE id1=38; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # NULL values are unique UPDATE t1 SET id5=NULL WHERE value1 > 37; connection con1; @@ -136,9 +136,9 @@ INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); connection con2; # When transaction is pending, fail on lock acquisition INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); -ERROR HY000: Lock wait timeout 
exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT COUNT(*) FROM t1; COUNT(*) 17 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result index 5392c9acf90..210c74098af 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result @@ -80,32 +80,32 @@ COUNT(*) 13 # Primary key should prevent duplicate on insert INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Primary key should prevent duplicate on update UPDATE t1 SET id1=30, id2=31 WHERE id2=10; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Unique secondary key should prevent duplicate on insert INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Unique secondary key should prevent duplicate on update UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: 
test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t1 SET id5=34 WHERE id2=8; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # Adding multiple rows where one of the rows fail the duplicate # check should fail the whole statement INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), (36, 36, 36, 36, 36, 36, 36, 36), (37, 31, 32, 33, 37, 37, 37, 37), (38, 38, 38, 38, 38, 38, 38, 38); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), (36, 36, 36, 36, 36, 36, 36, 36), (37, 37, 37, 37, 34, 37, 37, 37), (38, 38, 38, 38, 38, 38, 38, 38); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # NULL values are unique and duplicates in value fields are ignored INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), (38, 31, 32, NULL, 38, 37, 37, 37), @@ -118,7 +118,7 @@ UPDATE t1 SET id5=37 WHERE id1=38; ERROR 23000: Duplicate entry '37' for key 'id5' # Fail on lock timeout for row modified in another transaction UPDATE t1 SET id5=34 WHERE id1=38; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction # NULL values are unique UPDATE t1 SET id5=NULL WHERE value1 > 37; connection con1; @@ -136,9 +136,9 @@ INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); connection con2; # When transaction is pending, fail on lock acquisition INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait 
timeout exceeded; try restarting transaction INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT COUNT(*) FROM t1; COUNT(*) 17 From 75f00a3388bd95106a765352bd428cb54c7ae720 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 13 Nov 2016 11:40:13 +0000 Subject: [PATCH 072/233] MariaRocks port: move --ignore-db-dirs back to suite.opt I've moved it to storage/rocksdb/mysql-test/rocksdb/my.cnf, but replication tests do not include that file? Will need to get back to this when getting replication to work --- storage/rocksdb/mysql-test/rocksdb/my.cnf | 1 - storage/rocksdb/mysql-test/rocksdb/suite.opt | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/suite.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index b006f63bf0c..d5f501e15ad 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -5,7 +5,6 @@ rocksdb skip-innodb default-storage-engine=rocksdb -ignore-db-dirs=.rocksdb sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt new file mode 100644 index 00000000000..8907deed6d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -0,0 +1,2 @@ +--ignore-db-dirs=.rocksdb + From e370d0a9ae2f440b4f5ae100906c70c9e9e6b186 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 13 Nov 2016 18:31:09 +0000 Subject: [PATCH 073/233] MariaRocks port: Make rocksdb.compression_zstd test skip itself when ZSTD is not supported - Introduce @@rocksdb_supported_compression_types read-only variable. It has a comma-separated list of compiled-in compression algorithms. 
- Make rocksdb.compression_zstd test skip itself when ZSTD support is not compiled in --- storage/rocksdb/ha_rocksdb.cc | 11 +++++ .../rocksdb/t/compression_zstd-master.opt | 1 - .../rocksdb/t/compression_zstd.test | 10 ++++ storage/rocksdb/rdb_utils.cc | 48 +++++++++++++++++++ storage/rocksdb/rdb_utils.h | 2 + 5 files changed, 71 insertions(+), 1 deletion(-) delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index fb43b8ce849..674892b7f18 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -419,6 +419,9 @@ static uint32_t rocksdb_table_stats_sampling_pct; static my_bool rocksdb_enable_bulk_load_api= 1; static my_bool rpl_skip_tx_api_var= 0; +char *compression_types_val= + const_cast(get_rocksdb_supported_compression_types()); + std::atomic rocksdb_snapshot_conflict_errors(0); static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) @@ -1107,6 +1110,13 @@ static MYSQL_SYSVAR_STR(datadir, "RocksDB data directory", nullptr, nullptr, "./.rocksdb"); +static MYSQL_SYSVAR_STR(supported_compression_types, + compression_types_val, + PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY, + "Compression algorithms supported by RocksDB", + nullptr, nullptr, + compression_types_val); + static MYSQL_SYSVAR_UINT( table_stats_sampling_pct, rocksdb_table_stats_sampling_pct, @@ -1228,6 +1238,7 @@ static struct st_mysql_sys_var* rocksdb_system_variables[]= { MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), MYSQL_SYSVAR(datadir), + MYSQL_SYSVAR(supported_compression_types), MYSQL_SYSVAR(create_checkpoint), MYSQL_SYSVAR(checksums_pct), diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt deleted file mode 100644 index 81b5acc4e56..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt +++ /dev/null @@ -1 +0,0 @@ 
---rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test index c146d43474c..263896e8487 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test @@ -1,4 +1,14 @@ --source include/have_rocksdb.inc +let $no_zstd=`select @@rocksdb_supported_compression_types NOT LIKE '%ZSTD%'`; + +if ($no_zstd) +{ + -- Skip Requires RocksDB to be built with ZStandard Compression support +} + +--let $_mysqld_option=--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0; +--source include/restart_mysqld_with_option.inc + create table t (id int primary key) engine=rocksdb; drop table t; diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index d0df370238e..b15e6ce9d9d 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -29,6 +29,17 @@ /* MyRocks header files */ #include "./ha_rocksdb.h" +/* + Both innobase/include/ut0counter.h and rocksdb/port/port_posix.h define + CACHE_LINE_SIZE. +*/ +#ifdef CACHE_LINE_SIZE +# undef CACHE_LINE_SIZE +#endif + +/* RocksDB header files */ +#include "util/compression.h" + namespace myrocks { /* @@ -310,4 +321,41 @@ bool rdb_database_exists(const std::string& db_name) return true; } + +/* + @brief + Return a comma-separated string with compiled-in compression types. + Not thread-safe. 
+*/ +const char *get_rocksdb_supported_compression_types() +{ + static std::string compression_methods_buf; + static bool inited=false; + if (!inited) + { + inited= true; + std::vector known_types= + { + rocksdb::kSnappyCompression, + rocksdb::kZlibCompression, + rocksdb::kBZip2Compression, + rocksdb::kLZ4Compression, + rocksdb::kLZ4HCCompression, + rocksdb::kXpressCompression, + rocksdb::kZSTDNotFinalCompression + }; + + for (auto typ : known_types) + { + if (CompressionTypeSupported(typ)) + { + if (compression_methods_buf.size()) + compression_methods_buf.append(","); + compression_methods_buf.append(CompressionTypeToString(typ)); + } + } + } + return compression_methods_buf.c_str(); +} + } // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 138d4bb661b..cf451954706 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -205,4 +205,6 @@ std::string rdb_hexdump(const char *data, std::size_t data_len, */ bool rdb_database_exists(const std::string& db_name); +const char *get_rocksdb_supported_compression_types(); + } // namespace myrocks From c82f573879f448f916e809a718140f370944bd10 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 2 Nov 2016 13:35:49 -0400 Subject: [PATCH 074/233] Use https instead of ssh(git@) for rocksdb submodule. 
--- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index c49f2ca2531..6419657e501 100644 --- a/.gitmodules +++ b/.gitmodules @@ -3,4 +3,4 @@ url = https://github.com/MariaDB/mariadb-connector-c [submodule "storage/rocksdb/rocksdb"] path = storage/rocksdb/rocksdb - url = git@github.com:facebook/rocksdb.git + url = https://github.com/facebook/rocksdb.git From c12a1bb9c26da8cc5fa4559ef26cb35c10b4380b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 18 Nov 2016 21:05:49 +0000 Subject: [PATCH 075/233] MariaRocks port: more testcase fixes rocksdb.lock: LOCK TABLE t1 LOW_PRIORITY WRITE does not produce a warining in MariaDB rocksdb.unique_check: - MariaDB's mtr prints connection actions - New (but temporary) ER_LOCK_WAIT_TIMEOUT text rocksdb.allow_pk_no_concurrent_insert: - Fix path rocksdb.locking_issues - Fix path - The test still fails but for a different reason now --- .../rocksdb/mysql-test/rocksdb/r/lock.result | 2 - .../mysql-test/rocksdb/r/unique_check.result | 36 ++++++++++++++++- .../t/allow_no_pk_concurrent_insert.test | 2 +- .../mysql-test/rocksdb/t/locking_issues.test | 40 +++++++++---------- 4 files changed, 55 insertions(+), 25 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock.result b/storage/rocksdb/mysql-test/rocksdb/r/lock.result index 1e047c1823c..36fea5b937f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/lock.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock.result @@ -5,8 +5,6 @@ DROP TABLE IF EXISTS t1, t2, t3; CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3); LOCK TABLE t1 LOW_PRIORITY WRITE; -Warnings: -Warning 1287 'LOW_PRIORITY WRITE' is deprecated and will be removed in a future release. 
Please use WRITE instead SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2; id2 COUNT(DISTINCT id) 1 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result index 0f3e7200d8a..b3262178024 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result @@ -1,63 +1,91 @@ +connect con1, localhost, root,,; +connect con2, localhost, root,,; +connect con3, localhost, root,,; +connection default; set debug_sync='RESET'; drop table if exists t1; create table t1 (id int, value int, primary key (id)) engine=rocksdb; create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +connection con1; begin; insert into t1 values (1,1); +connection con2; set session rocksdb_lock_wait_timeout=50; begin; insert into t1 values (1,2); +connection con1; commit; +connection con2; ERROR 23000: Duplicate entry '1' for key 'PRIMARY' commit; select * from t1; id value 1 1 truncate table t1; +connection con1; begin; insert into t2 values (1,1,1); +connection con2; begin; insert into t2 values (2,1,2); +connection con1; commit; +connection con2; ERROR 23000: Duplicate entry '1' for key 'id2' commit; select * from t2; id id2 value 1 1 1 truncate table t2; +connection con1; begin; insert into t1 values (1,1); +connection con2; begin; insert into t1 values (1,2); +connection con1; rollback; +connection con2; commit; select * from t1; id value 1 2 truncate table t1; +connection con1; begin; insert into t2 values (1,1,1); +connection con2; begin; insert into t2 values (2,1,2); +connection con1; rollback; +connection con2; commit; select * from t2; id id2 value 2 1 2 truncate table t2; +connection con1; set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1'; insert into t1 values (1,1); +connection con2; set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR 
go2'; insert into t2 values (1,1,1); +connection default; set debug_sync='now WAIT_FOR parked1'; set debug_sync='now WAIT_FOR parked2'; +connection con3; set session rocksdb_lock_wait_timeout=1; insert into t1 values (1,2); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction insert into t2 values (2,1,2); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.id2 +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; set debug_sync='now SIGNAL go1'; set debug_sync='now SIGNAL go2'; +connection con1; +connection con2; +connection default; insert into t1 values (1,2); ERROR 23000: Duplicate entry '1' for key 'PRIMARY' insert into t2 values (2,1,2); @@ -68,5 +96,9 @@ id value select * from t2; id id2 value 1 1 1 +connection default; set debug_sync='RESET'; +disconnect con1; +disconnect con2; +disconnect con3; drop table t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test index 033b1325151..8dda4372eb3 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test @@ -15,7 +15,7 @@ drop table if exists t1; # create the actual table CREATE TABLE t1 (a INT) ENGINE=rocksdb; -let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py root 127.0.0.1 $MASTER_MYPORT test t1 100 4; +let $exec = python ../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py root 127.0.0.1 $MASTER_MYPORT test t1 100 4; exec $exec; SELECT COUNT(*) from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test index 035046ae368..18a796573d1 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test @@ -1,67 +1,67 @@ --source include/have_rocksdb.inc let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case1_1.inc +--source include/locking_issues_case1_1.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case1_1.inc +--source include/locking_issues_case1_1.inc let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case1_2.inc +--source include/locking_issues_case1_2.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case1_2.inc +--source include/locking_issues_case1_2.inc let $lock_scanned_rows=0; let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case2.inc +--source include/locking_issues_case2.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case2.inc +--source include/locking_issues_case2.inc # Rerun the case2 tests with rocksdb_lock_scanned_rows on let $lock_scanned_rows=1; let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case2.inc +--source include/locking_issues_case2.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case2.inc +--source include/locking_issues_case2.inc let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case3.inc +--source include/locking_issues_case3.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case3.inc +--source include/locking_issues_case3.inc let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case4.inc +--source include/locking_issues_case4.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case4.inc +--source include/locking_issues_case4.inc let $isolation_level = 
REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case5.inc +--source include/locking_issues_case5.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case5.inc +--source include/locking_issues_case5.inc let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case6.inc +--source include/locking_issues_case6.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case6.inc +--source include/locking_issues_case6.inc let $lock_scanned_rows=0; let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case7.inc +--source include/locking_issues_case7.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case7.inc +--source include/locking_issues_case7.inc # Rerun the case7 tests with rocksdb_lock_scanned_rows on let $lock_scanned_rows=1; let $isolation_level = REPEATABLE READ; ---source suite/rocksdb/include/locking_issues_case7.inc +--source include/locking_issues_case7.inc let $isolation_level = READ COMMITTED; ---source suite/rocksdb/include/locking_issues_case7.inc +--source include/locking_issues_case7.inc From a4c1b5bba865a95bc197ac2104d495d3c4d07687 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 18 Nov 2016 23:18:56 +0000 Subject: [PATCH 076/233] MariaRocks port: make rocksdb.alter_table work The warning "ORDER BY ignored as there is a user-defined clustered index in the table 't1' was missing. The reason is different condition in copy_data_between_tables(): MariaDB has a change, it uses to->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX while MySQL uses: to->file->primary_key_is_clustered()). For some reason, MyRocks didn't have HA_TABLE_SCAN_ON_INDEX flag. It should have one, will raise that with upstream, too. 
--- storage/rocksdb/ha_rocksdb.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 04544ddce74..0c6034705a7 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -589,7 +589,8 @@ public: (m_pk_can_be_decoded? HA_PRIMARY_KEY_IN_READ_INDEX : 0) | HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_NULL_IN_KEY | - HA_PARTIAL_COLUMN_READ; + HA_PARTIAL_COLUMN_READ | + HA_TABLE_SCAN_ON_INDEX; } #ifdef MARIAROCKS_NOT_YET bool init_with_fields() override; From 3876f461a8baa4e878baf4604543c14b938905a6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 20 Nov 2016 20:54:36 +0000 Subject: [PATCH 077/233] MariaRocks port: MDEV-11318: rocksdb.rocksdb test fails - MariaDB produces a warning instead of error when the key length is too long - Trivial test results updates - rocksdb.rocksdb still fails but this commit makes some progress. --- .../mysql-test/rocksdb/r/rocksdb.result | 40 ++++++++++++------- .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 4 +- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 55388c65b99..94a95947282 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -74,7 +74,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables explain select * from t2 where a='abc'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 const PRIMARY PRIMARY 12 const # NULL +1 SIMPLE t2 const PRIMARY PRIMARY 12 const # select * from t2 where a='abc'; a b abc def @@ -190,7 +190,7 @@ insert into t8 values # Delete by PK explain delete from t8 where a='three'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t8 range 
PRIMARY PRIMARY 12 const # Using where +1 SIMPLE t8 range PRIMARY PRIMARY 12 NULL # Using where delete from t8 where a='three'; select * from t8; a col1 @@ -306,7 +306,7 @@ drop table t14; # create table t9 (i int primary key) engine=rocksdb; alter table t9 discard tablespace; -ERROR HY000: Table storage engine for 't9' doesn't have this option +ERROR HY000: Storage engine ROCKSDB of the table `test`.`t9` doesn't have this option drop table t9; # # MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ... @@ -368,11 +368,16 @@ row4 val1 # # Locking tests # +connect con1,localhost,root,,; # First, make sure there's no locking when transactions update different rows +connection con1; set autocommit=0; update t17 set col1='UPD1' where pk='row2'; +connection default; update t17 set col1='UPD2' where pk='row3'; +connection con1; commit; +connection default; select * from t17; pk col1 row2 UPD1 @@ -387,17 +392,22 @@ show variables like 'rocksdb_lock_wait_timeout'; Variable_name Value rocksdb_lock_wait_timeout 2 # Try updating the same row from two transactions +connection con1; begin; update t17 set col1='UPD2-AA' where pk='row2'; +connection default; update t17 set col1='UPD2-BB' where pk='row2'; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t17.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction set rocksdb_lock_wait_timeout=1000; update t17 set col1='UPD2-CC' where pk='row2'; +connection con1; rollback; +connection default; select * from t17 where pk='row2'; pk col1 row2 UPD2-CC drop table t17; +disconnect con1; # # MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable) # @@ -488,7 +498,7 @@ row3 row3data connection default; set rocksdb_lock_wait_timeout=1; update t27 set col1='row2-modified' where pk='row3'; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t27.PRIMARY +ERROR HY000: Lock wait 
timeout exceeded; try restarting transaction connection con1; rollback; connection default; @@ -1146,7 +1156,7 @@ BEGIN; UPDATE t1 SET i = 100; connect con1,localhost,root,,test; DELETE IGNORE FROM t1 ORDER BY i; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction disconnect con1; connection default; COMMIT; @@ -1669,14 +1679,14 @@ insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); explain select * from t1 where col1=2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref col1 col1 5 const # NULL +1 SIMPLE t1 ref col1 col1 5 const # select * from t1 where col1=2; pk col1 col2 2 2 2 explain select * from t1 where col2=3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref col2 col2 5 const # NULL +1 SIMPLE t1 ref col2 col2 5 const # select * from t1 where col2=3; pk col1 col2 3 3 3 @@ -1699,7 +1709,7 @@ insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); explain select * from t1 where col1=2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref col1 col1 5 const # NULL +1 SIMPLE t1 ref col1 col1 5 const # select * from t1 where col1=2; pk col1 col2 2 2 2 @@ -1749,9 +1759,9 @@ id key1 1 1 # Check that ALTER and RENAME are disallowed alter table t1 add col2 int; -ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' +ERROR 42000: This version of MariaDB doesn't yet support 'ALTER TABLE on table with per-index CF' rename table t1 to t2; -ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' +ERROR 42000: This version of MariaDB doesn't yet support 'ALTER TABLE on table with per-index CF' drop table t1; # Check detection of typos in $per_index_cf create table t1 ( @@ -1760,7 +1770,7 @@ key1 int, PRIMARY KEY (id), index (key1) comment 
'$per_idnex_cf' )engine=rocksdb; -ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf' +ERROR 42000: This version of MariaDB doesn't yet support 'column family name looks like a typo of $per_index_cf' # # Issue #22: SELECT ... FOR UPDATE takes a long time # @@ -2131,7 +2141,7 @@ id 6 begin; select * from t1 where id=4 for update; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1 where id=7 for update; id select * from t1 where id=9 for update; @@ -2163,7 +2173,9 @@ a b c 3 3abcde 3abcde drop table t1; create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; -ERROR 42000: Specified key was too long; max key length is 2048 bytes +Warnings: +Warning 1071 Specified key was too long; max key length is 2048 bytes +drop table t1; SET sql_mode = @old_mode; drop table t0; # diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index 9808d50a092..aa4b99f1c32 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -1688,8 +1688,10 @@ explain select b, a from t1 where b like '1%'; update t1 set b= '12345' where b = '2abcde'; select * from t1; drop table t1; ---error ER_TOO_LONG_KEY +# In MariaDB, the error becomes a warning: +# --error ER_TOO_LONG_KEY create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; +drop table t1; SET sql_mode = @old_mode; drop table t0; From 6fb94c3e430df86e8b585599a28f3208c5615081 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 21 Nov 2016 10:33:09 +0000 Subject: [PATCH 078/233] MDEV-11320: MariaRocks: rocksdb.type_text_indexes fails Backport the fix for BUG#81810 from facebook/mysql-5.6 tree. Added the original testcase. 
rocksdb.type_text_indexes still fails due to another problem. --- mysql-test/r/ctype_collate.result | 35 +++++++++++++++++++++++++++++++ mysql-test/r/myisam.result | 2 +- mysql-test/t/ctype_collate.test | 31 +++++++++++++++++++++++++++ sql/field.cc | 2 +- 4 files changed, 68 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/ctype_collate.result b/mysql-test/r/ctype_collate.result index f84613e086f..8ddc4e9a80a 100644 --- a/mysql-test/r/ctype_collate.result +++ b/mysql-test/r/ctype_collate.result @@ -719,3 +719,38 @@ DROP FUNCTION getText; DROP DATABASE test1; USE test; SET NAMES latin1; +# +# MDEV-11320, MySQL BUG#81810: Inconsistent sort order for blob/text between InnoDB and filesort +# +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); +drop table t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +PRIMARY KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); +explain +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range PRIMARY PRIMARY 34 NULL 4 Using where +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +hex(b) +00 +01 + +61 +explain +select hex(b) from t1 where b<'zzz' order by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using filesort +select hex(b) from t1 where b<'zzz' order by b; +hex(b) +00 +01 + +61 +drop table t1; diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 898609e1ec2..c47a9b9fc06 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -1108,8 +1108,8 @@ length(c1) c1 0 SELECT DISTINCT length(c1), c1 FROM t1 ORDER BY c1; length(c1) c1 -0 2 A +0 2 B DROP TABLE t1; End of 4.1 tests diff --git a/mysql-test/t/ctype_collate.test 
b/mysql-test/t/ctype_collate.test index 23d34deb981..6704395a0a1 100644 --- a/mysql-test/t/ctype_collate.test +++ b/mysql-test/t/ctype_collate.test @@ -308,3 +308,34 @@ DROP FUNCTION getText; DROP DATABASE test1; USE test; SET NAMES latin1; + +--echo # +--echo # MDEV-11320, MySQL BUG#81810: Inconsistent sort order for blob/text between InnoDB and filesort +--echo # + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); + + +drop table t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + PRIMARY KEY b (b(32)) +); + +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); + +explain +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; + +explain +select hex(b) from t1 where b<'zzz' order by b; +select hex(b) from t1 where b<'zzz' order by b; + +drop table t1; + diff --git a/sql/field.cc b/sql/field.cc index 846bb1fe674..da20f00a41f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8259,7 +8259,7 @@ void Field_blob::sort_string(uchar *to,uint length) uchar *blob; uint blob_length=get_length(); - if (!blob_length) + if (!blob_length && field_charset->pad_char == 0) bzero(to,length); else { From 9668b705f9b317868f15cf415168b92a2d320b7a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 13:52:12 +0000 Subject: [PATCH 079/233] MDEV-11462: MariaRocks: rocksdb.type_float_indexes fails Upadte test results (see jira entry for investigation about the source of the differences) --- .../mysql-test/rocksdb/r/type_float_indexes.result | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result index 9a50f66870c..99d6bbe45b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result @@ -85,7 +85,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES (4644,1422.22,466664.999,0.5,5); EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index d d 9 NULL # Using index +1 SIMPLE t1 index NULL d 9 NULL # Using index SELECT DISTINCT d FROM t1 ORDER BY d; d -1 @@ -114,7 +114,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES (4644,1422.22,466664.999,0.5,5); EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index d d 9 NULL # Using index +1 SIMPLE t1 index NULL d 9 NULL # Using index SELECT DISTINCT d FROM t1 ORDER BY d; d -1 @@ -146,7 +146,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES ERROR 23000: Duplicate entry '1.2345' for key 'f' EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index f f 5 NULL # Using index +1 SIMPLE t1 index NULL f 5 NULL # Using index SELECT DISTINCT f FROM t1 ORDER BY f; f -1 @@ -177,7 +177,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES (1.2345,0,0,0,6); EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index f f 5 NULL # Using index +1 SIMPLE t1 index NULL f 5 NULL # Using index SELECT DISTINCT f FROM t1 ORDER BY f; f -1 From f2219fe94d1da1da59cb39784e04ffb587c37c91 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 13:59:31 +0000 Subject: [PATCH 080/233] MDEV-11462: MariaRocks: rocksdb.type_float_indexes fails Also do similar result updates for: - rocksdb.type_date_time_indexese - rocksdb.type_fixed_indexes --- .../mysql-test/rocksdb/r/type_date_time_indexes.result | 4 ++-- .../rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result 
b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result index 187330836df..120d0d81b55 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result @@ -95,7 +95,7 @@ INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES (DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18'); EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index y y 6 NULL # Using index +1 SIMPLE t1 index NULL y 6 NULL # Using index SELECT y, COUNT(*) FROM t1 GROUP BY y; y COUNT(*) 1994 1 @@ -106,7 +106,7 @@ y COUNT(*) 2012 1 EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index y y 6 NULL # Using index; Using temporary; Using filesort +1 SIMPLE t1 index NULL y 6 NULL # Using index; Using temporary; Using filesort SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; y COUNT(*) 1994 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result index 8aa80244908..3f6a0f5d2b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result @@ -70,7 +70,7 @@ Warnings: Warning 1264 Out of range value for column 'd1' at row 6 EXPLAIN SELECT DISTINCT n1+n2 FROM t1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index n1_n2 n1_n2 37 NULL # Using index; Using temporary +1 SIMPLE t1 index NULL n1_n2 37 NULL # Using index; Using temporary SELECT DISTINCT n1+n2 FROM t1; n1+n2 0.0000 @@ -105,7 +105,7 @@ Warnings: Warning 1264 Out of range value for column 'd1' at row 6 EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index d2 d2 29 NULL # Using index +1 
SIMPLE t1 index NULL d2 29 NULL # Using index SELECT d2, COUNT(*) FROM t1 GROUP BY d2; d2 COUNT(*) 0.0000000000 1 @@ -116,7 +116,7 @@ d2 COUNT(*) 60.1234500000 2 EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index d2 d2 29 NULL # Using index; Using temporary; Using filesort +1 SIMPLE t1 index NULL d2 29 NULL # Using index; Using temporary; Using filesort SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; d2 COUNT(*) 0.0000000000 1 From 59d76665eed69974da81cdccba2df9f26b4e0967 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 15:35:36 +0000 Subject: [PATCH 081/233] MariaRocks port: Return correct value of HA_PRIMARY_KEY_IN_READ_INDEX flag This cset just re-uses the approach from facebook/mysql-5.6 (Perhaps we will have something different for MariaDB in the end). For now this is: Port this fix dd7eeae69503cb8ab6ddc8fd9e2fef451cc31a32 Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type Summary: Make open_binary_frm() set TABLE_SHARE::primary_key before it computes Also add the patch for https://github.com/facebook/mysql-5.6/issues/376 --- sql/ha_partition.cc | 16 +++++++++++++ sql/ha_partition.h | 2 ++ sql/handler.h | 2 ++ sql/table.cc | 44 +++++++++++++++++++++++++---------- storage/rocksdb/ha_rocksdb.cc | 5 ++-- storage/rocksdb/ha_rocksdb.h | 4 ++-- 6 files changed, 57 insertions(+), 16 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 400ca6129d6..92ac18d8f13 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -421,6 +421,22 @@ ha_partition::~ha_partition() } +bool ha_partition::init_with_fields() +{ + /* Pass the call to each partition */ + for (uint i= 0; i < m_tot_parts; i++) + { + if (m_file[i]->init_with_fields()) + return true; + } + /* Re-read table flags in case init_with_fields caused it to change */ + 
cached_table_flags= (m_file[0]->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + PARTITION_ENABLED_TABLE_FLAGS; + return false; +} + + /* Initialize partition handler object diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 3ea8d4a855d..74f5a06e4bc 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -307,6 +307,8 @@ public: ha_partition *clone_arg, MEM_ROOT *clone_mem_root_arg); ~ha_partition(); + + bool init_with_fields(); /* A partition handler has no characteristics in itself. It only inherits those from the underlying handlers. Here we set-up those constants to diff --git a/sql/handler.h b/sql/handler.h index dca052b0ac9..27416d4e137 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -2757,6 +2757,8 @@ public: { cached_table_flags= table_flags(); } + + virtual bool init_with_fields() { return false; } /* ha_ methods: pubilc wrappers for private virtual API */ int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked); diff --git a/sql/table.cc b/sql/table.cc index 4c68bcb468a..8fe1a930167 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2061,18 +2061,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (key == primary_key) { field->flags|= PRI_KEY_FLAG; - /* - If this field is part of the primary key and all keys contains - the primary key, then we can use any key to find this column - */ - if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX) - { - if (field->key_length() == key_part->length && - !(field->flags & BLOB_FLAG)) - field->part_of_key= share->keys_in_use; - if (field->part_of_sortkey.is_set(key)) - field->part_of_sortkey= share->keys_in_use; - } } if (field->key_length() != key_part->length) { @@ -2132,6 +2120,38 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, (ha_option & HA_ANY_INDEX_MAY_BE_UNIQUE)) set_if_bigger(share->max_unique_length,keyinfo->key_length); } + + /* + The next call is here for MyRocks/MariaRocks: Now, we have filled in + field and key definitions, give 
the storage engine a chance to adjust + its properties. + + MyRocks may (and typically does) adjust HA_PRIMARY_KEY_IN_READ_INDEX + flag in this call. + */ + if (handler_file->init_with_fields()) + goto err; + + if (primary_key < MAX_KEY && (handler_file->ha_table_flags() & + HA_PRIMARY_KEY_IN_READ_INDEX)) + { + keyinfo= &share->key_info[primary_key]; + key_part= keyinfo->key_part; + for (i=0 ; i < keyinfo->user_defined_key_parts ; key_part++,i++) + { + Field *field= key_part->field; + /* + If this field is part of the primary key and all keys contains + the primary key, then we can use any key to find this column + */ + if (field->key_length() == key_part->length && + !(field->flags & BLOB_FLAG)) + field->part_of_key= share->keys_in_use; + if (field->part_of_sortkey.is_set(primary_key)) + field->part_of_sortkey= share->keys_in_use; + } + } + if (primary_key < MAX_KEY && (share->keys_in_use.is_set(primary_key))) { diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 674892b7f18..ecfc6a2f3d8 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4075,7 +4075,7 @@ bool ha_rocksdb::same_table(const ha_rocksdb& other) const return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); } -#ifdef MARIAROCKS_NOT_YET + bool ha_rocksdb::init_with_fields() { uint pk= table_share->primary_key; @@ -4090,7 +4090,7 @@ bool ha_rocksdb::init_with_fields() cached_table_flags= table_flags(); return false; /* Ok */ } -#endif + /** Convert record from table->record[0] form into a form that can be written @@ -4884,6 +4884,7 @@ int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(err); } + init_with_fields(); setup_field_converters(); info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 0c6034705a7..63c8683daab 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -592,9 +592,9 
@@ public: HA_PARTIAL_COLUMN_READ | HA_TABLE_SCAN_ON_INDEX; } -#ifdef MARIAROCKS_NOT_YET +//#ifdef MARIAROCKS_NOT_YET bool init_with_fields() override; -#endif +//#endif /** @brief This is a bitmap of flags that indicates how the storage engine implements indexes. The current index flags are documented in From 9c083cd355166b4d5375ad0ec8c805fc81548532 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 17:25:51 +0000 Subject: [PATCH 082/233] MariaRocks port: update test result for rocksdb.rocksdb_parts - EXPLAIN is different - error message is - the output order is different, because MySQL knows when to use ha_partition::handle_unordered_scan_next_partition. Reading the table data without any ordering happens to produce MariaDB uses ha_partition::handle_ordered_index_scan for this index scan (this is a deficiency), which causes it to produce the row with pk=1 first. MariaDB uses --- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result index fd41048d253..7bebbbec205 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -47,11 +47,11 @@ insert into t1 values (11,20,20); insert into t1 values (12,20,20); explain select * from t1 force index(col1) where col1=10; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref col1 col1 5 const 2000 NULL +1 SIMPLE t1 ref col1 col1 5 const 2000 select * from t1 force index(col1) where col1=10; pk col1 col2 -2 10 10 1 10 10 +2 10 10 select * from t1 use index () where col1=10; pk col1 col2 2 10 10 @@ -94,7 +94,7 @@ drop table t1, t2; CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; INSERT INTO t1 VALUES(1,'a'); RENAME TABLE t1 TO db3.t3; -ERROR HY000: Error on rename of './test/t1' 
to './db3/t3' (errno: 122 - Internal (unspecified) error in handler) +ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: 122 "Internal (unspecified) error in handler") SELECT * FROM t1; c1 c2 1 a From 097bd3049c47eb62cc1002da85ca450c7d0591ce Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 20:49:10 +0000 Subject: [PATCH 083/233] MariaRocks port: update test results - EXPLAIN result differences are due to MariaDB's MDEV-11172 - Don't print the value of rocksdb_supported_compression_types to .result file - The rest is trivial Maria-fication --- .../mysql-test/rocksdb/r/rocksdb.result | 163 +++++++++--------- .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 6 +- 2 files changed, 85 insertions(+), 84 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 94a95947282..03416dce36a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -546,7 +546,7 @@ pk key1 col1 explain select key1 from t30; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 18 NULL # Using index +1 SIMPLE t30 index NULL key1 20 NULL # Using index select key1 from t30; key1 row1-key @@ -621,7 +621,7 @@ row3 row3-key row3-data explain select * from t30 order by key1 limit 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 18 NULL # NULL +1 SIMPLE t30 index NULL key1 20 NULL # select * from t30 order by key1 limit 3; pk key1 col1 row1 row1-key row1-data @@ -630,7 +630,7 @@ row3 row3-key row3-data explain select * from t30 order by key1 desc limit 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 18 NULL # NULL +1 SIMPLE t30 index NULL key1 20 NULL # select * from t30 order by key1 desc limit 3; pk key1 col1 row5 row5-key row5-data @@ -668,7 +668,7 @@ row4 row4-key row4-data 
explain select * from t30 order by pk limit 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL PRIMARY 18 NULL # NULL +1 SIMPLE t30 index NULL PRIMARY 18 NULL # select * from t30 order by pk limit 3; pk key1 col1 row1 row1-key row1-data @@ -865,7 +865,10 @@ ERROR 42S02: Unknown table 'test.t45' # Now it fails if there is data overlap with what # already exists # -show variables like 'rocksdb%'; +show variables +where +variable_name like 'rocksdb%' and +variable_name not like 'rocksdb_supported_compression_types'; Variable_name Value rocksdb_access_hint_on_compaction_start 1 rocksdb_advise_random_on_open ON @@ -1413,76 +1416,74 @@ drop table t0, t1; # show status like 'rocksdb%'; Variable_name Value -rocksdb_rows_deleted # -rocksdb_rows_inserted # -rocksdb_rows_read # -rocksdb_rows_updated # -rocksdb_system_rows_deleted # -rocksdb_system_rows_inserted # -rocksdb_system_rows_read # -rocksdb_system_rows_updated # -rocksdb_block_cache_add # -rocksdb_block_cache_data_hit # -rocksdb_block_cache_data_miss # -rocksdb_block_cache_filter_hit # -rocksdb_block_cache_filter_miss # -rocksdb_block_cache_hit # -rocksdb_block_cache_index_hit # -rocksdb_block_cache_index_miss # -rocksdb_block_cache_miss # -rocksdb_block_cachecompressed_hit # -rocksdb_block_cachecompressed_miss # -rocksdb_bloom_filter_prefix_checked # -rocksdb_bloom_filter_prefix_useful # -rocksdb_bloom_filter_useful # -rocksdb_bytes_read # -rocksdb_bytes_written # -rocksdb_compact_read_bytes # -rocksdb_compact_write_bytes # -rocksdb_compaction_key_drop_new # -rocksdb_compaction_key_drop_obsolete # -rocksdb_compaction_key_drop_user # -rocksdb_flush_write_bytes # -rocksdb_getupdatessince_calls # -rocksdb_git_date # -rocksdb_git_hash # -rocksdb_l0_num_files_stall_micros # -rocksdb_l0_slowdown_micros # -rocksdb_memtable_compaction_micros # -rocksdb_memtable_hit # -rocksdb_memtable_miss # -rocksdb_no_file_closes # -rocksdb_no_file_errors # -rocksdb_no_file_opens # 
-rocksdb_num_iterators # -rocksdb_number_block_not_compressed # -rocksdb_number_deletes_filtered # -rocksdb_number_keys_read # -rocksdb_number_keys_updated # -rocksdb_number_keys_written # -rocksdb_number_merge_failures # -rocksdb_number_multiget_bytes_read # -rocksdb_number_multiget_get # -rocksdb_number_multiget_keys_read # -rocksdb_number_reseeks_iteration # -rocksdb_number_sst_entry_delete # -rocksdb_number_sst_entry_merge # -rocksdb_number_sst_entry_other # -rocksdb_number_sst_entry_put # -rocksdb_number_sst_entry_singledelete # -rocksdb_number_stat_computes # -rocksdb_number_superversion_acquires # -rocksdb_number_superversion_cleanups # -rocksdb_number_superversion_releases # -rocksdb_rate_limit_delay_millis # -rocksdb_sequence_number # -rocksdb_snapshot_conflict_errors # -rocksdb_wal_bytes # -rocksdb_wal_synced # -rocksdb_write_other # -rocksdb_write_self # -rocksdb_write_timedout # -rocksdb_write_wal # +Rocksdb_rows_deleted # +Rocksdb_rows_inserted # +Rocksdb_rows_read # +Rocksdb_rows_updated # +Rocksdb_system_rows_deleted # +Rocksdb_system_rows_inserted # +Rocksdb_system_rows_read # +Rocksdb_system_rows_updated # +Rocksdb_block_cache_add # +Rocksdb_block_cache_data_hit # +Rocksdb_block_cache_data_miss # +Rocksdb_block_cache_filter_hit # +Rocksdb_block_cache_filter_miss # +Rocksdb_block_cache_hit # +Rocksdb_block_cache_index_hit # +Rocksdb_block_cache_index_miss # +Rocksdb_block_cache_miss # +Rocksdb_block_cachecompressed_hit # +Rocksdb_block_cachecompressed_miss # +Rocksdb_bloom_filter_prefix_checked # +Rocksdb_bloom_filter_prefix_useful # +Rocksdb_bloom_filter_useful # +Rocksdb_bytes_read # +Rocksdb_bytes_written # +Rocksdb_compact_read_bytes # +Rocksdb_compact_write_bytes # +Rocksdb_compaction_key_drop_new # +Rocksdb_compaction_key_drop_obsolete # +Rocksdb_compaction_key_drop_user # +Rocksdb_flush_write_bytes # +Rocksdb_getupdatessince_calls # +Rocksdb_l0_num_files_stall_micros # +Rocksdb_l0_slowdown_micros # +Rocksdb_memtable_compaction_micros # 
+Rocksdb_memtable_hit # +Rocksdb_memtable_miss # +Rocksdb_no_file_closes # +Rocksdb_no_file_errors # +Rocksdb_no_file_opens # +Rocksdb_num_iterators # +Rocksdb_number_block_not_compressed # +Rocksdb_number_deletes_filtered # +Rocksdb_number_keys_read # +Rocksdb_number_keys_updated # +Rocksdb_number_keys_written # +Rocksdb_number_merge_failures # +Rocksdb_number_multiget_bytes_read # +Rocksdb_number_multiget_get # +Rocksdb_number_multiget_keys_read # +Rocksdb_number_reseeks_iteration # +Rocksdb_number_sst_entry_delete # +Rocksdb_number_sst_entry_merge # +Rocksdb_number_sst_entry_other # +Rocksdb_number_sst_entry_put # +Rocksdb_number_sst_entry_singledelete # +Rocksdb_number_stat_computes # +Rocksdb_number_superversion_acquires # +Rocksdb_number_superversion_cleanups # +Rocksdb_number_superversion_releases # +Rocksdb_rate_limit_delay_millis # +Rocksdb_sequence_number # +Rocksdb_snapshot_conflict_errors # +Rocksdb_wal_bytes # +Rocksdb_wal_synced # +Rocksdb_write_other # +Rocksdb_write_self # +Rocksdb_write_timedout # +Rocksdb_write_wal # select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%'; VARIABLE_NAME ROCKSDB_ROWS_DELETED @@ -1516,8 +1517,6 @@ ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE ROCKSDB_COMPACTION_KEY_DROP_USER ROCKSDB_FLUSH_WRITE_BYTES ROCKSDB_GETUPDATESSINCE_CALLS -ROCKSDB_GIT_DATE -ROCKSDB_GIT_HASH ROCKSDB_L0_NUM_FILES_STALL_MICROS ROCKSDB_L0_SLOWDOWN_MICROS ROCKSDB_MEMTABLE_COMPACTION_MICROS @@ -1590,8 +1589,6 @@ ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE ROCKSDB_COMPACTION_KEY_DROP_USER ROCKSDB_FLUSH_WRITE_BYTES ROCKSDB_GETUPDATESSINCE_CALLS -ROCKSDB_GIT_DATE -ROCKSDB_GIT_HASH ROCKSDB_L0_NUM_FILES_STALL_MICROS ROCKSDB_L0_SLOWDOWN_MICROS ROCKSDB_MEMTABLE_COMPACTION_MICROS @@ -2402,10 +2399,10 @@ a truncate t1; INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020'); Warnings: -Warning 1264 Out of range value for column 'a' at row 1 -Warning 1264 Out of range value for column 'b' at row 1 
-Warning 1264 Out of range value for column 'a' at row 2 -Warning 1264 Out of range value for column 'b' at row 2 +Warning 1366 Incorrect integer value: '\x04 \x00 \x00 ' for column 'a' at row 1 +Warning 1366 Incorrect integer value: '\x04 \x00 \x00 ' for column 'b' at row 1 +Warning 1366 Incorrect integer value: ' \x04\x00 \x00 ' for column 'a' at row 2 +Warning 1366 Incorrect integer value: ' \x04\x00 \x00 ' for column 'b' at row 2 UNLOCK TABLES; DROP TABLE t1; # diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index aa4b99f1c32..168136bf01c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -786,7 +786,11 @@ drop table t45; --echo # Now it fails if there is data overlap with what --echo # already exists --echo # -show variables like 'rocksdb%'; +show variables +where + variable_name like 'rocksdb%' and + variable_name not like 'rocksdb_supported_compression_types'; + create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; insert into t47 values (1, 'row1'); insert into t47 values (2, 'row2'); From 7f43f736acbdbdd2984b29f5fe7197a687a0c4f8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 20:53:08 +0000 Subject: [PATCH 084/233] MDEV-11477: MariaRocks: rocksdb.type_varchar failure Update the test results (checked) --- storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index afb4d5013f2..2f611552c8c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -721,7 +721,7 @@ create table t (id int primary key, email varchar(100), KEY email_i (email(30))) insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc '); explain select 
'email_i' as index_name, count(*) AS count from t force index(email_i); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t index NULL email_i 33 NULL # Using index +1 SIMPLE t ALL NULL NULL NULL NULL # select 'email_i' as index_name, count(*) AS count from t force index(email_i); index_name count email_i 1 @@ -734,7 +734,7 @@ create table t (id int primary key, email varchar(100), KEY email_i (email(30))) insert into t values (1, 'a'); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t index NULL email_i 33 NULL # Using index +1 SIMPLE t ALL NULL NULL NULL NULL # select 'email_i' as index_name, count(*) AS count from t force index(email_i); index_name count email_i 1 From d903396c18f1660be3b55f57758c7d8362a3b13b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 2 Dec 2016 21:08:08 +0000 Subject: [PATCH 085/233] MDEV-11321: MariaRocks: type_binary_indexes, type_blob_indexes fail ... due to different index statistics Make statistics calculations in MariaRocks produce the same values that MyRocks produces. Added a comment in rdb_datadic.cc --- storage/rocksdb/ha_rocksdb.cc | 16 +++++++++++++++- storage/rocksdb/rdb_datadic.cc | 7 ++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index ecfc6a2f3d8..b55daff426c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -8464,11 +8464,25 @@ int ha_rocksdb::info(uint flag) if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) || rocksdb_debug_optimizer_n_rows > 0) { + /* + Make MariaRocks behave the same way as MyRocks does: + 1. SQL layer thinks that unique secondary indexes are not extended + with PK columns (both in MySQL and MariaDB) + 2. MariaDB also thinks that indexes with partially-covered columns + are not extended with PK columns. 
Use the same number of + keyparts that MyRocks would use. + */ + uint ext_key_parts2; + if (k->flags & HA_NOSAME) + ext_key_parts2= k->ext_key_parts; // This is #1 + else + ext_key_parts2= m_key_descr_arr[i]->get_key_parts(); // This is #2. + // Fake cardinality implementation. For example, (idx1, idx2, idx3) index // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1. // rec_per_key for the whole index is 1, and multiplied by 2^n if // n suffix columns of the index are not used. - x = 1 << (k->ext_key_parts-j-1); + x = 1 << (ext_key_parts2-j-1); } k->rec_per_key[j]= x; } diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 472af8141a7..3523397bae2 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -283,7 +283,12 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) key_part++; /* For "unique" secondary indexes, pretend they have - "index extensions" + "index extensions". + + MariaDB also has this property: if an index has a partially-covered + column like KEY(varchar_col(N)), then the SQL layer will think it is + not "extended" with PK columns. The code below handles this case, + also. 
*/ if (secondary_key && src_i+1 == key_info->ext_key_parts) { From ec58a1cca8ed9b69ad6765f4f764c18dba73ba98 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 06:17:57 +0000 Subject: [PATCH 086/233] MariaRocks port: update test results MariaDB produces warnings when INSERT IGNORE hits a duplicate (this was introduced in MEV-5168) --- storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result index 9d0fef276e9..3ae0769338f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result @@ -46,6 +46,8 @@ a b 5 e 6 f INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +Warnings: +Warning 1062 Duplicate entry '1' for key 'PRIMARY' INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b); SELECT a,b FROM t1; a b From 0d5257215a827907157caf301fbfb2cc5b7f07cc Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 06:21:31 +0000 Subject: [PATCH 087/233] MariaRocks port: update test results --- storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result index 1f017dfb990..f8ac42c4e72 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result @@ -48,7 +48,7 @@ a b 5 e DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); Warnings: -Error 1242 Subquery returns more than 1 row +Warning 1242 Subquery returns more than 1 row SELECT a,b FROM t1; a b 1 a diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result index 9dda807edea..d36371be45b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result @@ -25,7 +25,7 @@ ERROR 21000: Subquery returns more than 1 row UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b' WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); Warnings: -Error 1242 Subquery returns more than 1 row +Warning 1242 Subquery returns more than 1 row SELECT a,b FROM t1 ORDER BY pk; a b 1 upd2a From e6afa256e777561f055cd3dc6f7f482b5307132d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 12:42:30 +0000 Subject: [PATCH 088/233] MariaRocks port: Make ha_rocksdb::index_flags() return HA_CLUSTERED_INDEX for PK Failure to do so caused a failure in rocksdb.rocksdb test. When test_if_cheaper_ordering computes is_covering= ..., - MySQL calls table->file->primary_key_is_clustered() - MariaDB calls (table->file->index_flags(nr, 0, 1) & HA_CLUSTERED_INDEX) The first produces true, the second used to produce false. --- storage/rocksdb/ha_rocksdb.cc | 2 +- storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index b55daff426c..30eeca53780 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6160,7 +6160,7 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const we need to explicitly "allow" them, otherwise SQL layer will miss some plans. 
*/ - base_flags |= HA_KEYREAD_ONLY; + base_flags |= HA_KEYREAD_ONLY | HA_CLUSTERED_INDEX; } else { diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 03416dce36a..f38629534d5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -1288,6 +1288,8 @@ drop table t1; # CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB; INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0); +Warnings: +Warning 1062 Duplicate entry '1' for key 'PRIMARY' SELECT * FROM t1; pk a b 1 157 0 @@ -1807,7 +1809,7 @@ insert into t1 values (1,1),(2,2),(3,3); # The following must not use 'Using filesort': explain select * from t1 ORDER BY id; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL PRIMARY 4 NULL # NULL +1 SIMPLE t1 index NULL PRIMARY 4 NULL # drop table t1; # # Issue #26: Index-only scans for DATETIME and TIMESTAMP From b504c56bff69ee683f9fa17f019ed978de6fd9d8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 12:46:05 +0000 Subject: [PATCH 089/233] MariaRocks: test result MariaDB-fication The test stopped giving deadlock errors after the previous cset. 
--- .../r/rocksdb_concurrent_delete.result | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result index 9d6d368c686..ea9114c14d1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result @@ -1,56 +1,84 @@ +connect con, localhost, root,,; +connection default; SET debug_sync='RESET'; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +connection default; SET debug_sync='now WAIT_FOR parked'; DELETE FROM t1 WHERE pk = 1; SET debug_sync='now SIGNAL go'; +connection con; pk a 2 2 3 3 +connection default; +disconnect con; set debug_sync='RESET'; drop table t1; +connect con, localhost, root,,; +connection default; SET debug_sync='RESET'; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +connection default; SET debug_sync='now WAIT_FOR parked'; DELETE FROM t1 WHERE pk = 3; SET debug_sync='now SIGNAL go'; +connection con; pk a 2 2 1 1 +connection default; +disconnect con; set debug_sync='RESET'; drop table t1; +connect con, localhost, root,,; +connection default; SET debug_sync='RESET'; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +connection default; SET debug_sync='now WAIT_FOR parked'; DELETE 
FROM t1 WHERE pk = 1; SET debug_sync='now SIGNAL go'; +connection con; pk a 2 2 3 3 +connection default; +disconnect con; set debug_sync='RESET'; drop table t1; +connect con, localhost, root,,; +connection default; SET debug_sync='RESET'; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +connection default; SET debug_sync='now WAIT_FOR parked'; DELETE FROM t1 WHERE pk = 3; SET debug_sync='now SIGNAL go'; +connection con; pk a 2 2 1 1 +connection default; +disconnect con; set debug_sync='RESET'; drop table t1; From 8018bb737c273eb7ba32d945b04a9fb2ccca2e79 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 12:52:34 +0000 Subject: [PATCH 090/233] MariaRocks port: use correct MTR command separators Failure to do so caused the command not to be run, and test failed --- .../rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc index dcceb38cf99..6ecd13bac41 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -37,8 +37,9 @@ let $wait_condition = select count(*) = 0 --source include/wait_condition.inc --enable_query_log -let $MYSQL_SST_DUMP=../storage/rocksdb/sst_dump ---exec bash ../storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes +let $MYSQL_SST_DUMP=../storage/rocksdb/sst_dump; +exec bash ../storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes ; + eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes; 
eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size; eval SET GLOBAL rocksdb_compaction_sequential_deletes_window= $save_rocksdb_compaction_sequential_deletes_window; From 00e3869a668cd0dfb33fd81e39d6b383688d0621 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 14:00:23 +0000 Subject: [PATCH 091/233] MariaRocks port: get rocksdb.checksum_table to work MariaDB has a different checksumming algorithm. Old one can be used by setting @@old=1 --- .../rocksdb/mysql-test/rocksdb/r/checksum_table.result | 5 +++++ storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result index 06a4c3f6f1c..bb209856a97 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result @@ -66,6 +66,10 @@ col1 varchar(10), col2 tinyint, col3 double ) engine=rocksdb; +# MariaDB has changed the checksumming algorithm +# Enable the old algorithm: +set @tmp_old=@@old; +set old=1; checksum table t1; Table Checksum test.t1 0 @@ -84,4 +88,5 @@ insert into t1 values (6, '', NULL, 2.78); checksum table t1; Table Checksum test.t1 3183101003 +set old=@tmp_old; drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test index fbe8028f6d5..51c639a85dd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test @@ -58,6 +58,12 @@ create table t1 ( col3 double ) engine=rocksdb; +--echo # MariaDB has changed the checksumming algorithm +--echo # Enable the old algorithm: +set @tmp_old=@@old; +set old=1; + + checksum table t1; insert into t1 values (1, NULL, NULL, NULL); @@ -72,5 +78,7 @@ insert into t1 values (5, 'xxxYYYzzzT', 
NULL, 2.78); insert into t1 values (6, '', NULL, 2.78); checksum table t1; +set old=@tmp_old; + drop table t1; From 4f90605a3d1e3f20e8ae79c869958e475685dc45 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 14:56:38 +0000 Subject: [PATCH 092/233] MariaRocks port: make datetime-aware tests work in any timezone This fixes - rocksdb.col_opt_not_null - rocksdb.col_opt_null - rocksdb.type_date_time --- .../rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result | 6 ++++++ storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result | 6 ++++++ storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result | 3 +++ storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test | 5 +++++ storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test | 4 ++++ storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc | 4 +++- 6 files changed, 27 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result index f34de57bb0c..7f197f05b15 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result @@ -912,7 +912,11 @@ DROP TABLE t1; ######################## # date and time columns ######################## +set @col_opt_not_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; CREATE TABLE t1 ( d DATE NOT NULL, dt DATETIME NOT NULL, @@ -964,6 +968,7 @@ d dt ts t y y4 y2 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; DROP TABLE t1; SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); DROP TABLE IF EXISTS t1; @@ -1201,6 +1206,7 @@ pk HEX(c) 1 C 2 C DROP TABLE t1; +set time_zone= @col_opt_not_nullsave_time_zone; 
######################## # ENUM columns ######################## diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result index 3a59302bf58..fb0e2af4362 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result @@ -802,7 +802,11 @@ DROP TABLE t1; ######################## # date and time columns ######################## +set @col_opt_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; CREATE TABLE t1 ( d DATE NULL, dt DATETIME NULL, @@ -854,6 +858,7 @@ d dt ts t y y4 y2 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( @@ -1021,6 +1026,7 @@ pk HEX(c2) 2 C 3 C DROP TABLE t1; +set time_zone=@col_opt_nullsave_time_zone; ######################## # ENUM columns ######################## diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result index ab008c34ae6..91e7ac257c3 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result @@ -1,4 +1,6 @@ DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; CREATE TABLE t1 ( d DATE , dt DATETIME , @@ -50,4 +52,5 @@ d dt ts t y y4 y2 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test 
b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test index fbb5a932fc0..1de4ccee0f7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test @@ -92,6 +92,9 @@ let $extra_col_opts = NOT NULL; --echo # date and time columns --echo ######################## +set @col_opt_not_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; + --source type_date_time.inc SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); @@ -123,6 +126,8 @@ SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); --let $col_default = '12' --source col_not_null.inc +set time_zone= @col_opt_not_nullsave_time_zone; + --echo ######################## --echo # ENUM columns --echo ######################## diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test index 18f2601eb16..c41abb78642 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test @@ -88,6 +88,9 @@ let $extra_col_opts = NULL; --echo # date and time columns --echo ######################## +set @col_opt_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; + --source type_date_time.inc --let $col_type = DATE @@ -114,6 +117,7 @@ let $extra_col_opts = NULL; --let $col_default = '12' --source col_null.inc +set time_zone=@col_opt_nullsave_time_zone; --echo ######################## --echo # ENUM columns diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc index 69d1154ea39..18ed7436b62 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc @@ -6,7 +6,8 @@ --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings - +set @save_time_zone=@@time_zone; +set time_zone='UTC'; eval CREATE TABLE t1 ( d DATE $extra_col_opts, dt DATETIME $extra_col_opts, @@ -39,6 +40,7 @@ INSERT INTO 
t1 (d,dt,ts,t,y,y4,y2,pk) VALUES ('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); SELECT d,dt,ts,t,y,y4,y2 FROM t1; +set time_zone=@save_time_zone; DROP TABLE t1; From 044ad5d3d9be0c922d86544420c9cae29684f9fa Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 18:17:21 +0000 Subject: [PATCH 093/233] MariaRocks port: make rocksdb.show_table_status test pass The test relies on having userstat enabled, which is not ON by default in MariaDB. --- .../rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt index 83bb6823ee3..843f7012cfa 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt @@ -1,2 +1,3 @@ --rocksdb_debug_optimizer_n_rows=1000 --rocksdb_table_stats_sampling_pct=100 +--userstat=ON From aecc95a15c13a1c0c05b2f28d2f095f7cd2929d4 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 18:29:36 +0000 Subject: [PATCH 094/233] MariaRocks port: fix rocksdb.autoinc_vars --- storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result | 7 +++---- storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test | 2 ++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result index b14a7a4c0a9..0fb3d96c58f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result @@ -52,13 +52,12 @@ DROP TABLE t1; SET auto_increment_increment = 500; SET auto_increment_offset = 300; CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +# In MariaDB, this is an error: INSERT INTO t1 (a) VALUES (NULL); -Warnings: -Warning 1264 Out of 
range value for column 'a' at row 1 +ERROR 22003: Out of range value for column 'a' at row 1 SELECT LAST_INSERT_ID(); LAST_INSERT_ID() -127 +850 SELECT a FROM t1 ORDER BY a; a -127 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test index c3f3550e303..2fe0a2e3c08 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test @@ -57,6 +57,8 @@ SET auto_increment_increment = 500; SET auto_increment_offset = 300; CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +--echo # In MariaDB, this is an error: +--error HA_ERR_AUTOINC_ERANGE INSERT INTO t1 (a) VALUES (NULL); SELECT LAST_INSERT_ID(); SELECT a FROM t1 ORDER BY a; From 9a49210ec3e1d65bad9df1c4017d3d0b8fd476d6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 20:37:45 +0000 Subject: [PATCH 095/233] MariaRocks port: disable rocksdb.slwo_query_log test (MDEV-11480) --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 5e459b0d471..b2c78a36f1e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -2,3 +2,6 @@ cons_snapshot_serializable : Consistent read does not work on serializable level_read_uncommitted : Not supported level_serializable: Not supported + +# Tests that are (temporarily) disabled in MariaDB: +slow_query_log: MDEV-11480 From 81c05c59310a5fcbdbc6733cd5afc2021318c799 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 3 Dec 2016 20:56:40 +0000 Subject: [PATCH 096/233] MariaRocks port: disable rocksdb.select_for_update_skip_locked_nowait --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index b2c78a36f1e..967c2446cd4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -5,3 +5,4 @@ level_serializable: Not supported # Tests that are (temporarily) disabled in MariaDB: slow_query_log: MDEV-11480 +select_for_update_skip_locked_nowait: MDEV-11481 From 05a593dfc92c5274bcf971b98b45ab79b9869b5c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 4 Dec 2016 08:28:34 +0000 Subject: [PATCH 097/233] MariaRocks: temporary disable read-free replication Making read-free replication to work is filed as MDEV-10976 --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 967c2446cd4..4d3ea626526 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -6,3 +6,4 @@ level_serializable: Not supported # Tests that are (temporarily) disabled in MariaDB: slow_query_log: MDEV-11480 select_for_update_skip_locked_nowait: MDEV-11481 +rpl_read_free: MDEV-10976 From 7b708ee3ed294a5af7857fc48361141025525c76 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 4 Dec 2016 09:28:51 +0000 Subject: [PATCH 098/233] MDEV-11329: MariaRocks: rocksdb.add_index_inplace fails - Make ha_rocksdb::check_if_supported_inplace_alter() take into account the Alter_inplace_info::ALTER_PARTITIONED flag - Adjust the testcase to work in MariaDB --- storage/rocksdb/ha_rocksdb.cc | 3 ++- .../rocksdb/r/add_index_inplace.result | 23 +++++++++++-------- .../rocksdb/t/add_index_inplace.cnf | 5 ---- .../rocksdb/t/add_index_inplace.test | 16 ++++++++----- 4 files changed, 25 insertions(+), 22 deletions(-) delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf diff --git 
a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 30eeca53780..702657b64cc 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -9656,7 +9656,8 @@ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( if (ha_alter_info->handler_flags & ~(my_core::Alter_inplace_info::DROP_INDEX | my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | - my_core::Alter_inplace_info::ADD_INDEX)) + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ALTER_PARTITIONED)) { DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); } diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result index 4a707d3a6f4..e4d38086580 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -252,7 +252,7 @@ DROP INDEX kij ON t1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -282,10 +282,10 @@ INSERT INTO t1 (a, b) VALUES (2, 6); INSERT INTO t1 (a, b) VALUES (3, 7); # crash_during_online_index_creation flush logs; -SET SESSION debug="+d,crash_during_online_index_creation"; +SET SESSION debug_dbug="+d,crash_during_online_index_creation"; ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_online_index_creation"; +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -301,14 +301,14 @@ DROP TABLE t1; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; # crash_during_index_creation_partition flush logs; -SET SESSION debug="+d,crash_during_index_creation_partition"; +SET SESSION 
debug_dbug="+d,crash_during_index_creation_partition"; ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -336,17 +336,17 @@ DROP TABLE t1; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; # crash_during_index_creation_partition flush logs; -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; # expected assertion failure from sql layer here for alter rollback call mtr.add_suppression("Assertion `0' failed."); call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -358,7 +358,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -371,8 +371,11 @@ SELECT COUNT(*) FROM t1; COUNT(*) 100 DROP TABLE t1; +set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +set global rocksdb_strict_collation_check=1; CREATE TABLE t1 (a INT, b TEXT); ALTER TABLE 
t1 ADD KEY kb(b(10)); ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (binary, latin1_bin, utf8_bin). ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; +set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf deleted file mode 100644 index 45ec29033c6..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf +++ /dev/null @@ -1,5 +0,0 @@ -[mysql] -no-defaults - -[mysqld.1] -rocksdb_strict_collation_check=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test index e0d7a4465c8..0f781b92e1c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc --source include/have_debug.inc +--source include/have_partition.inc --disable_warnings drop table if exists t1; @@ -181,14 +182,14 @@ INSERT INTO t1 (a, b) VALUES (3, 7); flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_online_index_creation"; +SET SESSION debug_dbug="+d,crash_during_online_index_creation"; --error 2013 ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; --enable_reconnect --source include/wait_until_connected_again.inc -SET SESSION debug="-d,crash_during_online_index_creation"; +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; SHOW CREATE TABLE t1; CHECK TABLE t1; @@ -214,14 +215,14 @@ while ($i <= $max) { flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="+d,crash_during_index_creation_partition"; --error 2013 ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; --enable_reconnect --source 
include/wait_until_connected_again.inc -SET SESSION debug="-d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; SHOW CREATE TABLE t1; @@ -253,7 +254,7 @@ while ($i <= $max) { flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; --echo # expected assertion failure from sql layer here for alter rollback call mtr.add_suppression("Assertion `0' failed."); @@ -266,7 +267,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; --enable_reconnect --source include/wait_until_connected_again.inc -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; SHOW CREATE TABLE t1; @@ -280,6 +281,8 @@ SELECT COUNT(*) FROM t1; DROP TABLE t1; # test failure in prepare phase (due to collation) +set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +set global rocksdb_strict_collation_check=1; CREATE TABLE t1 (a INT, b TEXT); --error 1105 @@ -287,4 +290,5 @@ ALTER TABLE t1 ADD KEY kb(b(10)); ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; +set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; From 34b66fcc98c43960ac7848a9e2675efdb983cc91 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 4 Dec 2016 14:27:10 +0000 Subject: [PATCH 099/233] MariaRocks port: get rocksdb.locking_issues test to work Also update test results (MariaDB's dialect changes) --- storage/rocksdb/ha_rocksdb.cc | 17 +- .../rocksdb/r/locking_issues.result | 200 +++++++++++++++++- 2 files changed, 203 insertions(+), 14 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 702657b64cc..9cb5c03509e 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -8636,6 +8636,19 @@ int ha_rocksdb::update_row(const uchar 
*old_data, uchar *new_data) } +/* + MariaDB's temporary: MyRocks has this function in sql/handler.cc: +*/ + +bool can_hold_read_locks_on_select(THD *thd, thr_lock_type lock_type) +{ + return (lock_type == TL_READ_WITH_SHARED_LOCKS + || lock_type == TL_READ_NO_INSERT + || (lock_type != TL_IGNORE + && thd->lex->sql_command != SQLCOM_SELECT)); +} + + /* The following function was copied from ha_blackhole::store_lock: */ THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, THR_LOCK_DATA **to, @@ -8668,11 +8681,7 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, uint sql_command = my_core::thd_sql_command(thd); if ((lock_type == TL_READ && in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || -#ifdef MARIAROCKS_NOT_YET can_hold_read_locks_on_select(thd, lock_type)) -#else - false) -#endif { ulong tx_isolation = my_core::thd_tx_isolation(thd); if (sql_command != SQLCOM_CHECKSUM && diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result index 4b237dcb7aa..44d38befbe0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result @@ -8,17 +8,25 @@ DROP TABLE IF EXISTS t0; CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; id1 id2 value +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; INSERT INTO t0 VALUES (1,5,0); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: 
Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -30,17 +38,25 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; id1 id2 value +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; INSERT INTO t0 VALUES (1,5,0); -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -52,17 +68,25 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t0 WHERE id1=1 FOR UPDATE; id1 id2 value 1 1 0 +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; id1 id2 value INSERT INTO t0 VALUES (1,5,0); +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect 
con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -74,17 +98,25 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; SELECT * FROM t0 WHERE id1=1 FOR UPDATE; id1 id2 value 1 1 0 +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; id1 id2 value INSERT INTO t0 VALUES (1,5,0); +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -99,18 +131,26 @@ SELECT @@global.rocksdb_lock_scanned_rows; 0 CREATE TABLE t0(id INT PRIMARY KEY, value INT); INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con1; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; id value 2 1 5 1 +connection con2; UPDATE t0 SET VALUE=10 WHERE id=1; UPDATE t0 SET VALUE=10 WHERE id=5; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +connection con2; SELECT * FROM t0 WHERE id=4 FOR UPDATE; id value 4 0 @@ -122,7 +162,11 @@ id value 3 0 4 0 5 1 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -137,18 +181,26 @@ SELECT @@global.rocksdb_lock_scanned_rows; 0 
CREATE TABLE t0(id INT PRIMARY KEY, value INT); INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con1; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; id value 2 1 5 1 +connection con2; UPDATE t0 SET VALUE=10 WHERE id=1; UPDATE t0 SET VALUE=10 WHERE id=5; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +connection con2; SELECT * FROM t0 WHERE id=4 FOR UPDATE; id value 4 0 @@ -160,7 +212,11 @@ id value 3 0 4 0 5 1 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -176,17 +232,27 @@ SELECT @@global.rocksdb_lock_scanned_rows; SET GLOBAL rocksdb_lock_scanned_rows=ON; CREATE TABLE t0(id INT PRIMARY KEY, value INT); INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con1; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; id value 2 1 5 1 +connection con2; UPDATE t0 SET VALUE=10 WHERE id=1; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; SET GLOBAL rocksdb_lock_scanned_rows=0; @@ -203,17 +269,27 @@ SELECT @@global.rocksdb_lock_scanned_rows; SET GLOBAL 
rocksdb_lock_scanned_rows=ON; CREATE TABLE t0(id INT PRIMARY KEY, value INT); INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con1; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; id value 2 1 5 1 +connection con2; UPDATE t0 SET VALUE=10 WHERE id=1; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; SET GLOBAL rocksdb_lock_scanned_rows=0; @@ -225,11 +301,19 @@ SET GLOBAL rocksdb_lock_scanned_rows=0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +connection con1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -240,11 +324,19 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +connection con1; ERROR 
40001: Deadlock found when trying to get lock; try restarting transaction +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -255,11 +347,19 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; INSERT INTO t0 VALUES(200001,1), (-1,1); +connection con1; id value +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -270,11 +370,19 @@ DROP TABLE t0; DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; INSERT INTO t0 VALUES(200001,1), (-1,1); +connection con1; id value +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -286,15 +394,23 @@ DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; DELETE FROM t0 WHERE id=190000; COMMIT; +connection con1; ERROR 40001: Deadlock found when trying 
to get lock; try restarting transaction COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -306,15 +422,23 @@ DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; DELETE FROM t0 WHERE id=190000; COMMIT; +connection con1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -326,15 +450,23 @@ DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; UPDATE t0 SET id=200001 WHERE id=190000; COMMIT; +connection con1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -346,15 +478,23 @@ DROP TABLE IF EXISTS t0; CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); Inserting 200,000 rows UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; SELECT 
* FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; UPDATE t0 SET id=200001 WHERE id=190000; COMMIT; +connection con1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t0; ----------------------------------------------------------------------- @@ -370,12 +510,18 @@ CREATE TABLE t1(id INT PRIMARY KEY, value INT); CREATE TABLE t2(id INT PRIMARY KEY, value INT); INSERT INTO t1 VALUES (1,1), (2,2), (3,3); INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; lock_scanned_rows is 0 +connection con1; UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; UPDATE t2 SET value=value+100; SELECT * FROM t2; id value @@ -384,7 +530,11 @@ id value 3 103 4 104 5 105 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t1; DROP TABLE t2; @@ -401,12 +551,18 @@ CREATE TABLE t1(id INT PRIMARY KEY, value INT); CREATE TABLE t2(id INT PRIMARY KEY, value INT); INSERT INTO t1 VALUES (1,1), (2,2), (3,3); INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; lock_scanned_rows is 0 +connection con1; UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; UPDATE t2 SET value=value+100; SELECT * FROM t2; id value @@ -415,7 +571,11 @@ id value 3 103 4 104 5 105 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE 
t1; DROP TABLE t2; @@ -433,14 +593,20 @@ CREATE TABLE t1(id INT PRIMARY KEY, value INT); CREATE TABLE t2(id INT PRIMARY KEY, value INT); INSERT INTO t1 VALUES (1,1), (2,2), (3,3); INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; lock_scanned_rows is 1 +connection con1; UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; UPDATE t2 SET value=value+100 WHERE id=3; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); SELECT * FROM t2; id value @@ -449,7 +615,11 @@ id value 3 3 4 104 5 105 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t1; DROP TABLE t2; SET GLOBAL rocksdb_lock_scanned_rows=0; @@ -468,14 +638,20 @@ CREATE TABLE t1(id INT PRIMARY KEY, value INT); CREATE TABLE t2(id INT PRIMARY KEY, value INT); INSERT INTO t1 VALUES (1,1), (2,2), (3,3); INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; +connection con2; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; lock_scanned_rows is 1 +connection con1; UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; UPDATE t2 SET value=value+100 WHERE id=3; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY +ERROR HY000: Lock wait timeout exceeded; try restarting transaction UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); SELECT * FROM t2; id value @@ -484,7 +660,11 @@ id value 3 
3 4 104 5 105 +connection con1; COMMIT; +connection default; +disconnect con1; +disconnect con2; DROP TABLE t1; DROP TABLE t2; SET GLOBAL rocksdb_lock_scanned_rows=0; From 8e2cfde953a1ff40b34f199e2c400548c18bac31 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 4 Dec 2016 23:55:54 +0300 Subject: [PATCH 100/233] MariaRocks port: fix rocksdb.collation, rocksdb.collation_exception - port Regex_list_handler from facebook/mysql-5.6/sql/handler.cc put it into a separate file in storage/rocksdb directory - Adjust the build process so that the main library is build with Regex_list_handler (which has dependencies on the server), while RocksDB tools are built without it. - Un-comment @@rdb_collation_exceptions handling in ha_rocksdb.cc - Also adjust rocksdb_set_collation_exception_list() to free the old variable value and alloc the new one. --- storage/rocksdb/CMakeLists.txt | 19 ++-- storage/rocksdb/ha_rocksdb.cc | 19 ++-- .../mysql-test/rocksdb/r/collation.result | 1 + .../mysql-test/rocksdb/t/collation.test | 5 +- storage/rocksdb/rdb_mariadb_server_port.cc | 97 +++++++++++++++++++ storage/rocksdb/rdb_mariadb_server_port.h | 73 ++++++++++++++ 6 files changed, 193 insertions(+), 21 deletions(-) create mode 100644 storage/rocksdb/rdb_mariadb_server_port.cc create mode 100644 storage/rocksdb/rdb_mariadb_server_port.h diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 247f6b06ad4..d2c6c8b7659 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -43,9 +43,9 @@ set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frt set_source_files_properties(event_listener.cc PROPERTIES COMPILE_FLAGS -frtti) set_source_files_properties(rdb_cf_options.cc PROPERTIES COMPILE_FLAGS -frtti) - -SET(ROCKSDB_SOURCES - ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h +ADD_CONVENIENCE_LIBRARY( + ROCKSDB_AUX_LIB + ha_rocksdb_proto.h logger.h rdb_comparator.h rdb_datadic.cc rdb_datadic.h @@ -64,6 +64,11 @@ 
SET(ROCKSDB_SOURCES ${ROCKSDB_LIB_SOURCES} ) +SET(ROCKSDB_SOURCES + rdb_mariadb_server_port.cc rdb_mariadb_server_port.h + ha_rocksdb.cc ha_rocksdb.h +) + IF(WITH_FB_TSAN) SET(PIC_EXT "_pic") ELSE() @@ -102,7 +107,7 @@ IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") ADD_DEFINITIONS(-DZSTD) ENDIF() -SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") +SET(rocksdb_static_libs ROCKSDB_AUX_LIB ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY LINK_LIBRARIES ${rocksdb_static_libs} @@ -121,11 +126,11 @@ IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) ) set_source_files_properties(${ROCKSDB_TOOL_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(sst_dump rocksdb_se) + TARGET_LINK_LIBRARIES(sst_dump ${rocksdb_static_libs}) MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(ldb rocksdb_se) + TARGET_LINK_LIBRARIES(ldb ${rocksdb_static_libs}) MYSQL_ADD_EXECUTABLE(mysql_ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/tools/mysql_ldb.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_se) + TARGET_LINK_LIBRARIES(mysql_ldb ${rocksdb_static_libs}) ENDIF() diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 9cb5c03509e..7444738dcac 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -62,6 +62,7 @@ #include "./rdb_index_merge.h" #include "./rdb_mutex_wrapper.h" #include "./rdb_threads.h" +#include "./rdb_mariadb_server_port.h" #ifdef TARGET_OS_LINUX extern my_bool cachedev_enabled; @@ -155,11 +156,9 @@ Rdb_binlog_manager binlog_manager; static Rdb_background_thread rdb_bg_thread; -#ifdef MARIAROCKS_NOT_YET // List of table names (using regex) that are exceptions to the strict // collation check requirement. 
Regex_list_handler *rdb_collation_exceptions; -#endif static const char* const ERRSTR_ROLLBACK_ONLY = "This transaction was rolled back and cannot be " @@ -3345,13 +3344,11 @@ static int rocksdb_init_func(void *p) mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex, MY_MUTEX_INIT_FAST); -#ifdef MARIAROCKS_NOT_YET #if defined(HAVE_PSI_INTERFACE) rdb_collation_exceptions = new Regex_list_handler( key_rwlock_collation_exception_list); #else rdb_collation_exceptions = new Regex_list_handler(); -#endif #endif mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex, @@ -3731,9 +3728,9 @@ static int rocksdb_done_func(void *p) mysql_mutex_destroy(&rdb_open_tables.m_mutex); mysql_mutex_destroy(&rdb_sysvars_mutex); -#ifdef MARIAROCKS_NOT_YET + delete rdb_collation_exceptions; -#endif + mysql_mutex_destroy(&rdb_collation_data_mutex); mysql_mutex_destroy(&rdb_mem_cmp_space_mutex); @@ -5126,11 +5123,7 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, { if (!rdb_is_index_collation_supported( table_arg->key_info[i].key_part[part].field) && -#ifdef MARIAROCKS_NOT_YET !rdb_collation_exceptions->matches(tablename_sys)) -#else - true) -#endif { std::string collation_err; for (auto coll : RDB_INDEX_COLLATIONS) @@ -10715,7 +10708,6 @@ rocksdb_set_rate_limiter_bytes_per_sec( void rdb_set_collation_exception_list(const char *exception_list) { -#ifdef MARIAROCKS_NOT_YET DBUG_ASSERT(rdb_collation_exceptions != nullptr); if (!rdb_collation_exceptions->set_patterns(exception_list)) @@ -10723,7 +10715,6 @@ void rdb_set_collation_exception_list(const char *exception_list) my_core::warn_about_bad_patterns(rdb_collation_exceptions, "strict_collation_exceptions"); } -#endif } void @@ -10736,7 +10727,9 @@ rocksdb_set_collation_exception_list(THD* thd, rdb_set_collation_exception_list(val); - *static_cast(var_ptr) = val; + const char *val_copy= my_strdup(val, MYF(0)); + my_free(*static_cast(var_ptr)); + *static_cast(var_ptr) = val_copy; } void 
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result index b6bde05cc70..6792b50d00c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -116,6 +116,7 @@ CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rock CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (binary, latin1_bin, utf8_bin). DROP TABLE a, b; +call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; Invalid pattern in strict_collation_exceptions: abc\ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index c3fcaad776c..e9f53c157f4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -1,5 +1,7 @@ --source include/have_rocksdb.inc ---source include/have_fullregex.inc +# MariaDB doesn't have server variables to check for GCC version, so the +# following check is commented out: +# --source include/have_fullregex.inc SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; @@ -165,6 +167,7 @@ CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rock CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE a, b; +call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); # test invalid regex (trailing escape) --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; diff --git a/storage/rocksdb/rdb_mariadb_server_port.cc 
b/storage/rocksdb/rdb_mariadb_server_port.cc new file mode 100644 index 00000000000..59315c199cd --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_server_port.cc @@ -0,0 +1,97 @@ +#include + + +/* MySQL includes */ +#include "./debug_sync.h" +#include "./my_bit.h" +#include "./my_stacktrace.h" +#include "./sql_table.h" +#include "./my_global.h" +#include "./log.h" +#include +#include +#ifdef MARIAROCKS_NOT_YET +#include +#endif + +#include + +/* MyRocks includes */ +#include "./rdb_threads.h" + +#include "rdb_mariadb_server_port.h" + +void warn_about_bad_patterns(const Regex_list_handler* regex_list_handler, + const char *name) +{ + // There was some invalid regular expression data in the patterns supplied + + // NO_LINT_DEBUG + sql_print_warning("Invalid pattern in %s: %s", name, + regex_list_handler->bad_pattern().c_str()); +} + + +/* + Set the patterns string. If there are invalid regex patterns they will + be stored in m_bad_patterns and the result will be false, otherwise the + result will be true. +*/ +bool Regex_list_handler::set_patterns(const std::string& pattern_str) +{ + bool pattern_valid= true; + + // Create a normalized version of the pattern string with all delimiters + // replaced by the '|' character + std::string norm_pattern= pattern_str; + std::replace(norm_pattern.begin(), norm_pattern.end(), m_delimiter, '|'); + + // Make sure no one else is accessing the list while we are changing it. + mysql_rwlock_wrlock(&m_rwlock); + + // Clear out any old error information + m_bad_pattern_str.clear(); + + try + { + // Replace all delimiters with the '|' operator and create the regex + // Note that this means the delimiter can not be part of a regular + // expression. This is currently not a problem as we are using the comma + // character as a delimiter and commas are not valid in table names. 
+ const std::regex* pattern= new std::regex(norm_pattern); + + // Free any existing regex information and setup the new one + delete m_pattern; + m_pattern= pattern; + } + catch (const std::regex_error& e) + { + // This pattern is invalid. + pattern_valid= false; + + // Put the bad pattern into a member variable so it can be retrieved later. + m_bad_pattern_str= pattern_str; + } + + // Release the lock + mysql_rwlock_unlock(&m_rwlock); + + return pattern_valid; +} + +bool Regex_list_handler::matches(const std::string& str) const +{ + DBUG_ASSERT(m_pattern != nullptr); + + // Make sure no one else changes the list while we are accessing it. + mysql_rwlock_rdlock(&m_rwlock); + + // See if the table name matches the regex we have created + bool found= std::regex_match(str, *m_pattern); + + // Release the lock + mysql_rwlock_unlock(&m_rwlock); + + return found; +} + diff --git a/storage/rocksdb/rdb_mariadb_server_port.h b/storage/rocksdb/rdb_mariadb_server_port.h new file mode 100644 index 00000000000..e424fbb91f8 --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_server_port.h @@ -0,0 +1,73 @@ +/* + A temporary header to resolve WebScaleSQL vs MariaDB differences + when porting MyRocks to MariaDB. 
+*/ +#ifndef RDB_MARIADB_SERVER_PORT_H +#define RDB_MARIADB_SERVER_PORT_H + +#include "my_global.h" /* ulonglong */ +#include "atomic_stat.h" +#include "my_pthread.h" +#include +#include + +/* + Code that is on SQL layer in facebook/mysql-5.6, + but is part of the storage engine in MariaRocks +*/ +#include + +class Regex_list_handler +{ + private: +#if defined(HAVE_PSI_INTERFACE) + const PSI_rwlock_key& m_key; +#endif + + char m_delimiter; + std::string m_bad_pattern_str; + const std::regex* m_pattern; + + mutable mysql_rwlock_t m_rwlock; + + Regex_list_handler(const Regex_list_handler& other)= delete; + Regex_list_handler& operator=(const Regex_list_handler& other)= delete; + + public: +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler(const PSI_rwlock_key& key, + char delimiter= ',') : + m_key(key), +#else + Regex_list_handler(char delimiter= ',') : +#endif + m_delimiter(delimiter), + m_bad_pattern_str(""), + m_pattern(nullptr) + { + mysql_rwlock_init(key, &m_rwlock); + } + + ~Regex_list_handler() + { + mysql_rwlock_destroy(&m_rwlock); + delete m_pattern; + } + + // Set the list of patterns + bool set_patterns(const std::string& patterns); + + // See if a string matches at least one pattern + bool matches(const std::string& str) const; + + // See the list of bad patterns + const std::string& bad_pattern() const + { + return m_bad_pattern_str; + } +}; + +void warn_about_bad_patterns(const Regex_list_handler* regex_list_handler, + const char *name); + +#endif From 73c702eae5715821afdd248794989ea2511685b8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 15 Dec 2016 15:34:56 +0300 Subject: [PATCH 101/233] MariaRocks port: fix rocksdb.rpl_row_rocksdb test The 'combinations' system in MTR ignores settings from $testname.cnf, and tries to run RBR test with binlog_format=mixed. Fixed by using "source include/have_binlog_format_row.inc" which tells MTR to only run the test with binlog_format=ROW. 
The test still needs its $testname.cnf to include suite/rpl/my.cnf. This is necessary to setup replication (Using "source include/master-slave.inc" will have MTR set up replication for the test, but only as long as the testsuite doesn't have its own suite/rocksdb/my.cnf. We do have that file (and it doesn't set up replication), so we need to have $testname.cnf to setup replication). --- .../rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result | 9 ++++++--- storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf | 6 ------ .../rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test | 1 + 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result index 50905527447..de47f3b39b0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result @@ -1,20 +1,21 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. -Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
[connection master] +connection master; drop table if exists t1; +connection master; select @@binlog_format; @@binlog_format ROW create table t1 (pk int primary key) engine=rocksdb; insert into t1 values (1),(2),(3); include/sync_slave_sql_with_master.inc +connection slave; select * from t1; pk 1 2 3 +connection master; drop table t1; # # Issue #18: slave crash on update with row based binary logging @@ -29,6 +30,7 @@ update t1 set value2=100 where id=1; update t1 set value2=200 where id=2; update t1 set value2=300 where id=3; include/sync_slave_sql_with_master.inc +connection slave; select * from t1 where id=1; id value value2 1 1 100 @@ -38,5 +40,6 @@ id value value2 select * from t1 where id=3; id value value2 3 1 300 +connection master; drop table t1; include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf index b46b417c257..09a1c853ffc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf @@ -1,7 +1 @@ !include suite/rpl/my.cnf - -[mysqld.1] -binlog_format=row -[mysqld.2] -binlog_format=row - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test index 2f00741afbb..b103dfc3ef8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test @@ -1,6 +1,7 @@ --source include/have_rocksdb.inc source include/master-slave.inc; +source include/have_binlog_format_row.inc; connection master; --disable_warnings From 0d3d2a5747bbbff45626106a00e2f5a3df539086 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 15 Dec 2016 16:11:22 +0300 Subject: [PATCH 102/233] MariaRocks port: mark rocksdb.rpl_statement to be run only in statement mode. 
--- storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result | 3 --- storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result index 6f9a2be0f4e..cdf0c37e339 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result @@ -1,7 +1,4 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. -Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. [connection master] connection master; drop table if exists t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test index b4126266956..39a21e67f05 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc source include/master-slave.inc; +source include/have_binlog_format_statement.inc; + connection master; --disable_warnings drop table if exists t1; From 24224839d8bff70dc1c6413db742dc090e6802bd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 15 Dec 2016 19:25:32 +0300 Subject: [PATCH 103/233] MariaRocks port: make rocksdb.rpl_statement_not_found work --- .../rocksdb/r/rpl_statement_not_found.result | 16 +++++++++------- .../mysql-test/rocksdb/t/rpl_row_not_found.inc | 14 ++++++++++---- .../rocksdb/t/rpl_statement_not_found.test | 1 + 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result 
b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result index 8d7f3460e86..9e71ffa72f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result @@ -1,7 +1,4 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. -Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. [connection master] connection master; drop table if exists t1; @@ -17,19 +14,22 @@ kp2 int, col1 int, key (kp1,kp2) ) engine=rocksdb; +set @tmp_binlog_format=@@binlog_format; +set @@binlog_format=ROW; insert into t2 select a,a,a,a from t1; create table t3 like t2; insert into t3 select * from t2; +set binlog_format=@tmp_binlog_format; include/sync_slave_sql_with_master.inc connection slave; -set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; include/stop_slave.inc include/start_slave.inc connection master; update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; connection slave; set debug_sync= 'now WAIT_FOR Reached'; -set global debug = ''; +set global debug_dbug = ''; set sql_log_bin=0; delete from t2 where pk=2; delete from t2 where pk=3; @@ -43,7 +43,7 @@ pk kp1 kp2 col1 1 1 1 1 4 4 4 4 connection slave; -set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; include/stop_slave.inc include/start_slave.inc connection master; @@ -51,7 +51,7 @@ update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; connection slave; call mtr.add_suppression("Deadlock found when trying to get lock"); set debug_sync= 'now WAIT_FOR Reached'; -set global debug = ''; +set global 
debug_dbug = ''; set sql_log_bin=0; delete from t3 where pk=2; delete from t3 where pk=3; @@ -64,5 +64,7 @@ pk kp1 kp2 col1 0 0 0 0 1 1 1 1 4 4 4 100 +set debug_sync='RESET'; +connection master; drop table t0, t1, t2, t3; include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc index 5a78979f048..9575abb7019 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc @@ -21,9 +21,14 @@ create table t2 ( col1 int, key (kp1,kp2) ) engine=rocksdb; +# Use RBR for next few statements to avoid the +# 'Unsafe statement written to the binary log' warnings. +set @tmp_binlog_format=@@binlog_format; +set @@binlog_format=ROW; insert into t2 select a,a,a,a from t1; create table t3 like t2; insert into t3 select * from t2; +set binlog_format=@tmp_binlog_format; # For GitHub issue#166 @@ -41,7 +46,7 @@ insert into t3 select * from t2; connection slave; let $old_debug = `select @@global.debug`; -set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; --source include/stop_slave.inc --source include/start_slave.inc @@ -50,7 +55,7 @@ update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; connection slave; set debug_sync= 'now WAIT_FOR Reached'; -eval set global debug = '$old_debug'; +eval set global debug_dbug = '$old_debug'; set sql_log_bin=0; delete from t2 where pk=2; delete from t2 where pk=3; @@ -63,7 +68,7 @@ select * from t2 where pk < 5; # For GitHub issue#162 (result file must be updated after fixing #162) connection slave; -set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; --source include/stop_slave.inc --source include/start_slave.inc @@ -73,7 +78,7 @@ update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; connection slave; call mtr.add_suppression("Deadlock 
found when trying to get lock"); set debug_sync= 'now WAIT_FOR Reached'; -eval set global debug = '$old_debug'; +eval set global debug_dbug = '$old_debug'; set sql_log_bin=0; delete from t3 where pk=2; delete from t3 where pk=3; @@ -85,6 +90,7 @@ connection slave; # col1 for pk=4 should be 100 select * from t3 where pk < 5; +set debug_sync='RESET'; # Cleanup connection master; drop table t0, t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test index d85fb0a1772..019e83acf14 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test @@ -1,2 +1,3 @@ +--source include/have_binlog_format_statement.inc --source rpl_row_not_found.inc From 9f7dc2bbf553bf8433dc18b68678bbce1a162f04 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 15 Dec 2016 20:56:27 +0300 Subject: [PATCH 104/233] MariaRocks port: make rocksdb.rpl_savepoint pass Run it only with binlog_format=row, update test result --- .../mysql-test/rocksdb/r/rpl_savepoint.result | 13 ++++++++++--- .../rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf | 6 ------ .../rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test | 1 + 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result index 9424238da93..5746119efac 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result @@ -1,9 +1,8 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. -Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. 
Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. [connection master] +connection master; drop table if exists t1; +connection master; create table t1 (id int primary key, value int); insert into t1 values (1,1), (2,2), (3,3); begin; @@ -21,11 +20,13 @@ id value 2 2 3 3 include/sync_slave_sql_with_master.inc +connection slave; select * from t1; id value 1 1 2 2 3 3 +connection master; begin; insert into t1 values (21, 1); savepoint a; @@ -43,11 +44,13 @@ id value 2 2 3 3 include/sync_slave_sql_with_master.inc +connection slave; select * from t1; id value 1 1 2 2 3 3 +connection master; begin; insert into t1 values (31, 1); savepoint a; @@ -65,11 +68,13 @@ id value 2 2 3 3 include/sync_slave_sql_with_master.inc +connection slave; select * from t1; id value 1 1 2 2 3 3 +connection master; SET autocommit=off; select * from t1; id value @@ -94,10 +99,12 @@ id value 2 2 3 3 include/sync_slave_sql_with_master.inc +connection slave; select * from t1; id value 1 1 2 2 3 3 +connection master; drop table t1; include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf index b46b417c257..09a1c853ffc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf @@ -1,7 +1 @@ !include suite/rpl/my.cnf - -[mysqld.1] -binlog_format=row -[mysqld.2] -binlog_format=row - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test index 0f26c24c27d..13325cf2aa1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc +source include/have_binlog_format_row.inc; source include/master-slave.inc; connection master; From 
122bc4b54cec54cbaa259f6f3b63d2d836b694ec Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 26 Dec 2016 19:12:16 +0000 Subject: [PATCH 105/233] MariaRocks port: Remove ifdef MARIAROCKS_NOT_YET in rocksdb_start_tx_and_assign_read_view Looked though the code: MariaDB's method of doing START TRANSACTION WITH CONSISTENT SNAPSHOT doesn't require mysql_bin_log_lock_commits calls. --- storage/rocksdb/ha_rocksdb.cc | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 7444738dcac..c9c1901cc06 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3099,12 +3099,6 @@ static int rocksdb_start_tx_and_assign_read_view( THD* thd) /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ -#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog - char* binlog_file, /* out: binlog file for last commit */ - ulonglong* binlog_pos, /* out: binlog pos for last commit */ - char** gtid_executed, /* out: Gtids logged until last commit */ - int* gtid_executed_length) /*out: Length of gtid_executed string */ -#endif { Rdb_perf_context_guard guard(thd); @@ -3118,26 +3112,23 @@ static int rocksdb_start_tx_and_assign_read_view( "in RocksDB Storage Engine.", MYF(0)); return 1; } -#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog - if (binlog_file) - { - if (binlog_pos && mysql_bin_log.is_open()) - mysql_bin_log_lock_commits(); - else - return 1; - } -#endif + /* + MariaDB: there is no need to call mysql_bin_log_lock_commits and then + unlock back. + SQL layer calls start_consistent_snapshot() for all engines, including the + binlog under LOCK_commit_ordered mutex. + The mutex prevents binlog commits from happening (right?) while the storage + engine(s) allocate read snapshots. That way, each storage engine is + synchronized with current binlog position. 
+ */ + mysql_mutex_assert_owner(&LOCK_commit_ordered); + Rdb_transaction* tx= get_or_create_tx(thd); DBUG_ASSERT(!tx->has_snapshot()); tx->set_tx_read_only(true); rocksdb_register_tx(hton, thd, tx); tx->acquire_snapshot(true); -#ifdef MARIAROCKS_NOT_YET // consistent snapshot with binlog - if (binlog_file) - mysql_bin_log_unlock_commits(binlog_file, binlog_pos, gtid_executed, - gtid_executed_length); -#endif return 0; } From 8f2d58b26bf6b34bbad733aecafd199528801e03 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 26 Dec 2016 22:31:46 +0300 Subject: [PATCH 106/233] MariaRocks port: run rocksdb.rpl_row_stats with binlog_format=row only The test still fails but at least we do not try to run it with wrong binlog_format. --- storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf | 6 ------ storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test | 1 + 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf index b46b417c257..09a1c853ffc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf @@ -1,7 +1 @@ !include suite/rpl/my.cnf - -[mysqld.1] -binlog_format=row -[mysqld.2] -binlog_format=row - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test index c0b0122cbc0..db4d1ca6f9e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc +source include/have_binlog_format_row.inc; source include/master-slave.inc; connection master; From 4faa9da81c00725dfe8c36e01cf15414918eb1f3 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 27 Dec 2016 01:55:05 +0300 Subject: [PATCH 107/233] MariaRocks port: Make SQL layer allow errors in start_consistent_snapshot(). 
rocksdb.cons_snapshot_read_committed test used to crash. This happened, because - MyRocks produces an error when one attempts to do a START TRANSACTION WITH CONSISTENT SNAPSHOT with @@tx_isolation=REPEATABLE_READ - MariaDB's SQL layer didn't assume that errors are possible inside hton->start_constistent_snapshot() call. --- sql/sql_parse.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ac00b21c837..e415b275fff 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5428,7 +5428,9 @@ end_with_restore_list: (longlong) thd->thread_id); goto error; } - my_ok(thd); + /* MyRocks: hton->start_consistent_snapshot call may fail with an error */ + if (!thd->is_error()) + my_ok(thd); break; case SQLCOM_COMMIT: { From 9ca608028f3c0b419bbc5451f5f2c0ecc8ced0fd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 27 Dec 2016 00:13:32 +0000 Subject: [PATCH 108/233] MariaRocks port: make rocksdb.rocksdb_qcache test pass --- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result | 4 +++- storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result index 7a17dabf294..57c5b897462 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result @@ -28,9 +28,11 @@ pk c show status like 'Qcache_hits'; Variable_name Value Qcache_hits 0 +# MariaDB: Qcache_not_cached is not incremented for select sql_no_cache queries +# so the following query produces 2, not 3: show status like 'Qcache_not_cached'; Variable_name Value -Qcache_not_cached 3 +Qcache_not_cached 2 show global status like 'Qcache_hits'; Variable_name Value Qcache_hits 0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test index 
5cfbe3fbd39..b62002b0020 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test @@ -24,6 +24,8 @@ select * from t1; select sql_no_cache * from t1; select * from t1 where pk = 1; show status like 'Qcache_hits'; +--echo # MariaDB: Qcache_not_cached is not incremented for select sql_no_cache queries +--echo # so the following query produces 2, not 3: show status like 'Qcache_not_cached'; show global status like 'Qcache_hits'; From d379963d73491485a75f05bd8412669f147cc139 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 30 Dec 2016 02:18:56 +0300 Subject: [PATCH 109/233] MariaRocks port: remove target_lsn parameter of rocksdb_flush_wal - It's from a webscalesql feature that we dont have - MyRocks ignores it anyway --- storage/rocksdb/ha_rocksdb.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c9c1901cc06..6748501aa0e 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -2562,9 +2562,6 @@ static std::string rdb_xid_to_string(const XID& src) transactions. 
*/ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) -#ifdef MARIAROCKS_NOT_YET - ulonglong target_lsn __attribute__((__unused__))) -#endif { DBUG_ASSERT(rdb != nullptr); rocksdb::Status s= rdb->SyncWAL(); From d1af31b3c204a74774153c4c8d9da91db9f4393d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 31 Dec 2016 21:31:50 +0300 Subject: [PATCH 110/233] MariaRocks port: Make rocksdb_sys_vars suite pass - Fix include paths, add suite.opt - Add a test for @@rocksdb_supported_compression_types Now all tests pass, except rocksdb_sysvars.rocksdb_rpl_skip_tx_api_basic --- .../include/correctboolvalue.inc | 25 ++++ .../include/rocksdb_sys_var.inc | 123 ++++++++++++++++++ .../r/rocksdb_merge_buf_size_basic.result | 4 +- ...cksdb_merge_combine_read_size_basic.result | 4 +- ...t => rocksdb_rpl_skip_tx_api_basic.result} | 0 ...b_supported_compression_types_basic.result | 4 + .../mysql-test/rocksdb_sys_vars/suite.opt | 2 + ...access_hint_on_compaction_start_basic.test | 2 +- .../rocksdb_advise_random_on_open_basic.test | 2 +- ...allow_concurrent_memtable_write_basic.test | 2 +- .../t/rocksdb_allow_mmap_reads_basic.test | 2 +- .../t/rocksdb_allow_mmap_writes_basic.test | 2 +- .../t/rocksdb_allow_os_buffer_basic.test | 2 +- .../t/rocksdb_background_sync_basic.test | 2 +- ...sdb_base_background_compactions_basic.test | 2 +- .../t/rocksdb_block_cache_size_basic.test | 2 +- .../rocksdb_block_restart_interval_basic.test | 2 +- .../t/rocksdb_block_size_basic.test | 2 +- .../t/rocksdb_block_size_deviation_basic.test | 2 +- .../t/rocksdb_bulk_load_basic.test | 2 +- .../t/rocksdb_bulk_load_size_basic.test | 2 +- .../t/rocksdb_bytes_per_sync_basic.test | 2 +- ...b_cache_index_and_filter_blocks_basic.test | 2 +- .../t/rocksdb_checksums_pct_basic.test | 2 +- .../rocksdb_collect_sst_properties_basic.test | 2 +- .../t/rocksdb_commit_in_the_middle_basic.test | 2 +- .../t/rocksdb_compact_cf_basic.test | 2 +- ...cksdb_compaction_readahead_size_basic.test | 2 +- 
...b_compaction_sequential_deletes_basic.test | 2 +- ...ion_sequential_deletes_count_sd_basic.test | 2 +- ...on_sequential_deletes_file_size_basic.test | 2 +- ...ction_sequential_deletes_window_basic.test | 2 +- .../t/rocksdb_create_if_missing_basic.test | 2 +- ..._create_missing_column_families_basic.test | 2 +- .../t/rocksdb_datadir_basic.test | 2 +- .../t/rocksdb_db_write_buffer_size_basic.test | 2 +- ...g_optimizer_no_zero_cardinality_basic.test | 2 +- .../t/rocksdb_default_cf_options_basic.test | 2 +- ...te_obsolete_files_period_micros_basic.test | 2 +- .../t/rocksdb_disable_2pc_basic.test | 2 +- .../t/rocksdb_disabledatasync_basic.test | 2 +- .../t/rocksdb_enable_bulk_load_api_basic.test | 2 +- .../rocksdb_enable_thread_tracking_basic.test | 2 +- ...ble_write_thread_adaptive_yield_basic.test | 2 +- .../t/rocksdb_error_if_exists_basic.test | 2 +- ...ocksdb_force_flush_memtable_now_basic.test | 2 +- ...db_force_index_records_in_range_basic.test | 2 +- ...ksdb_hash_index_allow_collision_basic.test | 2 +- .../t/rocksdb_index_type_basic.test | 2 +- .../t/rocksdb_info_log_level_basic.test | 2 +- .../t/rocksdb_is_fd_close_on_exec_basic.test | 2 +- .../t/rocksdb_keep_log_file_num_basic.test | 2 +- .../t/rocksdb_lock_scanned_rows_basic.test | 2 +- .../t/rocksdb_lock_wait_timeout_basic.test | 2 +- .../rocksdb_log_file_time_to_roll_basic.test | 2 +- ...sdb_manifest_preallocation_size_basic.test | 2 +- ...ksdb_max_background_compactions_basic.test | 2 +- .../rocksdb_max_background_flushes_basic.test | 2 +- .../t/rocksdb_max_log_file_size_basic.test | 2 +- .../rocksdb_max_manifest_file_size_basic.test | 2 +- .../t/rocksdb_max_open_files_basic.test | 2 +- .../t/rocksdb_max_row_locks_basic.test | 2 +- .../t/rocksdb_max_subcompactions_basic.test | 2 +- .../t/rocksdb_max_total_wal_size_basic.test | 2 +- ...le_reader_for_compaction_inputs_basic.test | 2 +- .../t/rocksdb_no_block_cache_basic.test | 2 +- .../t/rocksdb_override_cf_options_basic.test | 2 +- 
.../t/rocksdb_paranoid_checks_basic.test | 2 +- .../rocksdb_pause_background_work_basic.test | 2 +- .../t/rocksdb_perf_context_level_basic.test | 2 +- ...ilter_and_index_blocks_in_cache_basic.test | 2 +- ...ksdb_rate_limiter_bytes_per_sec_basic.test | 2 +- .../t/rocksdb_read_free_rpl_tables_basic.test | 2 +- .../t/rocksdb_records_in_range_basic.test | 2 +- .../t/rocksdb_rpl_skip_tx_api_basic.test | 2 +- ...b_seconds_between_stat_computes_basic.test | 2 +- ...ocksdb_signal_drop_index_thread_basic.test | 2 +- ...cksdb_skip_bloom_filter_on_read_basic.test | 2 +- .../t/rocksdb_skip_fill_cache_basic.test | 2 +- .../t/rocksdb_skip_unique_check_basic.test | 2 +- ...ocksdb_skip_unique_check_tables_basic.test | 2 +- .../rocksdb_stats_dump_period_sec_basic.test | 2 +- .../t/rocksdb_store_checksums_basic.test | 2 +- .../rocksdb_strict_collation_check_basic.test | 2 +- ...sdb_supported_compression_types_basic.test | 7 + ...ocksdb_table_cache_numshardbits_basic.test | 2 +- ...ocksdb_table_stats_sampling_pct_basic.test | 2 +- .../t/rocksdb_unsafe_for_binlog_basic.test | 2 +- .../t/rocksdb_use_adaptive_mutex_basic.test | 2 +- .../t/rocksdb_use_fsync_basic.test | 2 +- .../t/rocksdb_validate_tables_basic.test | 2 +- .../t/rocksdb_verify_checksums_basic.test | 2 +- .../t/rocksdb_wal_bytes_per_sync_basic.test | 2 +- .../t/rocksdb_wal_dir_basic.test | 2 +- .../t/rocksdb_wal_recovery_mode_basic.test | 2 +- .../t/rocksdb_wal_size_limit_mb_basic.test | 2 +- .../t/rocksdb_wal_ttl_seconds_basic.test | 2 +- .../t/rocksdb_whole_key_filtering_basic.test | 2 +- .../t/rocksdb_write_disable_wal_basic.test | 2 +- ..._ignore_missing_column_families_basic.test | 2 +- .../t/rocksdb_write_sync_basic.test | 2 +- 101 files changed, 258 insertions(+), 97 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc rename 
storage/rocksdb/mysql-test/rocksdb_sys_vars/r/{rocksdb_rpl_skip_tx_api_basic.test => rocksdb_rpl_skip_tx_api_basic.result} (100%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc new file mode 100644 index 00000000000..f675aec19f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc @@ -0,0 +1,25 @@ +## +# $input the value of a boolean type +# $output the value of int type +## +--let $int_value=$value +if ($value==on) +{ + --let $int_value=1 +} + +if ($value==off) +{ + --let $int_value=0 +} + +# MySQL allows 'true' and 'false' on bool values +if ($value==true) +{ + --let $int_value=1 +} + +if ($value==false) +{ + --let $int_value=0 +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc new file mode 100644 index 00000000000..6ba93026674 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc @@ -0,0 +1,123 @@ +## +# $sys_var name of the variable +# $read_only - true if read-only +# $session - true if this is session, false if global-only +# $suppress_default_value - if true, don't check the default value +# valid_values table should contain valid values +# invalid_values +## + +--eval SET @start_global_value = @@global.$sys_var +if (!$suppress_default_value) +{ + SELECT @start_global_value; + if ($session) + { + --eval SET @start_session_value = @@session.$sys_var + SELECT @start_session_value; + } +} + +if (!$read_only) +{ + --echo '# Setting to valid values in global scope#' + + 
--let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --let $v=`SELECT @@global.$sys_var` + --source include/correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@global.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + + --echo "Setting the global scope variable back to default" + --eval SET @@global.$sys_var = DEFAULT + --eval SELECT @@global.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + + if ($session) + { + --echo '# Setting to valid values in session scope#' + + --let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@session.$sys_var to $value" + --eval SET @@session.$sys_var = $value + --eval SELECT @@session.$sys_var + --let $v=`SELECT @@session.$sys_var` + --source include/correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@session.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + --echo "Setting the session scope variable back to default" + --eval SET @@session.$sys_var = DEFAULT + --eval SELECT @@session.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + } + if (!$session) + { + --echo "Trying to set variable @@session.$sys_var to 444. It should fail because it is not session." 
+ --Error ER_GLOBAL_VARIABLE + --eval SET @@session.$sys_var = 444 + } + + --echo '# Testing with invalid values in global scope #' + #################################################################### + # Change the value of query_prealloc_size to an invalid value # + #################################################################### + --let $i=1 + --let $value=query_get_value(select value from invalid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --Error ER_WRONG_VALUE_FOR_VAR, ER_WRONG_TYPE_FOR_VAR + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --inc $i + --let $value=query_get_value(select value from invalid_values, value, $i) + } +} + +if ($read_only) +{ + --echo "Trying to set variable @@global.$sys_var to 444. It should fail because it is readonly." + --Error ER_INCORRECT_GLOBAL_LOCAL_VAR + --eval SET @@global.$sys_var = 444 +} + +#################################### +# Restore initial value # +#################################### +if (!$read_only) +{ + --eval SET @@global.$sys_var = @start_global_value + --eval SELECT @@global.$sys_var + if ($session) + { + --eval SET @@session.$sys_var = @start_session_value + --eval SELECT @@session.$sys_var + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result index e82e987bf96..5715b198d5a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result @@ -7,7 +7,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, PRIMARY KEY (`i`), KEY `kj` (`j`), @@ -19,7 +19,7 @@ ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX 
kij(i,j), ADD INDEX kji(j,i), ALGORITH SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, PRIMARY KEY (`i`), KEY `kj` (`j`), diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result index 122e2451f39..5b73305cd9e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result @@ -7,7 +7,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, PRIMARY KEY (`i`), KEY `kj` (`j`), @@ -19,7 +19,7 @@ ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITH SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, PRIMARY KEY (`i`), KEY `kj` (`j`), diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result similarity index 100% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result new file mode 100644 index 00000000000..aa77d739120 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result @@ -0,0 +1,4 @@ +SET @start_global_value = 
@@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES; +"Trying to set variable @@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES = 444; +ERROR HY000: Variable 'rocksdb_supported_compression_types' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt new file mode 100644 index 00000000000..8907deed6d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt @@ -0,0 +1,2 @@ +--ignore-db-dirs=.rocksdb + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test index a6b753ba87a..51765574a91 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_ACCESS_HINT_ON_COMPACTION_START --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test index b6ccea0f882..06593363df2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ADVISE_RANDOM_ON_OPEN --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test index b250aa5eb7f..4f618609223 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test index 067f5820045..72fed6b2275 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ALLOW_MMAP_READS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test index 51fbf62d5a9..8b8da14f56c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ALLOW_MMAP_WRITES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test index c38d0c7b210..bbd4140898c 
100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ALLOW_OS_BUFFER --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test index e0c2bd366cc..d8efc082266 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_BACKGROUND_SYNC --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test index 8e49110513a..9f001ce103e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BASE_BACKGROUND_COMPACTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test index 68715796a04..39688e63556 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_CACHE_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test index 2b14e1fb654..0688ef73281 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_BLOCK_RESTART_INTERVAL --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test index 11d18e3223f..150c1e533a7 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test index a54700aae4d..98d179c028c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_SIZE_DEVIATION --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test index 6cd9e0e1560..dd55c849adb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_BULK_LOAD --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test index 1b57255202b..70d1c44806a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_BULK_LOAD_SIZE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test index 2958273695d..d1d6b2b5695 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BYTES_PER_SYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test index db1f5936812..27d0aa99d01 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test index 44126e35f57..b595cb62a56 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test @@ -11,7 +11,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CHECKSUMS_PCT --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test index c47c62e41b4..9c0e111d7b9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test @@ -3,6 +3,6 @@ --let $sys_var=ROCKSDB_COLLECT_SST_PROPERTIES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test index 
62c8e680aab..ec860cfcfc2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_COMMIT_IN_THE_MIDDLE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test index c65f722fe6e..c5783c7344c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test @@ -10,7 +10,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test index ba45defb7a1..c0651a3a14d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test @@ -17,7 +17,7 @@ SELECT @@global.rocksdb_compaction_readahead_size; --let $sys_var=ROCKSDB_COMPACTION_READAHEAD_SIZE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test index 5ec719baeb6..24399c85d88 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'2000001\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test index 6c35ed634f7..b3a437d6cd4 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test index ff132f7049c..aaf71179221 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let 
$sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test index b38c79b5ef0..d5be34695c0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'2000001\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test index 77422aa164c..ab92a0a0867 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CREATE_IF_MISSING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test index b8aeb6c9b19..21c0f0ead2c 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test index 20f33d6bdfd..fd3569c8f0a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DATADIR --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test index 7ef5422dcd3..df6a24902af 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DB_WRITE_BUFFER_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test index 52e25ab358f..41c4ae6322d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test index f756d1eb2f5..1febc6db093 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DEFAULT_CF_OPTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test index 744bd946d9a..3c2cd2db87f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test index 061a4c902b5..04a9492c1d8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test index b365370f214..02778feec90 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DISABLEDATASYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test index 407093acbea..52313ffbe70 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_ENABLE_BULK_LOAD_API --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test index 251d7d5803d..566d56563fb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ENABLE_THREAD_TRACKING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test index 9d6502598b0..1904dd2cd69 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test index 495770e8efb..933642a73a6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_ERROR_IF_EXISTS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test index 9529fae7516..4386af1ee19 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test @@ -11,7 +11,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test index 08e8d0c16de..30263ea4aa1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test @@ -17,7 +17,7 @@ SELECT @@session.rocksdb_force_index_records_in_range; --let $sys_var=ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test index 5899f7b67d0..e787dd33a34 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_HASH_INDEX_ALLOW_COLLISION --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test index 
711703c2148..49369ffd765 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_INDEX_TYPE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test index 990a9a62148..fb2ce5e713b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test @@ -15,7 +15,7 @@ INSERT INTO invalid_values VALUES('foo'); --let $sys_var=ROCKSDB_INFO_LOG_LEVEL --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test index 741e20fac9f..4d39c2a3656 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_IS_FD_CLOSE_ON_EXEC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test index 511f9f8a06d..0eff718c14c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_KEEP_LOG_FILE_NUM --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test index 52f7f502d96..35b4128c3e5 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test @@ -16,7 +16,7 @@ INSERT INTO invalid_values VALUES(1000); --let $sys_var=ROCKSDB_LOCK_SCANNED_ROWS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test index 0c524db9cbd..24096677e1b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_LOCK_WAIT_TIMEOUT --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test index 76aee161efc..63a7c5fedfb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_LOG_FILE_TIME_TO_ROLL --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test index 48d14fbf9f6..6f248ece9e9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MANIFEST_PREALLOCATION_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test index 441c0577c10..61c170e84ae 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test index de3ab148ec6..db5b7112e9c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test @@ -3,4 +3,4 @@ --let 
$sys_var=ROCKSDB_MAX_BACKGROUND_FLUSHES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test index b0dca55e18b..cbe5d925fda 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_LOG_FILE_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test index 9464f0aa1ad..f399b296732 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_MAX_MANIFEST_FILE_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test index c82af39f7b5..ba3293264ab 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_OPEN_FILES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test index a9e440d4b98..4eb00329cf2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_MAX_ROW_LOCKS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test index 0ebc9c204fb..a4494dd8262 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_MAX_SUBCOMPACTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test index 0f881868ae2..35ba859c649 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_TOTAL_WAL_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test index cc84a2c60be..1d2ea6e6663 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test index 39c84fb2c2d..be1e3e88392 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_NO_BLOCK_CACHE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test index bc680c0772a..1f4325b89d6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_OVERRIDE_CF_OPTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test index 5b0e4798678..5bdd9d3d50b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test @@ -3,5 +3,5 @@ --let 
$sys_var=ROCKSDB_PARANOID_CHECKS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test index fd2f3098840..3f2f6bc703e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test index 1fd61a80955..46f74578471 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_PERF_CONTEXT_LEVEL --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test index af095097909..d25131062d4 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test @@ -3,4 
+3,4 @@ --let $sys_var=ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test index d683e8045da..8277011831a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test @@ -36,7 +36,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); # Test all the valid and invalid values --let $sys_var=ROCKSDB_RATE_LIMITER_BYTES_PER_SEC --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test index 9ff20edcfb2..71f42a47f4b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $sys_var=ROCKSDB_READ_FREE_RPL_TABLES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test index 4fab0b3123c..21503475e3e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_RECORDS_IN_RANGE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test index f6c0a219a9f..f3c67a7ee3f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_RPL_SKIP_TX_API --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test index a71df41affc..53c2e6e62bf 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test index b33f444199b..ea90c7b7c58 100644 
--- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test @@ -13,7 +13,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test index 80a9c4b3c43..82b56e0bbcb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SKIP_BLOOM_FILTER_ON_READ --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test index 2465e569f79..cc1b608b7b3 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SKIP_FILL_CACHE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test index fe90a49365b..bbee6141305 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test @@ -15,7 +15,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test index c64eeedb594..3fe265ae930 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK_TABLES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test index 7854faa8ddf..2fbb0c6ea6d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_STATS_DUMP_PERIOD_SEC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test index 023b6420b96..a3ab6695689 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_STORE_CHECKSUMS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test index eabc45ef6be..17aa63b8bb3 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test @@ -13,7 +13,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_STRICT_COLLATION_CHECK --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test new file mode 100644 index 00000000000..52bf63c21cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_SUPPORTED_COMPRESSION_TYPES +--let $read_only=1 +--let $session=0 +--let $suppress_default_value=1 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test index 77da9df9acd..11bdd6abce8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_TABLE_CACHE_NUMSHARDBITS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test index c3016742042..3bed5e6ec73 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test @@ -15,7 +15,7 @@ INSERT INTO invalid_values VALUES('\'484436\''); --let $sys_var=ROCKSDB_TABLE_STATS_SAMPLING_PCT --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test index 302a4173efc..f5f4536d769 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_UNSAFE_FOR_BINLOG --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test index a0f0a212987..7ce7bec1f6e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_ADAPTIVE_MUTEX --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test index 0d8e35d03cb..90b41c4aa57 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_FSYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test index 6eb965c5863..ed12b319cfc 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_VALIDATE_TABLES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test index d8c9c559703..8a8e4adbf95 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test @@ -12,7 +12,7 @@ 
INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_VERIFY_CHECKSUMS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test index eeeeed8f767..afab0f20d40 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test index 0d667d8de2c..a40c77669f2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_DIR --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test index e0d345b4268..ce202f2e2b5 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_WAL_RECOVERY_MODE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP 
TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test index d597b5d8016..95880ea3e63 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_SIZE_LIMIT_MB --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test index 9342c861168..e65d3851392 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_TTL_SECONDS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test index ff332a6fbe4..83e8e2382a2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WHOLE_KEY_FILTERING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test index f7fa33e03e6..d732bebac7f 100644 
--- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_WRITE_DISABLE_WAL --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test index 1c9d35f4a57..f38b7c9601d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test index 3655ddb8c69..3044a038b9a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_WRITE_SYNC --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; From ae0a490eb3447846f0c18e3485c5ae2d95c3a330 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 31 Dec 2016 22:08:05 
+0300 Subject: [PATCH 111/233] MariaRocks port: Remove ifdef in ha_rocksdb::set_skip_unique_check_tables We expose @@rocksdb_skip_unique_check_tables anyway, so it's not really disabled. --- storage/rocksdb/ha_rocksdb.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 6748501aa0e..2d8609f4d8d 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4799,7 +4799,6 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) { DBUG_ASSERT(whitelist != nullptr); -#ifdef MARIAROCKS_NOT_YET // regex_handler #if defined(HAVE_PSI_INTERFACE) Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables); #else @@ -4812,7 +4811,6 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) } m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename()); -#endif } int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) From cfb59f3196aac1b41cdda79952031dcc64042914 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 31 Dec 2016 23:30:09 +0300 Subject: [PATCH 112/233] Copy of commit f6ed777697db4ad7aee1e98c53243dced2b5891c Author: Chenyu Yan Date: Thu Dec 29 16:10:25 2016 -0800 Fix BIG_TIMEOUT constant Summary: Update the constant to reflect what the comments indicates. 
Reviewed By: gunnarku Differential Revision: D4374476 fbshipit-source-id: dd7f484 --- storage/rocksdb/CMakeLists.txt | 19 +- storage/rocksdb/event_listener.cc | 31 +- storage/rocksdb/event_listener.h | 13 +- storage/rocksdb/ha_rocksdb.cc | 2280 ++++++++++------- storage/rocksdb/ha_rocksdb.h | 376 +-- storage/rocksdb/ha_rocksdb_proto.h | 6 +- storage/rocksdb/logger.h | 2 +- .../rocksdb/include/locking_issues_case3.inc | 3 +- .../rocksdb/include/locking_issues_case5.inc | 3 +- .../rocksdb/include/locking_issues_case6.inc | 3 +- .../rocksdb/r/2pc_group_commit.result | 44 + .../rocksdb/r/add_index_inplace.result | 208 +- .../rocksdb/r/add_index_inplace_crash.result | 96 + .../rocksdb/r/allow_os_buffer.result | 1 - .../rocksdb/r/autoinc_vars_thread.result | 24 + .../rocksdb/r/autoinc_vars_thread_2.result | 53 + .../mysql-test/rocksdb/r/bulk_load.result | 15 + .../rocksdb/r/commit_in_the_middle_ddl.result | 14 + .../r/corrupted_data_reads_debug.result | 6 +- .../rocksdb/mysql-test/rocksdb/r/index.result | 20 + .../mysql-test/rocksdb/r/lock_info.result | 31 + .../rocksdb/r/locking_issues.result | 16 +- .../r/optimizer_loose_index_scans.result | 281 ++ .../mysql-test/rocksdb/r/rocksdb.result | 47 +- .../rocksdb/r/rocksdb_cf_options.result | 20 +- .../rocksdb/r/rocksdb_checksums.result | 28 +- .../r/rocksdb_deadlock_detect_rc.result | 54 + .../r/rocksdb_deadlock_detect_rr.result | 54 + .../r/rocksdb_deadlock_stress_rc.result | 8 + .../r/rocksdb_deadlock_stress_rr.result | 8 + .../mysql-test/rocksdb/r/rocksdb_locks.result | 1 + .../rocksdb/r/rollback_savepoint.result | 22 + .../r/select_lock_in_share_mode.result | 7 +- .../mysql-test/rocksdb/r/show_engine.result | 3 +- .../mysql-test/rocksdb/r/statistics.result | 4 +- .../mysql-test/rocksdb/r/tmpdir.result | 26 + .../mysql-test/rocksdb/r/trx_info.result | 13 + .../mysql-test/rocksdb/r/trx_info_rpl.result | 15 + .../r/type_char_indexes_collation.result | 20 +- .../mysql-test/rocksdb/r/type_decimal.result | 76 - 
.../mysql-test/rocksdb/r/type_varchar.result | 6 +- .../rocksdb/r/type_varchar_debug.result | 254 -- .../rocksdb/r/use_direct_reads_writes.result | 2 + .../rocksdb/t/2pc_group_commit-master.opt | 1 + .../rocksdb/t/2pc_group_commit.test | 64 + .../rocksdb/t/add_index_inplace.test | 279 +- .../rocksdb/t/add_index_inplace_crash.test | 117 + .../t/add_index_inplace_sstfilewriter.test | 1 - .../mysql-test/rocksdb/t/allow_os_buffer.test | 30 - .../rocksdb/t/autoinc_vars_thread.test | 53 + .../rocksdb/t/autoinc_vars_thread_2.test | 141 + .../mysql-test/rocksdb/t/bulk_load.test | 9 + .../mysql-test/rocksdb/t/collation.test | 4 +- .../rocksdb/t/commit_in_the_middle_ddl.test | 27 + .../rocksdb/t/corrupted_data_reads_debug.test | 6 +- .../rocksdb/mysql-test/rocksdb/t/index.test | 21 + .../t/insert_optimized_config-master.opt | 1 + .../mysql-test/rocksdb/t/lock_info.test | 31 + .../t/optimizer_loose_index_scans.test | 3 + .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 52 +- .../rocksdb/t/rocksdb_checksums.test | 27 +- .../rocksdb/t/rocksdb_deadlock_detect.inc | 90 + .../t/rocksdb_deadlock_detect_rc-master.opt | 1 + .../rocksdb/t/rocksdb_deadlock_detect_rc.test | 1 + .../rocksdb/t/rocksdb_deadlock_detect_rr.test | 1 + .../rocksdb/t/rocksdb_deadlock_stress.inc | 18 + .../rocksdb/t/rocksdb_deadlock_stress.py | 94 + .../t/rocksdb_deadlock_stress_rc-master.opt | 1 + .../rocksdb/t/rocksdb_deadlock_stress_rc.test | 1 + .../rocksdb/t/rocksdb_deadlock_stress_rr.test | 1 + .../rocksdb/t/rocksdb_locks-master.opt | 1 + .../mysql-test/rocksdb/t/rocksdb_locks.test | 1 + .../rocksdb/t/rollback_savepoint.test | 31 + .../rocksdb/t/select_lock_in_share_mode.test | 4 - .../mysql-test/rocksdb/t/show_engine.test | 3 +- .../rocksdb/mysql-test/rocksdb/t/tmpdir.test | 35 + .../mysql-test/rocksdb/t/trx_info.test | 17 + .../mysql-test/rocksdb/t/trx_info_rpl.cnf | 8 + .../mysql-test/rocksdb/t/trx_info_rpl.test | 42 + .../t/type_char_indexes_collation.test | 17 +- .../rocksdb/t/type_decimal-master.opt | 
1 + .../mysql-test/rocksdb/t/type_decimal.test | 75 - .../mysql-test/rocksdb/t/type_varchar.test | 6 +- .../rocksdb/t/type_varchar_debug.test | 137 - .../rocksdb/t/use_direct_reads_writes.test | 47 + .../r/rocksdb_allow_os_buffer_basic.result | 7 - .../r/rocksdb_block_cache_size_basic.result | 2 +- .../r/rocksdb_deadlock_detect_basic.result | 121 + ...int_snapshot_conflict_queries_basic.result | 64 + ...t => rocksdb_rpl_skip_tx_api_basic.result} | 0 .../r/rocksdb_store_checksums_basic.result | 100 - ...sdb_store_row_debug_checksums_basic.result | 100 + .../r/rocksdb_tmpdir_basic.result | 29 + .../r/rocksdb_trace_sst_api_basic.result | 100 + .../r/rocksdb_use_direct_reads_basic.result | 7 + .../r/rocksdb_use_direct_writes_basic.result | 7 + .../r/rocksdb_verify_checksums_basic.result | 100 - ...db_verify_row_debug_checksums_basic.result | 100 + .../t/rocksdb_deadlock_detect_basic.test | 20 + ...print_snapshot_conflict_queries_basic.test | 18 + ...ksdb_store_row_debug_checksums_basic.test} | 2 +- .../t/rocksdb_tmpdir_basic.test | 38 + ....test => rocksdb_trace_sst_api_basic.test} | 2 +- ...st => rocksdb_use_direct_reads_basic.test} | 2 +- .../t/rocksdb_use_direct_writes_basic.test | 6 + ...ksdb_verify_row_debug_checksums_basic.test | 18 + storage/rocksdb/properties_collector.cc | 47 +- storage/rocksdb/properties_collector.h | 26 +- storage/rocksdb/rdb_buff.h | 98 +- storage/rocksdb/rdb_cf_manager.cc | 34 +- storage/rocksdb/rdb_cf_manager.h | 24 +- storage/rocksdb/rdb_cf_options.cc | 34 +- storage/rocksdb/rdb_cf_options.h | 28 +- storage/rocksdb/rdb_compact_filter.h | 5 + storage/rocksdb/rdb_comparator.h | 14 +- storage/rocksdb/rdb_datadic.cc | 890 ++++--- storage/rocksdb/rdb_datadic.h | 275 +- storage/rocksdb/rdb_i_s.cc | 935 +++++-- storage/rocksdb/rdb_i_s.h | 3 +- storage/rocksdb/rdb_index_merge.cc | 105 +- storage/rocksdb/rdb_index_merge.h | 35 +- storage/rocksdb/rdb_mutex_wrapper.cc | 11 +- storage/rocksdb/rdb_mutex_wrapper.h | 11 +- 
storage/rocksdb/rdb_perf_context.cc | 18 +- storage/rocksdb/rdb_perf_context.h | 16 +- storage/rocksdb/rdb_sst_info.cc | 127 +- storage/rocksdb/rdb_sst_info.h | 61 +- storage/rocksdb/rdb_threads.cc | 6 +- storage/rocksdb/rdb_threads.h | 4 +- storage/rocksdb/rdb_utils.cc | 27 +- storage/rocksdb/rdb_utils.h | 37 +- storage/rocksdb/tools/mysql_ldb.cc | 2 +- 132 files changed, 6102 insertions(+), 3327 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/lock_info.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/trx_info.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt create 
mode 100644 storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/lock_info.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/trx_info.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt delete mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result rename storage/rocksdb/mysql-test/rocksdb_sys_vars/r/{rocksdb_rpl_skip_tx_api_basic.test => rocksdb_rpl_skip_tx_api_basic.result} (100%) delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_verify_checksums_basic.test => rocksdb_store_row_debug_checksums_basic.test} (91%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_store_checksums_basic.test => 
rocksdb_trace_sst_api_basic.test} (92%) rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_allow_os_buffer_basic.test => rocksdb_use_direct_reads_basic.test} (74%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index f2c52ce84c4..a5d07a12d96 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -4,6 +4,11 @@ IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".") ENDIF() +CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU) +IF(HAVE_SCHED_GETCPU) + ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) +ENDIF() + # get a list of rocksdb library source files # run with env -i to avoid passing variables EXECUTE_PROCESS( @@ -51,32 +56,26 @@ ENDIF() SET(rocksdb_static_libs ) IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "") SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_SNAPPY}/lib/libsnappy${PIC_EXT}.a) + $ENV{WITH_SNAPPY}/libsnappy${PIC_EXT}.a) ADD_DEFINITIONS(-DSNAPPY) -ELSE() - SET(rocksdb_static_libs ${rocksdb_static_libs} snappy) ENDIF() IF (NOT "$ENV{WITH_LZ4}" STREQUAL "") SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_LZ4}/lib/liblz4${PIC_EXT}.a) + $ENV{WITH_LZ4}/liblz4${PIC_EXT}.a) ADD_DEFINITIONS(-DLZ4) -ELSE() - SET(rocksdb_static_libs ${rocksdb_static_libs} lz4) ENDIF() IF (NOT "$ENV{WITH_BZ2}" STREQUAL "") SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_BZ2}/lib/libbz2${PIC_EXT}.a) + $ENV{WITH_BZ2}/libbz2${PIC_EXT}.a) ADD_DEFINITIONS(-DBZIP2) -ELSE() - SET(rocksdb_static_libs ${rocksdb_static_libs} bz2) ENDIF() # link ZSTD only if instructed IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_ZSTD}/lib/libzstd${PIC_EXT}.a) + 
$ENV{WITH_ZSTD}/libzstd${PIC_EXT}.a) ADD_DEFINITIONS(-DZSTD) ENDIF() diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc index 851e4782416..e4338f07986 100644 --- a/storage/rocksdb/event_listener.cc +++ b/storage/rocksdb/event_listener.cc @@ -39,7 +39,7 @@ extract_index_stats( ) { std::vector ret; for (auto fn : files) { - auto it = props.find(fn); + const auto it = props.find(fn); DBUG_ASSERT(it != props.end()); std::vector stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats); @@ -48,6 +48,19 @@ extract_index_stats( return ret; } +void Rdb_event_listener::update_index_stats( + const rocksdb::TableProperties& props +) { + DBUG_ASSERT(m_ddl_manager != nullptr); + const auto tbl_props = + std::make_shared(props); + + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats); + + m_ddl_manager->adjust_stats(stats); +} + void Rdb_event_listener::OnCompactionCompleted( rocksdb::DB *db, const rocksdb::CompactionJobInfo& ci @@ -67,14 +80,14 @@ void Rdb_event_listener::OnFlushCompleted( const rocksdb::FlushJobInfo& flush_job_info ) { DBUG_ASSERT(db != nullptr); - DBUG_ASSERT(m_ddl_manager != nullptr); - - auto tbl_props = std::make_shared( - flush_job_info.table_properties); - - std::vector stats; - Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats); - m_ddl_manager->adjust_stats(stats); + update_index_stats(flush_job_info.table_properties); } +void Rdb_event_listener::OnExternalFileIngested( + rocksdb::DB* db, + const rocksdb::ExternalFileIngestionInfo& info +) { + DBUG_ASSERT(db != nullptr); + update_index_stats(info.table_properties); +} } // namespace myrocks diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h index f92c6d0dea1..8895da45c7b 100644 --- a/storage/rocksdb/event_listener.h +++ b/storage/rocksdb/event_listener.h @@ -24,17 +24,26 @@ class Rdb_ddl_manager; class Rdb_event_listener : public rocksdb::EventListener { public: - explicit 
Rdb_event_listener(Rdb_ddl_manager* ddl_manager) : + Rdb_event_listener(const Rdb_event_listener&) = delete; + Rdb_event_listener& operator=(const Rdb_event_listener&) = delete; + + explicit Rdb_event_listener(Rdb_ddl_manager* const ddl_manager) : m_ddl_manager(ddl_manager) { } void OnCompactionCompleted( - rocksdb::DB *db, const rocksdb::CompactionJobInfo& ci) override; + rocksdb::DB* db, const rocksdb::CompactionJobInfo& ci) override; void OnFlushCompleted( rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override; + void OnExternalFileIngested( + rocksdb::DB* db, const rocksdb::ExternalFileIngestionInfo& ingestion_info) + override; private: Rdb_ddl_manager* m_ddl_manager; + + void update_index_stats( + const rocksdb::TableProperties& props); }; } // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 4fbaf01f154..a557f96b92f 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -28,11 +28,13 @@ #include #include #include +#include /* MySQL includes */ #include "./debug_sync.h" #include "./my_bit.h" #include "./my_stacktrace.h" +#include "./sql_audit.h" #include "./sql_table.h" #include #include @@ -44,7 +46,6 @@ #include "rocksdb/slice_transform.h" #include "rocksdb/utilities/checkpoint.h" #include "rocksdb/utilities/convenience.h" -#include "rocksdb/utilities/flashcache.h" #include "rocksdb/utilities/memory_util.h" /* MyRocks includes */ @@ -59,10 +60,6 @@ #include "./rdb_mutex_wrapper.h" #include "./rdb_threads.h" -#ifdef TARGET_OS_LINUX -extern my_bool cachedev_enabled; -#endif /* TARGET_OS_LINUX */ - // Internal MySQL APIs not exposed in any header. extern "C" { @@ -96,7 +93,7 @@ static st_export_stats export_stats; /** Updates row counters based on the table type and operation type. 
*/ -void ha_rocksdb::update_row_stats(operation_type type) { +void ha_rocksdb::update_row_stats(const operation_type &type) { DBUG_ASSERT(type < ROWS_MAX); // Find if we are modifying system databases. if (table->s && m_tbl_def->m_is_mysql_system_table) @@ -111,7 +108,7 @@ static handler *rocksdb_create_handler(my_core::handlerton *hton, my_core::MEM_ROOT *mem_root); bool can_use_bloom_filter(THD *thd, - const std::shared_ptr& kd, + const Rdb_key_def& kd, const rocksdb::Slice &eq_cond, const bool use_all_keys, bool is_ascending); @@ -138,6 +135,7 @@ static std::shared_ptr Rdb_dict_manager dict_manager; Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; +const char* m_mysql_gtid; Rdb_binlog_manager binlog_manager; @@ -164,30 +162,32 @@ static const char* const ERRSTR_ROLLBACK_ONLY static void rocksdb_flush_all_memtables() { - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); - for (auto cf_handle : cf_manager.get_all_cf()) { + const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { rdb->Flush(rocksdb::FlushOptions(), cf_handle); } } static void -rocksdb_compact_column_family_stub(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +rocksdb_compact_column_family_stub(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + const void* const save) { } static int -rocksdb_compact_column_family(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - struct st_mysql_value* value) +rocksdb_compact_column_family(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + struct st_mysql_value* const value) { char buff[STRING_BUFFER_USUAL_SIZE]; int len = sizeof(buff); - if (const char* cf = value->val_str(value, buff, &len)) { + DBUG_ASSERT(value != nullptr); + + if (const char* const cf = value->val_str(value, buff, &len)) { bool is_automatic; auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic); if (cfh != nullptr && rdb != 
nullptr) { @@ -205,6 +205,8 @@ rocksdb_compact_column_family(THD* thd, namespace // anonymous namespace = not visible outside this source file { +const ulong TABLE_HASH_SIZE= 32; + struct Rdb_open_tables_map { /* Hash table used to track the handlers of open tables */ @@ -214,7 +216,8 @@ struct Rdb_open_tables_map void init_hash(void) { - (void) my_hash_init(&m_hash, my_core::system_charset_info, 32, 0, 0, + (void) my_hash_init(&m_hash, my_core::system_charset_info, TABLE_HASH_SIZE, + 0, 0, (my_hash_get_key) Rdb_open_tables_map::get_hash_key, 0, 0); } @@ -224,12 +227,12 @@ struct Rdb_open_tables_map my_hash_free(&m_hash); } - static uchar* get_hash_key(Rdb_table_handler *table_handler, - size_t *length, + static uchar* get_hash_key(Rdb_table_handler* const table_handler, + size_t* const length, my_bool not_used __attribute__((__unused__))); - Rdb_table_handler* get_table_handler(const char *table_name); - void release_table_handler(Rdb_table_handler *table_handler); + Rdb_table_handler* get_table_handler(const char* const table_name); + void release_table_handler(Rdb_table_handler* const table_handler); std::vector get_table_names(void) const; }; @@ -250,14 +253,14 @@ static std::string rdb_normalize_dir(std::string dir) static int rocksdb_create_checkpoint( - THD* thd __attribute__((__unused__)), - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* save __attribute__((__unused__)), - struct st_mysql_value* value) + THD* const thd __attribute__((__unused__)), + struct st_mysql_sys_var* const var __attribute__((__unused__)), + void* const save __attribute__((__unused__)), + struct st_mysql_value* const value) { - char buf[512]; + char buf[FN_REFLEN]; int len = sizeof(buf); - const char* checkpoint_dir_raw= value->val_str(value, buf, &len); + const char* const checkpoint_dir_raw= value->val_str(value, buf, &len); if (checkpoint_dir_raw) { if (rdb != nullptr) { std::string checkpoint_dir= rdb_normalize_dir(checkpoint_dir_raw); @@ -280,7 +283,7 @@ static 
int rocksdb_create_checkpoint( } delete checkpoint; } else { - std::string err_text(status.ToString()); + const std::string err_text(status.ToString()); my_printf_error(ER_UNKNOWN_ERROR, "RocksDB: failed to initialize checkpoint. status %d %s\n", MYF(0), status.code(), err_text.c_str()); @@ -294,26 +297,26 @@ static int rocksdb_create_checkpoint( /* This method is needed to indicate that the ROCKSDB_CREATE_CHECKPOINT command is not read-only */ static void -rocksdb_create_checkpoint_stub(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +rocksdb_create_checkpoint_stub(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + const void* const save) { } static void -rocksdb_force_flush_memtable_now_stub(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +rocksdb_force_flush_memtable_now_stub(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + const void* const save) { } static int -rocksdb_force_flush_memtable_now(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - struct st_mysql_value* value) +rocksdb_force_flush_memtable_now(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + struct st_mysql_value* const value) { sql_print_information("RocksDB: Manual memtable flush\n"); rocksdb_flush_all_memtables(); @@ -321,22 +324,22 @@ rocksdb_force_flush_memtable_now(THD* thd, } static void rocksdb_drop_index_wakeup_thread( - my_core::THD* thd __attribute__((__unused__)), - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr __attribute__((__unused__)), - const void* save); + my_core::THD* const thd __attribute__((__unused__)), + struct st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr __attribute__((__unused__)), + const void* const save); static my_bool rocksdb_pause_background_work= 0; static mysql_mutex_t rdb_sysvars_mutex; static void rocksdb_set_pause_background_work( - 
my_core::THD* thd __attribute__((__unused__)), - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr __attribute__((__unused__)), - const void* save) + my_core::THD* const thd __attribute__((__unused__)), + struct st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr __attribute__((__unused__)), + const void* const save) { mysql_mutex_lock(&rdb_sysvars_mutex); - bool pause_requested= *static_cast(save); + const bool pause_requested= *static_cast(save); if (rocksdb_pause_background_work != pause_requested) { if (pause_requested) { rdb->PauseBackgroundWork(); @@ -411,8 +414,10 @@ static char * rocksdb_datadir; static uint32_t rocksdb_table_stats_sampling_pct; static my_bool rocksdb_enable_bulk_load_api= 1; static my_bool rpl_skip_tx_api_var= 0; +static my_bool rocksdb_print_snapshot_conflict_queries= 0; std::atomic rocksdb_snapshot_conflict_errors(0); +std::atomic rocksdb_wal_group_syncs(0); static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) { @@ -449,11 +454,13 @@ static TYPELIB info_log_level_typelib = { }; static void -rocksdb_set_rocksdb_info_log_level(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +rocksdb_set_rocksdb_info_log_level(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + const void* const save) { + DBUG_ASSERT(save != nullptr); + mysql_mutex_lock(&rdb_sysvars_mutex); rocksdb_info_log_level = *static_cast(save); rocksdb_db_options.info_log->SetInfoLogLevel( @@ -474,10 +481,30 @@ static TYPELIB index_type_typelib = { nullptr }; +const ulong RDB_MAX_LOCK_WAIT_SECONDS= 1024*1024*1024; +const ulong RDB_MAX_ROW_LOCKS= 1024*1024*1024; +const ulong RDB_DEFAULT_BULK_LOAD_SIZE= 1000; +const ulong RDB_MAX_BULK_LOAD_SIZE= 1024*1024*1024; +const size_t RDB_DEFAULT_MERGE_BUF_SIZE= 64*1024*1024; +const size_t RDB_MIN_MERGE_BUF_SIZE= 100; +const size_t RDB_DEFAULT_MERGE_COMBINE_READ_SIZE= 1024*1024*1024; +const size_t RDB_MIN_MERGE_COMBINE_READ_SIZE= 
100; +const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE= 512*1024*1024; +const int64 RDB_MIN_BLOCK_CACHE_SIZE= 1024; +const int RDB_MAX_CHECKSUMS_PCT= 100; + //TODO: 0 means don't wait at all, and we don't support it yet? static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, "Number of seconds to wait for lock", - nullptr, nullptr, /*default*/ 1, /*min*/ 1, /*max*/ 1024*1024*1024, 0); + nullptr, nullptr, /*default*/ 1, /*min*/ 1, + /*max*/ RDB_MAX_LOCK_WAIT_SECONDS, 0); + +static MYSQL_THDVAR_BOOL(deadlock_detect, PLUGIN_VAR_RQCMDARG, + "Enables deadlock detection", nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(trace_sst_api, PLUGIN_VAR_RQCMDARG, + "Generate trace output in the log for each call to the SstFileWriter", + nullptr, nullptr, FALSE); static MYSQL_THDVAR_BOOL(bulk_load, PLUGIN_VAR_RQCMDARG, "Use bulk-load mode for inserts. This enables both " @@ -490,6 +517,11 @@ static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, "Enables using SstFileWriter for bulk loading", nullptr, nullptr, rocksdb_enable_bulk_load_api); +static MYSQL_THDVAR_STR(tmpdir, + PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_MEMALLOC, + "Directory for temporary files during DDL operations.", + nullptr, nullptr, ""); + static MYSQL_THDVAR_STR(skip_unique_check_tables, PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, "Skip unique constraint checking for the specified tables", nullptr, nullptr, @@ -520,8 +552,10 @@ static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG, static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, "Maximum number of locks a transaction can have", - nullptr, nullptr, /*default*/ 1024*1024*1024, /*min*/ 1, - /*max*/ 1024*1024*1024, 0); + nullptr, nullptr, + /*default*/ RDB_MAX_ROW_LOCKS, + /*min*/ 1, + /*max*/ RDB_MAX_ROW_LOCKS, 0); static MYSQL_THDVAR_BOOL(lock_scanned_rows, PLUGIN_VAR_RQCMDARG, "Take and hold locks on rows that are scanned but not updated", @@ -529,22 +563,25 @@ static MYSQL_THDVAR_BOOL(lock_scanned_rows, PLUGIN_VAR_RQCMDARG, static 
MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG, "Max #records in a batch for bulk-load mode", - nullptr, nullptr, /*default*/ 1000, /*min*/ 1, /*max*/ 1024*1024*1024, 0); + nullptr, nullptr, + /*default*/ RDB_DEFAULT_BULK_LOAD_SIZE, + /*min*/ 1, + /*max*/ RDB_MAX_BULK_LOAD_SIZE, 0); static MYSQL_THDVAR_ULONGLONG(merge_buf_size, PLUGIN_VAR_RQCMDARG, "Size to allocate for merge sort buffers written out to disk " "during inplace index creation.", nullptr, nullptr, - /* default (64MB) */ (ulonglong) 67108864, - /* min (100B) */ 100, + /* default (64MB) */ RDB_DEFAULT_MERGE_BUF_SIZE, + /* min (100B) */ RDB_MIN_MERGE_BUF_SIZE, /* max */ SIZE_T_MAX, 1); static MYSQL_THDVAR_ULONGLONG(merge_combine_read_size, PLUGIN_VAR_RQCMDARG, "Size that we have to work with during combine (reading from disk) phase of " "external sort during fast index creation.", nullptr, nullptr, - /* default (1GB) */ (ulonglong) 1073741824, - /* min (100B) */ 100, + /* default (1GB) */ RDB_DEFAULT_MERGE_COMBINE_READ_SIZE, + /* min (100B) */ RDB_MIN_MERGE_COMBINE_READ_SIZE, /* max */ SIZE_T_MAX, 1); static MYSQL_SYSVAR_BOOL(create_if_missing, @@ -600,8 +637,10 @@ static MYSQL_SYSVAR_UINT(wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG, "DBOptions::wal_recovery_mode for RocksDB", - nullptr, nullptr, 2, - /* min */ 0L, /* max */ 3, 0); + nullptr, nullptr, + /* default */ (uint) rocksdb::WALRecoveryMode::kPointInTimeRecovery, + /* min */ (uint) rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, + /* max */ (uint) rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); static MYSQL_SYSVAR_ULONG(compaction_readahead_size, rocksdb_db_options.compaction_readahead_size, @@ -621,22 +660,24 @@ static MYSQL_SYSVAR_UINT(access_hint_on_compaction_start, rocksdb_access_hint_on_compaction_start, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::access_hint_on_compaction_start for RocksDB", - nullptr, nullptr, 1, - /* min */ 0L, /* max */ 3, 0); + nullptr, nullptr, + /* default */ 
(uint) rocksdb::Options::AccessHint::NORMAL, + /* min */ (uint) rocksdb::Options::AccessHint::NONE, + /* max */ (uint) rocksdb::Options::AccessHint::WILLNEED, 0); static MYSQL_SYSVAR_BOOL(allow_concurrent_memtable_write, *reinterpret_cast( &rocksdb_db_options.allow_concurrent_memtable_write), PLUGIN_VAR_RQCMDARG, "DBOptions::allow_concurrent_memtable_write for RocksDB", - nullptr, nullptr, rocksdb_db_options.allow_concurrent_memtable_write); + nullptr, nullptr, false); static MYSQL_SYSVAR_BOOL(enable_write_thread_adaptive_yield, *reinterpret_cast( &rocksdb_db_options.enable_write_thread_adaptive_yield), PLUGIN_VAR_RQCMDARG, "DBOptions::enable_write_thread_adaptive_yield for RocksDB", - nullptr, nullptr, rocksdb_db_options.enable_write_thread_adaptive_yield); + nullptr, nullptr, false); static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options.max_open_files, @@ -760,11 +801,17 @@ static MYSQL_SYSVAR_ULONG(manifest_preallocation_size, nullptr, nullptr, rocksdb_db_options.manifest_preallocation_size, /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(allow_os_buffer, - *reinterpret_cast(&rocksdb_db_options.allow_os_buffer), +static MYSQL_SYSVAR_BOOL(use_direct_reads, + *reinterpret_cast(&rocksdb_db_options.use_direct_reads), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::allow_os_buffer for RocksDB", - nullptr, nullptr, rocksdb_db_options.allow_os_buffer); + "DBOptions::use_direct_reads for RocksDB", + nullptr, nullptr, rocksdb_db_options.use_direct_reads); + +static MYSQL_SYSVAR_BOOL(use_direct_writes, + *reinterpret_cast(&rocksdb_db_options.use_direct_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_direct_writes for RocksDB", + nullptr, nullptr, rocksdb_db_options.use_direct_writes); static MYSQL_SYSVAR_BOOL(allow_mmap_reads, *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), @@ -833,8 +880,10 @@ static MYSQL_SYSVAR_BOOL(enable_thread_tracking, static MYSQL_SYSVAR_LONGLONG(block_cache_size, 
rocksdb_block_cache_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "block_cache size for RocksDB", - nullptr, nullptr, /* RocksDB's default is 8 MB: */ 8*1024*1024L, - /* min */ 1024L, /* max */ LONGLONG_MAX, /* Block size */1024L); + nullptr, nullptr, + /* default */ RDB_DEFAULT_BLOCK_CACHE_SIZE, + /* min */ RDB_MIN_BLOCK_CACHE_SIZE, + /* max */ LONGLONG_MAX, /* Block size */ RDB_MIN_BLOCK_CACHE_SIZE); static MYSQL_SYSVAR_BOOL(cache_index_and_filter_blocks, *reinterpret_cast( @@ -1070,18 +1119,24 @@ static MYSQL_SYSVAR_BOOL(compaction_sequential_deletes_count_sd, "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); +static MYSQL_SYSVAR_BOOL(print_snapshot_conflict_queries, + rocksdb_print_snapshot_conflict_queries, + PLUGIN_VAR_RQCMDARG, + "Logging queries that got snapshot conflict errors into *.err log", + nullptr, nullptr, rocksdb_print_snapshot_conflict_queries); + static MYSQL_THDVAR_INT(checksums_pct, PLUGIN_VAR_RQCMDARG, "How many percentages of rows to be checksummed", - nullptr, nullptr, 100, - /* min */ 0, /* max */ 100, 0); + nullptr, nullptr, RDB_MAX_CHECKSUMS_PCT, + /* min */ 0, /* max */ RDB_MAX_CHECKSUMS_PCT, 0); -static MYSQL_THDVAR_BOOL(store_checksums, +static MYSQL_THDVAR_BOOL(store_row_debug_checksums, PLUGIN_VAR_RQCMDARG, "Include checksums when writing index/table records", nullptr, nullptr, false /* default value */); -static MYSQL_THDVAR_BOOL(verify_checksums, +static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG, "Verify checksums when reading index/table records", nullptr, nullptr, false /* default value */); @@ -1114,15 +1169,16 @@ static MYSQL_SYSVAR_UINT( RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); -static const longlong ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT= 4194304; static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE= 100; static struct st_mysql_sys_var* rocksdb_system_variables[]= { 
MYSQL_SYSVAR(lock_wait_timeout), + MYSQL_SYSVAR(deadlock_detect), MYSQL_SYSVAR(max_row_locks), MYSQL_SYSVAR(lock_scanned_rows), MYSQL_SYSVAR(bulk_load), MYSQL_SYSVAR(skip_unique_check_tables), + MYSQL_SYSVAR(trace_sst_api), MYSQL_SYSVAR(skip_unique_check), MYSQL_SYSVAR(commit_in_the_middle), MYSQL_SYSVAR(read_free_rpl_tables), @@ -1130,6 +1186,7 @@ static struct st_mysql_sys_var* rocksdb_system_variables[]= { MYSQL_SYSVAR(bulk_load_size), MYSQL_SYSVAR(merge_buf_size), MYSQL_SYSVAR(enable_bulk_load_api), + MYSQL_SYSVAR(tmpdir), MYSQL_SYSVAR(merge_combine_read_size), MYSQL_SYSVAR(skip_bloom_filter_on_read), @@ -1157,7 +1214,8 @@ static struct st_mysql_sys_var* rocksdb_system_variables[]= { MYSQL_SYSVAR(wal_ttl_seconds), MYSQL_SYSVAR(wal_size_limit_mb), MYSQL_SYSVAR(manifest_preallocation_size), - MYSQL_SYSVAR(allow_os_buffer), + MYSQL_SYSVAR(use_direct_reads), + MYSQL_SYSVAR(use_direct_writes), MYSQL_SYSVAR(allow_mmap_reads), MYSQL_SYSVAR(allow_mmap_writes), MYSQL_SYSVAR(is_fd_close_on_exec), @@ -1219,13 +1277,14 @@ static struct st_mysql_sys_var* rocksdb_system_variables[]= { MYSQL_SYSVAR(compaction_sequential_deletes_window), MYSQL_SYSVAR(compaction_sequential_deletes_file_size), MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), + MYSQL_SYSVAR(print_snapshot_conflict_queries), MYSQL_SYSVAR(datadir), MYSQL_SYSVAR(create_checkpoint), MYSQL_SYSVAR(checksums_pct), - MYSQL_SYSVAR(store_checksums), - MYSQL_SYSVAR(verify_checksums), + MYSQL_SYSVAR(store_row_debug_checksums), + MYSQL_SYSVAR(verify_row_debug_checksums), MYSQL_SYSVAR(validate_tables), MYSQL_SYSVAR(table_stats_sampling_pct), @@ -1233,7 +1292,8 @@ static struct st_mysql_sys_var* rocksdb_system_variables[]= { }; -static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD* thd) +static rocksdb::WriteOptions rdb_get_rocksdb_write_options( + my_core::THD* const thd) { rocksdb::WriteOptions opt; @@ -1253,7 +1313,7 @@ static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD* thd) */ 
uchar* Rdb_open_tables_map::get_hash_key( - Rdb_table_handler *table_handler, size_t *length, + Rdb_table_handler* const table_handler, size_t* const length, my_bool not_used __attribute__((__unused__))) { *length= table_handler->m_table_name_length; @@ -1325,7 +1385,7 @@ static PSI_thread_info all_rocksdb_threads[]= static void init_rocksdb_psi_keys() { - const char* category= "rocksdb"; + const char* const category= "rocksdb"; int count; if (PSI_server == nullptr) @@ -1357,21 +1417,21 @@ static void init_rocksdb_psi_keys() static Rdb_drop_index_thread rdb_drop_idx_thread; static void rocksdb_drop_index_wakeup_thread( - my_core::THD* thd __attribute__((__unused__)), - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr __attribute__((__unused__)), - const void* save) + my_core::THD* const thd __attribute__((__unused__)), + struct st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr __attribute__((__unused__)), + const void* const save) { if (*static_cast(save)) { rdb_drop_idx_thread.signal(); } } -static inline uint32_t rocksdb_perf_context_level(THD* thd) +static inline uint32_t rocksdb_perf_context_level(THD* const thd) { DBUG_ASSERT(thd != nullptr); - int session_perf_context_level= THDVAR(thd, perf_context_level); + const int session_perf_context_level= THDVAR(thd, perf_context_level); if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return session_perf_context_level; } @@ -1381,7 +1441,7 @@ static inline uint32_t rocksdb_perf_context_level(THD* thd) value. 
*/ - int global_perf_context_level= THDVAR(nullptr, perf_context_level); + const int global_perf_context_level= THDVAR(nullptr, perf_context_level); if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return global_perf_context_level; } @@ -1397,7 +1457,7 @@ static inline uint32_t rocksdb_perf_context_level(THD* thd) interface Rdb_tx_list_walker { virtual ~Rdb_tx_list_walker() {} - virtual void process_tran(const Rdb_transaction*) = 0; + virtual void process_tran(const Rdb_transaction* const) = 0; }; /* @@ -1412,7 +1472,10 @@ class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier void SnapshotCreated(const rocksdb::Snapshot *snapshot) override; public: - explicit Rdb_snapshot_notifier(Rdb_transaction* owning_tx) + Rdb_snapshot_notifier(const Rdb_snapshot_notifier&) = delete; + Rdb_snapshot_notifier& operator=(const Rdb_snapshot_notifier&) = delete; + + explicit Rdb_snapshot_notifier(Rdb_transaction* const owning_tx) : m_owning_tx(owning_tx) {} // If the owning Rdb_transaction gets destructed we need to not reference @@ -1467,10 +1530,17 @@ public: const char* m_mysql_log_file_name; my_off_t m_mysql_log_offset; const char* m_mysql_gtid; + const char* m_mysql_max_gtid; String m_detailed_error; int64_t m_snapshot_timestamp= 0; bool m_ddl_transaction; + /* + for distinction between rdb_transaction_impl and rdb_writebatch_impl + when using walk tx list + */ + virtual bool is_writebatch_trx() const = 0; + static void init_mutex() { mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST); @@ -1484,17 +1554,20 @@ public: static void walk_tx_list(Rdb_tx_list_walker* walker) { + DBUG_ASSERT(walker != nullptr); + mysql_mutex_lock(&s_tx_list_mutex); for (auto it : s_tx_list) walker->process_tran(it); mysql_mutex_unlock(&s_tx_list_mutex); } - int set_status_error(THD *thd, const rocksdb::Status &s, - const std::shared_ptr& kd, - Rdb_tbl_def *tbl_def) + int set_status_error(THD* const thd, const rocksdb::Status &s, + const 
Rdb_key_def& kd, + Rdb_tbl_def* const tbl_def) { DBUG_ASSERT(!s.ok()); + DBUG_ASSERT(tbl_def != nullptr); if (s.IsTimedOut()) { @@ -1509,17 +1582,40 @@ public: my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/); m_detailed_error.copy(timeout_message("index", tbl_def->full_tablename().c_str(), - kd->get_name().c_str())); + kd.get_name().c_str())); return HA_ERR_LOCK_WAIT_TIMEOUT; } - if (s.IsBusy()) + + if (s.IsDeadlock()) { - rocksdb_snapshot_conflict_errors++; + my_core::thd_mark_transaction_to_rollback(thd, + false /* just statement */); + return HA_ERR_LOCK_DEADLOCK; + } + else if (s.IsBusy()) + { + rocksdb_snapshot_conflict_errors++; + if (rocksdb_print_snapshot_conflict_queries) + { + char user_host_buff[MAX_USER_HOST_SIZE + 1]; + make_user_name(thd, user_host_buff); + // NO_LINT_DEBUG + sql_print_warning("Got snapshot conflict errors: User: %s " + "Query: %s", user_host_buff, thd->query()); + } return HA_ERR_LOCK_DEADLOCK; } - /* TODO: who returns HA_ERR_ROCKSDB_TOO_MANY_LOCKS now?? 
*/ + if (s.IsLockLimit()) + { + return HA_ERR_ROCKSDB_TOO_MANY_LOCKS; + } + + if (s.IsIOError() || s.IsCorruption()) + { + rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); + } my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str()); return HA_ERR_INTERNAL_ERROR; } @@ -1527,7 +1623,7 @@ public: THD* get_thd() const { return m_thd; } /* Used for tracking io_perf counters */ - void io_perf_start(Rdb_io_perf *io_perf) + void io_perf_start(Rdb_io_perf* const io_perf) { /* Since perf_context is tracked per thread, it is difficult and expensive @@ -1560,7 +1656,7 @@ public: } } - void io_perf_end_and_record(Rdb_io_perf *io_perf) + void io_perf_end_and_record(Rdb_io_perf* const io_perf) { if (m_tbl_io_perf == io_perf) { @@ -1579,11 +1675,13 @@ public: ulonglong get_write_count() const { return m_write_count; } + int get_timeout_sec() const { return m_timeout_sec; } + ulonglong get_lock_count() const { return m_lock_count; } virtual void set_sync(bool sync)= 0; - virtual void release_lock(rocksdb::ColumnFamilyHandle* column_family, + virtual void release_lock(rocksdb::ColumnFamilyHandle* const column_family, const std::string& rowkey)= 0; virtual bool prepare(const rocksdb::TransactionName& name)= 0; @@ -1626,18 +1724,21 @@ public: else { my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name, - &m_mysql_log_offset, &m_mysql_gtid); + &m_mysql_log_offset, &m_mysql_gtid, + &m_mysql_max_gtid); binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset, - m_mysql_gtid, get_write_batch()); + m_mysql_max_gtid, get_write_batch()); return commit_no_binlog(); } } virtual void rollback()= 0; - void snapshot_created(const rocksdb::Snapshot *snapshot) + void snapshot_created(const rocksdb::Snapshot* const snapshot) { + DBUG_ASSERT(snapshot != nullptr); + m_read_opts.snapshot = snapshot; rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); m_is_delayed_snapshot = false; @@ -1676,7 +1777,7 @@ public: return rc; } - void start_bulk_load(ha_rocksdb* bulk_load) + void 
start_bulk_load(ha_rocksdb* const bulk_load) { /* If we already have an open bulk load of a table and the name doesn't @@ -1684,17 +1785,19 @@ public: multiple bulk loads to occur on a partitioned table, but then closes them all out when we switch to another table. */ + DBUG_ASSERT(bulk_load != nullptr); + if (!m_curr_bulk_load.empty() && !bulk_load->same_table(*m_curr_bulk_load[0])) { - auto res= finish_bulk_load(); + const auto res= finish_bulk_load(); SHIP_ASSERT(res == 0); } m_curr_bulk_load.push_back(bulk_load); } - void end_bulk_load(ha_rocksdb* bulk_load) + void end_bulk_load(ha_rocksdb* const bulk_load) { for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); it++) @@ -1710,6 +1813,11 @@ public: SHIP_ASSERT(0); } + int num_ongoing_bulk_load() const + { + return m_curr_bulk_load.size(); + } + /* Flush the data accumulated so far. This assumes we're doing a bulk insert. @@ -1737,13 +1845,14 @@ public: return false; } - virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, const rocksdb::Slice& value)= 0; - virtual rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key)= 0; + virtual rocksdb::Status delete_key( + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key)= 0; virtual rocksdb::Status single_delete( - rocksdb::ColumnFamilyHandle* column_family, + rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key)= 0; virtual bool has_modifications() const= 0; @@ -1758,22 +1867,24 @@ public: return get_indexed_write_batch()->GetWriteBatch(); } - virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, std::string* value) const= 0; virtual rocksdb::Status get_for_update( - rocksdb::ColumnFamilyHandle* column_family, - 
const rocksdb::Slice& key, std::string* value)= 0; + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key, std::string* const value, bool exclusive)= 0; - rocksdb::Iterator *get_iterator(rocksdb::ColumnFamilyHandle* column_family, - bool skip_bloom_filter, - bool fill_cache, - bool read_current= false, - bool create_snapshot= true) + rocksdb::Iterator *get_iterator( + rocksdb::ColumnFamilyHandle* const column_family, + bool skip_bloom_filter, + bool fill_cache, + bool read_current= false, + bool create_snapshot= true) { // Make sure we are not doing both read_current (which implies we don't // want a snapshot) and create_snapshot which makes sure we create // a snapshot + DBUG_ASSERT(column_family != nullptr); DBUG_ASSERT(!read_current || !create_snapshot); if (create_snapshot) @@ -1817,7 +1928,7 @@ public: return true; } - int rollback_to_savepoint(void *savepoint) + int rollback_to_savepoint(void* const savepoint) { if (has_modifications()) { @@ -1852,7 +1963,7 @@ public: m_tx_read_only= val; } - explicit Rdb_transaction(THD *thd): m_thd(thd), m_tbl_io_perf(nullptr) + explicit Rdb_transaction(THD* const thd): m_thd(thd), m_tbl_io_perf(nullptr) { mysql_mutex_lock(&s_tx_list_mutex); s_tx_list.insert(this); @@ -1884,7 +1995,7 @@ class Rdb_transaction_impl : public Rdb_transaction void set_lock_timeout(int timeout_sec_arg) override { if (m_rocksdb_tx) - m_rocksdb_tx->SetLockTimeout(m_timeout_sec * 1000); + m_rocksdb_tx->SetLockTimeout(rdb_convert_sec_to_ms(m_timeout_sec)); } void set_sync(bool sync) override @@ -1892,7 +2003,7 @@ class Rdb_transaction_impl : public Rdb_transaction m_rocksdb_tx->GetWriteOptions()->sync= sync; } - void release_lock(rocksdb::ColumnFamilyHandle* column_family, + void release_lock(rocksdb::ColumnFamilyHandle* const column_family, const std::string &rowkey) override { if (!THDVAR(m_thd, lock_scanned_rows)) @@ -1901,6 +2012,8 @@ class Rdb_transaction_impl : public Rdb_transaction } } + virtual bool 
is_writebatch_trx() const override { return false; } + private: void release_tx(void) { @@ -1934,7 +2047,7 @@ class Rdb_transaction_impl : public Rdb_transaction { bool res= false; release_snapshot(); - rocksdb::Status s= m_rocksdb_tx->Commit(); + const rocksdb::Status s= m_rocksdb_tx->Commit(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); @@ -2016,37 +2129,35 @@ class Rdb_transaction_impl : public Rdb_transaction return m_read_opts.snapshot != nullptr; } - const char *err_too_many_locks= - "Number of locks held by the transaction exceeded @@rocksdb_max_row_locks"; - - rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, const rocksdb::Slice& value) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) - return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); return m_rocksdb_tx->Put(column_family, key, value); } - rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) - return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); return m_rocksdb_tx->Delete(column_family, key); } - rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key) override + rocksdb::Status single_delete( + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) - return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); + return 
rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); return m_rocksdb_tx->SingleDelete(column_family, key); } @@ -2076,29 +2187,34 @@ class Rdb_transaction_impl : public Rdb_transaction return m_rocksdb_tx->GetWriteBatch(); } - rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, std::string* value) const override { return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); } - rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key, - std::string* value) override + rocksdb::Status get_for_update( + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key, + std::string* const value, bool exclusive) override { if (++m_lock_count > m_max_row_locks) - return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks)); - return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value); + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + + return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value, + exclusive); } rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options, - rocksdb::ColumnFamilyHandle* column_family) - override + rocksdb::ColumnFamilyHandle* const column_family) + override { return m_rocksdb_tx->GetIterator(options, column_family); } + const rocksdb::Transaction* get_rdb_trx() const { return m_rocksdb_tx; } + bool is_tx_started() const override { return (m_rocksdb_tx != nullptr); @@ -2109,7 +2225,8 @@ class Rdb_transaction_impl : public Rdb_transaction rocksdb::TransactionOptions tx_opts; rocksdb::WriteOptions write_opts; tx_opts.set_snapshot= false; - tx_opts.lock_timeout= m_timeout_sec * 1000; + tx_opts.lock_timeout= rdb_convert_sec_to_ms(m_timeout_sec); + tx_opts.deadlock_detect= THDVAR(m_thd, deadlock_detect); write_opts.sync= THDVAR(m_thd, write_sync); write_opts.disableWAL= THDVAR(m_thd, write_disable_wal); @@ 
-2155,10 +2272,10 @@ class Rdb_transaction_impl : public Rdb_transaction /* TODO: here we must release the locks taken since the start_stmt() call */ if (m_rocksdb_tx) { - const rocksdb::Snapshot *org_snapshot = m_rocksdb_tx->GetSnapshot(); + const rocksdb::Snapshot* const org_snapshot = m_rocksdb_tx->GetSnapshot(); m_rocksdb_tx->RollbackToSavePoint(); - const rocksdb::Snapshot *cur_snapshot = m_rocksdb_tx->GetSnapshot(); + const rocksdb::Snapshot* const cur_snapshot = m_rocksdb_tx->GetSnapshot(); if (org_snapshot != cur_snapshot) { if (org_snapshot != nullptr) @@ -2173,7 +2290,7 @@ class Rdb_transaction_impl : public Rdb_transaction } } - explicit Rdb_transaction_impl(THD *thd) : + explicit Rdb_transaction_impl(THD* const thd) : Rdb_transaction(thd), m_rocksdb_tx(nullptr) { // Create a notifier that can be called when a snapshot gets generated. @@ -2224,8 +2341,8 @@ class Rdb_writebatch_impl : public Rdb_transaction { bool res= false; release_snapshot(); - rocksdb::Status s= rdb->GetBaseDB()->Write(write_opts, - m_batch->GetWriteBatch()); + const rocksdb::Status s= rdb->GetBaseDB()->Write(write_opts, + m_batch->GetWriteBatch()); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); @@ -2239,6 +2356,8 @@ class Rdb_writebatch_impl : public Rdb_transaction return res; } public: + bool is_writebatch_trx() const override { return true; } + void set_lock_timeout(int timeout_sec_arg) override { // Nothing to do here. @@ -2249,7 +2368,7 @@ class Rdb_writebatch_impl : public Rdb_transaction write_opts.sync= sync; } - void release_lock(rocksdb::ColumnFamilyHandle* column_family, + void release_lock(rocksdb::ColumnFamilyHandle* const column_family, const std::string &rowkey) override { // Nothing to do here since we don't hold any row locks. 
@@ -2281,7 +2400,7 @@ class Rdb_writebatch_impl : public Rdb_transaction } } - rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, const rocksdb::Slice& value) override { @@ -2292,7 +2411,7 @@ class Rdb_writebatch_impl : public Rdb_transaction return rocksdb::Status::OK(); } - rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key) override { ++m_write_count; @@ -2300,8 +2419,9 @@ class Rdb_writebatch_impl : public Rdb_transaction return rocksdb::Status::OK(); } - rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key) override + rocksdb::Status single_delete( + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key) override { ++m_write_count; m_batch->SingleDelete(column_family, key); @@ -2324,26 +2444,27 @@ class Rdb_writebatch_impl : public Rdb_transaction return m_batch; } - rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family, + rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, const rocksdb::Slice& key, - std::string* value) const override + std::string* const value) const override { return m_batch->GetFromBatchAndDB( rdb, m_read_opts, column_family, key, value); } - rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key, - std::string* value) override + rocksdb::Status get_for_update( + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key, + std::string* const value, bool exclusive) override { return get(column_family, key, value); } rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options, - rocksdb::ColumnFamilyHandle* column_family) - override + rocksdb::ColumnFamilyHandle* const column_family) + override { - auto it = rdb->NewIterator(options); 
+ const auto it = rdb->NewIterator(options); return m_batch->NewIteratorWithBase(it); } @@ -2372,7 +2493,7 @@ class Rdb_writebatch_impl : public Rdb_transaction m_batch->RollbackToSavePoint(); } - explicit Rdb_writebatch_impl(THD *thd) : + explicit Rdb_writebatch_impl(THD* const thd) : Rdb_transaction(thd), m_batch(nullptr) { m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), @@ -2386,7 +2507,8 @@ class Rdb_writebatch_impl : public Rdb_transaction } }; -void Rdb_snapshot_notifier::SnapshotCreated(const rocksdb::Snapshot *snapshot) +void Rdb_snapshot_notifier::SnapshotCreated( + const rocksdb::Snapshot* const snapshot) { if (m_owning_tx != nullptr) { @@ -2397,7 +2519,7 @@ void Rdb_snapshot_notifier::SnapshotCreated(const rocksdb::Snapshot *snapshot) std::multiset Rdb_transaction::s_tx_list; mysql_mutex_t Rdb_transaction::s_tx_list_mutex; -static Rdb_transaction* &get_tx_from_thd(THD *thd) +static Rdb_transaction* &get_tx_from_thd(THD* const thd) { return *reinterpret_cast( my_core::thd_ha_data(thd, rocksdb_hton)); @@ -2411,7 +2533,10 @@ class Rdb_perf_context_guard THD *m_thd; public: - explicit Rdb_perf_context_guard(THD *thd) : m_thd(thd) + Rdb_perf_context_guard(const Rdb_perf_context_guard&) = delete; + Rdb_perf_context_guard& operator=(const Rdb_perf_context_guard&) = delete; + + explicit Rdb_perf_context_guard(THD* const thd) : m_thd(thd) { Rdb_transaction*& tx= get_tx_from_thd(m_thd); /* @@ -2440,7 +2565,7 @@ class Rdb_perf_context_guard TODO: maybe, call this in external_lock() and store in ha_rocksdb.. */ -static Rdb_transaction *get_or_create_tx(THD *thd) +static Rdb_transaction *get_or_create_tx(THD* const thd) { Rdb_transaction*& tx= get_tx_from_thd(thd); // TODO: this is called too many times.. 
O(#rows) @@ -2468,7 +2593,7 @@ static Rdb_transaction *get_or_create_tx(THD *thd) } -static int rocksdb_close_connection(handlerton* hton, THD* thd) +static int rocksdb_close_connection(handlerton* const hton, THD* const thd) { Rdb_transaction*& tx= get_tx_from_thd(thd); if (tx != nullptr) @@ -2506,7 +2631,7 @@ static std::string rdb_xid_to_string(const XID& src) */ uchar fidbuf[RDB_FORMATID_SZ]; int64 signed_fid8= src.formatID; - uint64 raw_fid8= *reinterpret_cast(&signed_fid8); + const uint64 raw_fid8= *reinterpret_cast(&signed_fid8); rdb_netbuf_store_uint64(fidbuf, raw_fid8); buf.append(reinterpret_cast(fidbuf), RDB_FORMATID_SZ); @@ -2521,11 +2646,13 @@ static std::string rdb_xid_to_string(const XID& src) Called by hton->flush_logs after MySQL group commit prepares a set of transactions. */ -static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)), - ulonglong target_lsn __attribute__((__unused__))) +static bool rocksdb_flush_wal( + handlerton* const hton __attribute__((__unused__)), + ulonglong target_lsn __attribute__((__unused__))) { DBUG_ASSERT(rdb != nullptr); - rocksdb::Status s= rdb->SyncWAL(); + rocksdb_wal_group_syncs++; + const rocksdb::Status s= rdb->SyncWAL(); if (!s.ok()) { return 1; } @@ -2536,8 +2663,8 @@ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)), For a slave, prepare() updates the slave_gtid_info table which tracks the replication progress. 
*/ -static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, - bool async) +static int rocksdb_prepare(handlerton* const hton, THD* const thd, + bool prepare_tx, bool async) { Rdb_transaction*& tx= get_tx_from_thd(thd); if (!tx->can_prepare()) @@ -2550,8 +2677,8 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, this is an SQL statement end and autocommit is on */ std::vector slave_gtid_info; my_core::thd_slave_gtid_info(thd, &slave_gtid_info); - for (auto it : slave_gtid_info) { - rocksdb::WriteBatchBase* write_batch = tx->get_blind_write_batch(); + for (const auto &it : slave_gtid_info) { + rocksdb::WriteBatchBase* const write_batch = tx->get_blind_write_batch(); binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch); } @@ -2564,7 +2691,8 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, if (!tx->prepare(rdb_xid_to_string(xid))) { return 1; } - if (thd->durability_property == HA_IGNORE_DURABILITY) { + if (thd->durability_property == HA_IGNORE_DURABILITY + && THDVAR(thd, write_sync)) { /** we set the log sequence as '1' just to trigger hton->flush_logs */ @@ -2582,14 +2710,14 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx, do nothing for prepare/commit by xid this is needed to avoid crashes in XA scenarios */ -static int rocksdb_commit_by_xid(handlerton* hton, XID* xid) +static int rocksdb_commit_by_xid(handlerton* const hton, XID* const xid) { - auto name= rdb_xid_to_string(*xid); - rocksdb::Transaction *trx= rdb->GetTransactionByName(name); + const auto name= rdb_xid_to_string(*xid); + rocksdb::Transaction* const trx= rdb->GetTransactionByName(name); if (trx == nullptr) { return 1; } - rocksdb::Status s= trx->Commit(); + const rocksdb::Status s= trx->Commit(); if (!s.ok()) { return 1; } @@ -2597,15 +2725,16 @@ static int rocksdb_commit_by_xid(handlerton* hton, XID* xid) return 0; } -static int rocksdb_rollback_by_xid(handlerton* hton 
__attribute__((__unused__)), - XID* xid) +static int rocksdb_rollback_by_xid( + handlerton* const hton __attribute__((__unused__)), + XID* const xid) { - auto name= rdb_xid_to_string(*xid); - rocksdb::Transaction *trx= rdb->GetTransactionByName(name); + const auto name= rdb_xid_to_string(*xid); + rocksdb::Transaction* const trx= rdb->GetTransactionByName(name); if (trx == nullptr) { return 1; } - rocksdb::Status s= trx->Rollback(); + const rocksdb::Status s= trx->Rollback(); if (!s.ok()) { return 1; } @@ -2616,13 +2745,13 @@ static int rocksdb_rollback_by_xid(handlerton* hton __attribute__((__unused__)), /** Rebuilds an XID from a serialized version stored in a string. */ -static void rdb_xid_from_string(const std::string& src, XID *dst) +static void rdb_xid_from_string(const std::string& src, XID* const dst) { DBUG_ASSERT(dst != nullptr); uint offset= 0; uint64 raw_fid8= rdb_netbuf_to_uint64(reinterpret_cast(src.data())); - int64 signed_fid8= *reinterpret_cast(&raw_fid8); + const int64 signed_fid8= *reinterpret_cast(&raw_fid8); dst->formatID= signed_fid8; offset += RDB_FORMATID_SZ; dst->gtrid_length= src.at(offset); @@ -2640,8 +2769,10 @@ static void rdb_xid_from_string(const std::string& src, XID *dst) Reading last committed binary log info from RocksDB system row. The info is needed for crash safe slave/master to work. 
*/ -static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, - char* binlog_file, my_off_t* binlog_pos) +static int rocksdb_recover(handlerton* const hton, XID* const xid_list, + uint len, char* const binlog_file, + my_off_t* const binlog_pos, + Gtid* const binlog_max_gtid) { if (binlog_file && binlog_pos) { @@ -2658,6 +2789,9 @@ static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, " file name %s\n", pos, file_buf); if (*gtid_buf) { + global_sid_lock->rdlock(); + binlog_max_gtid->parse(global_sid_map, gtid_buf); + global_sid_lock->unlock(); fprintf(stderr, "RocksDB: Last MySQL Gtid %s\n", gtid_buf); } } @@ -2686,7 +2820,8 @@ static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len, return count; } -static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx, bool) +static int rocksdb_commit(handlerton* const hton, THD* const thd, + bool commit_tx, bool) { DBUG_ENTER("rocksdb_commit"); @@ -2731,7 +2866,8 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx, bool) } -static int rocksdb_rollback(handlerton* hton, THD* thd, bool rollback_tx) +static int rocksdb_rollback(handlerton* const hton, THD* const thd, + bool rollback_tx) { Rdb_perf_context_guard guard(thd); Rdb_transaction*& tx= get_tx_from_thd(thd); @@ -2771,7 +2907,7 @@ static int rocksdb_rollback(handlerton* hton, THD* thd, bool rollback_tx) return 0; } -static bool print_stats(THD* thd, +static bool print_stats(THD* const thd, std::string const& type, std::string const& name, std::string const& status, @@ -2782,26 +2918,55 @@ static bool print_stats(THD* thd, } static std::string format_string( - const char *format, + const char* const format, ...) 
{ std::string res; va_list args; va_list args_copy; + char static_buff[256]; + + DBUG_ASSERT(format != nullptr); va_start(args, format); va_copy(args_copy, args); - size_t len = vsnprintf(nullptr, 0, format, args) + 1; + // Calculate how much space we will need + int len = vsnprintf(nullptr, 0, format, args); va_end(args); - if (len == 0) { + if (len < 0) + { + res = std::string(""); + } + else if (len == 0) + { + // Shortcut for an empty string res = std::string(""); } - else { - char buff[len]; + else + { + // For short enough output use a static buffer + char* buff= static_buff; + std::unique_ptr dynamic_buff= nullptr; + + len++; // Add one for null terminator + + // for longer output use an allocated buffer + if (static_cast(len) > sizeof(static_buff)) + { + dynamic_buff.reset(new char[len]); + buff= dynamic_buff.get(); + } + + // Now re-do the vsnprintf with the buffer which is now large enough (void) vsnprintf(buff, len, format, args_copy); + // Convert to a std::string. Note we could have created a std::string + // large enough and then converted the buffer to a 'char*' and created + // the output in place. This would probably work but feels like a hack. + // Since this isn't code that needs to be super-performant we are going + // with this 'safer' method. 
res = std::string(buff); } @@ -2858,8 +3023,10 @@ class Rdb_snapshot_status : public Rdb_tx_list_walker /* Implement Rdb_transaction interface */ /* Create one row in the snapshot status table */ - void process_tran(const Rdb_transaction *tx) override + void process_tran(const Rdb_transaction* const tx) override { + DBUG_ASSERT(tx != nullptr); + /* Calculate the duration the snapshot has existed */ int64_t snapshot_timestamp = tx->m_snapshot_timestamp; if (snapshot_timestamp != 0) @@ -2868,27 +3035,129 @@ class Rdb_snapshot_status : public Rdb_tx_list_walker rdb->GetEnv()->GetCurrentTime(&curr_time); THD* thd = tx->get_thd(); - + char buffer[1024]; + thd_security_context(thd, buffer, sizeof buffer, 0); m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" - "MySQL thread id %lu, OS thread handle %p\n" + "%s\n" "lock count %llu, write count %llu\n", curr_time - snapshot_timestamp, - my_core::thd_get_thread_id(thd), thd, + buffer, tx->get_lock_count(), tx->get_write_count()); } } }; +/** + * @brief + * walks through all non-replication transactions and copies + * out relevant information for information_schema.rocksdb_trx + */ +class Rdb_trx_info_aggregator : public Rdb_tx_list_walker +{ + private: + std::vector *m_trx_info; + + public: + explicit Rdb_trx_info_aggregator(std::vector* const trx_info) : + m_trx_info(trx_info) {} + + void process_tran(const Rdb_transaction* const tx) override + { + static const std::map state_map = { + {rocksdb::Transaction::STARTED, "STARTED"}, + {rocksdb::Transaction::AWAITING_PREPARE, "AWAITING_PREPARE"}, + {rocksdb::Transaction::PREPARED, "PREPARED"}, + {rocksdb::Transaction::AWAITING_COMMIT, "AWAITING_COMMIT"}, + {rocksdb::Transaction::COMMITED, "COMMITED"}, + {rocksdb::Transaction::AWAITING_ROLLBACK, "AWAITING_ROLLBACK"}, + {rocksdb::Transaction::ROLLEDBACK, "ROLLEDBACK"}, + }; + + DBUG_ASSERT(tx != nullptr); + + THD* const thd = tx->get_thd(); + ulong thread_id = thd_thread_id(thd); + + if (tx->is_writebatch_trx()) { + const 
auto wb_impl = static_cast(tx); + DBUG_ASSERT(wb_impl); + m_trx_info->push_back({"", /* name */ + 0, /* trx_id */ + wb_impl->get_write_count(), + 0, /* lock_count */ + 0, /* timeout_sec */ + "", /* state */ + "", /* waiting_key */ + 0, /* waiting_cf_id */ + 1, /*is_replication */ + 1, /* skip_trx_api */ + wb_impl->is_tx_read_only(), + 0, /* deadlock detection */ + wb_impl->num_ongoing_bulk_load(), + thread_id, + "" /* query string */ }); + } else { + const auto tx_impl= static_cast(tx); + DBUG_ASSERT(tx_impl); + const rocksdb::Transaction *rdb_trx = tx_impl->get_rdb_trx(); + + if (rdb_trx == nullptr) { + return; + } + + std::string query_str; + LEX_STRING* const lex_str = thd_query_string(thd); + if (lex_str != nullptr && lex_str->str != nullptr) { + query_str = std::string(lex_str->str); + } + + const auto state_it = state_map.find(rdb_trx->GetState()); + DBUG_ASSERT(state_it != state_map.end()); + const int is_replication = (thd->rli_slave != nullptr); + uint32_t waiting_cf_id; + std::string waiting_key; + rdb_trx->GetWaitingTxns(&waiting_cf_id, &waiting_key), + + m_trx_info->push_back({rdb_trx->GetName(), + rdb_trx->GetID(), + tx_impl->get_write_count(), + tx_impl->get_lock_count(), + tx_impl->get_timeout_sec(), + state_it->second, + waiting_key, + waiting_cf_id, + is_replication, + 0, /* skip_trx_api */ + tx_impl->is_tx_read_only(), + rdb_trx->IsDeadlockDetect(), + tx_impl->num_ongoing_bulk_load(), + thread_id, + query_str}); + } + } +}; + +/* + returns a vector of info for all non-replication threads + for use by information_schema.rocksdb_trx +*/ +std::vector rdb_get_all_trx_info() { + std::vector trx_info; + Rdb_trx_info_aggregator trx_info_agg(&trx_info); + Rdb_transaction::walk_tx_list(&trx_info_agg); + return trx_info; +} + /* Generate the snapshot status table */ -static bool rocksdb_show_snapshot_status(handlerton* hton, - THD* thd, - stat_print_fn* stat_print) +static bool rocksdb_show_snapshot_status(handlerton* const hton, + THD* const thd, + 
stat_print_fn* const stat_print) { Rdb_snapshot_status showStatus; Rdb_transaction::walk_tx_list(&showStatus); - // Send the result data back to MySQL */ + /* Send the result data back to MySQL */ return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(), stat_print); } @@ -2900,9 +3169,9 @@ static bool rocksdb_show_snapshot_status(handlerton* hton, what column families are there) */ -static bool rocksdb_show_status(handlerton* hton, - THD* thd, - stat_print_fn* stat_print, +static bool rocksdb_show_status(handlerton* const hton, + THD* const thd, + stat_print_fn* const stat_print, enum ha_stat_type stat_type) { bool res= false; @@ -2916,7 +3185,7 @@ static bool rocksdb_show_status(handlerton* hton, } /* Per column family stats */ - for (auto cf_name : cf_manager.get_cf_names()) + for (const auto &cf_name : cf_manager.get_cf_names()) { rocksdb::ColumnFamilyHandle* cfh; bool is_automatic; @@ -2948,13 +3217,13 @@ static bool rocksdb_show_status(handlerton* hton, { rocksdb::ColumnFamilyDescriptor cf_desc; cf_handle->GetDescriptor(&cf_desc); - auto* table_factory = cf_desc.options.table_factory.get(); + auto* const table_factory = cf_desc.options.table_factory.get(); if (table_factory != nullptr) { std::string tf_name = table_factory->Name(); if (tf_name.find("BlockBasedTable") != std::string::npos) { - const rocksdb::BlockBasedTableOptions* bbt_opt = + const rocksdb::BlockBasedTableOptions* const bbt_opt = reinterpret_cast( table_factory->GetOptions()); if (bbt_opt != nullptr) @@ -3003,9 +3272,11 @@ static bool rocksdb_show_status(handlerton* hton, return res; } -static inline void rocksdb_register_tx(handlerton *hton, THD *thd, - Rdb_transaction *tx) +static inline void rocksdb_register_tx(handlerton* const hton, THD* const thd, + Rdb_transaction* const tx) { + DBUG_ASSERT(tx != nullptr); + trans_register_ha(thd, FALSE, rocksdb_hton); if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { @@ -3036,14 +3307,14 @@ static inline void 
rocksdb_register_tx(handlerton *hton, THD *thd, InnoDB and RocksDB transactions. */ static int rocksdb_start_tx_and_assign_read_view( - handlerton* hton, /*!< in: RocksDB handlerton */ - THD* thd, /*!< in: MySQL thread handle of the + handlerton* const hton, /*!< in: RocksDB handlerton */ + THD* const thd, /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ - char* binlog_file, /* out: binlog file for last commit */ - ulonglong* binlog_pos, /* out: binlog pos for last commit */ - char** gtid_executed, /* out: Gtids logged until last commit */ - int* gtid_executed_length) /*out: Length of gtid_executed string */ + char* const binlog_file, /* out: binlog file for last commit */ + ulonglong* const binlog_pos, /* out: binlog pos for last commit */ + char** gtid_executed, /* out: Gtids logged until last commit */ + int* const gtid_executed_length)/*out: Length of gtid_executed string */ { Rdb_perf_context_guard guard(thd); @@ -3066,7 +3337,7 @@ static int rocksdb_start_tx_and_assign_read_view( return 1; } - Rdb_transaction* tx= get_or_create_tx(thd); + Rdb_transaction* const tx= get_or_create_tx(thd); DBUG_ASSERT(!tx->has_snapshot()); tx->set_tx_read_only(true); rocksdb_register_tx(hton, thd, tx); @@ -3084,20 +3355,22 @@ static int rocksdb_start_tx_and_assign_read_view( * Current SAVEPOINT does not correctly handle ROLLBACK and does not return * errors. This needs to be addressed in future versions (Issue#96). 
*/ -static int rocksdb_savepoint(handlerton *hton, THD *thd, void *savepoint) +static int rocksdb_savepoint(handlerton* const hton, THD* const thd, + void* const savepoint) { return 0; } -static int rocksdb_rollback_to_savepoint(handlerton *hton, THD *thd, - void *savepoint) +static int rocksdb_rollback_to_savepoint(handlerton* const hton, THD* const thd, + void* const savepoint) { Rdb_transaction*& tx= get_tx_from_thd(thd); return tx->rollback_to_savepoint(savepoint); } -static bool rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *hton, - THD *thd) +static bool rocksdb_rollback_to_savepoint_can_release_mdl( + handlerton* const hton, + THD* const thd) { return true; } @@ -3195,7 +3468,7 @@ static void rocksdb_update_table_stats( static rocksdb::Status check_rocksdb_options_compatibility( - const char *dbpath, + const char* const dbpath, const rocksdb::Options& main_opts, const std::vector& cf_descr) { @@ -3253,7 +3526,7 @@ static rocksdb::Status check_rocksdb_options_compatibility( Storage Engine initialization function, invoked when plugin is loaded. */ -static int rocksdb_init_func(void *p) +static int rocksdb_init_func(void* const p) { DBUG_ENTER("rocksdb_init_func"); @@ -3348,12 +3621,22 @@ static int rocksdb_init_func(void *p) (rocksdb_access_hint_on_compaction_start); if (rocksdb_db_options.allow_mmap_reads && - !rocksdb_db_options.allow_os_buffer) + rocksdb_db_options.use_direct_reads) { - // allow_mmap_reads implies allow_os_buffer and RocksDB will not open if - // mmap_reads is on and os_buffer is off. (NO_LINT_DEBUG) - sql_print_error("RocksDB: Can't disable allow_os_buffer " - "if allow_mmap_reads is enabled\n"); + // allow_mmap_reads implies !use_direct_reads and RocksDB will not open if + // mmap_reads and direct_reads are both on. 
(NO_LINT_DEBUG) + sql_print_error("RocksDB: Can't enable both use_direct_reads " + "and allow_mmap_reads\n"); + rdb_open_tables.free_hash(); + DBUG_RETURN(1); + } + + if (rocksdb_db_options.allow_mmap_writes && + rocksdb_db_options.use_direct_writes) + { + // See above comment for allow_mmap_reads. (NO_LINT_DEBUG) + sql_print_error("RocksDB: Can't enable both use_direct_writes " + "and allow_mmap_writes\n"); rdb_open_tables.free_hash(); DBUG_RETURN(1); } @@ -3418,7 +3701,7 @@ static int rocksdb_init_func(void *p) mysql_mutex_unlock(&rdb_sysvars_mutex); } - if (!rocksdb_cf_options_map.init(ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT, + if (!rocksdb_cf_options_map.init( rocksdb_tbl_options, properties_collector_factory, rocksdb_default_cf_options, @@ -3464,33 +3747,6 @@ static int rocksdb_init_func(void *p) rocksdb::Options main_opts(rocksdb_db_options, rocksdb_cf_options_map.get_defaults()); - /* - Flashcache configuration: - When running on Flashcache, mysqld opens Flashcache device before - initializing storage engines, and setting file descriptor at - cachedev_fd global variable. - RocksDB has Flashcache-aware configuration. When this is enabled, - RocksDB adds background threads into Flashcache blacklists, which - makes sense for Flashcache use cases. 
- */ - if (cachedev_enabled) - { - flashcache_aware_env= - rocksdb::NewFlashcacheAwareEnv(rocksdb::Env::Default(), - cachedev_fd); - if (flashcache_aware_env.get() == nullptr) - { - // NO_LINT_DEBUG - sql_print_error("RocksDB: Failed to open flashcache device at fd %d", - cachedev_fd); - rdb_open_tables.free_hash(); - DBUG_RETURN(1); - } - sql_print_information("RocksDB: Disabling flashcache on background " - "writer threads, fd %d", cachedev_fd); - main_opts.env= flashcache_aware_env.get(); - } - main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes, rocksdb::Env::Priority::HIGH); main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions, @@ -3557,7 +3813,7 @@ static int rocksdb_init_func(void *p) */ std::vector compaction_enabled_cf_handles; compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size()); - for (auto index : compaction_enabled_cf_indices) + for (const auto &index : compaction_enabled_cf_indices) { compaction_enabled_cf_handles.push_back(cf_handles[index]); } @@ -3566,7 +3822,7 @@ static int rocksdb_init_func(void *p) if (!status.ok()) { - std::string err_text= status.ToString(); + const std::string err_text= status.ToString(); // NO_LINT_DEBUG sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); rdb_open_tables.free_hash(); @@ -3603,6 +3859,18 @@ static int rocksdb_init_func(void *p) rdb->PauseBackgroundWork(); } + // NO_LINT_DEBUG + sql_print_information("RocksDB: global statistics using %s indexer", + STRINGIFY_ARG(RDB_INDEXER)); +#if defined(HAVE_SCHED_GETCPU) + if (sched_getcpu() == -1) + { + // NO_LINT_DEBUG + sql_print_information("RocksDB: sched_getcpu() failed - " + "global statistics will use thread_id_indexer_t instead"); + } +#endif + sql_print_information("RocksDB instance opened"); DBUG_RETURN(0); } @@ -3611,7 +3879,7 @@ static int rocksdb_init_func(void *p) Storage Engine deinitialization function, invoked when plugin is unloaded. 
*/ -static int rocksdb_done_func(void *p) +static int rocksdb_done_func(void* const p) { int error= 0; DBUG_ENTER("rocksdb_done_func"); @@ -3704,7 +3972,7 @@ static int rocksdb_done_func(void *p) */ Rdb_table_handler* Rdb_open_tables_map::get_table_handler( - const char *table_name) + const char* const table_name) { Rdb_table_handler *table_handler; uint length; @@ -3746,7 +4014,7 @@ Rdb_table_handler* Rdb_open_tables_map::get_table_handler( } thr_lock_init(&table_handler->m_thr_lock); - my_io_perf_atomic_init(&table_handler->m_io_perf_read); + table_handler->m_io_perf_read.init(); } DBUG_ASSERT(table_handler->m_ref_count >= 0); table_handler->m_ref_count++; @@ -3787,9 +4055,9 @@ std::vector Rdb_open_tables_map::get_table_names(void) const void ha_rocksdb::load_auto_incr_value() { - int save_active_index= active_index; + const int save_active_index= active_index; active_index= table->s->next_number_index; - uint8 save_table_status= table->status; + const uint8 save_table_status= table->status; /* load_auto_incr_value() may be called by statements that @@ -3797,13 +4065,13 @@ void ha_rocksdb::load_auto_incr_value() index_last() creates a snapshot. When a snapshot is created here, it has to be released as well. (GitHub issue#189) */ - Rdb_transaction *tx= get_or_create_tx(table->in_use); - bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); + const bool is_new_snapshot= !tx->has_snapshot(); // Do a lookup. We only need index column, so it should be index-only. 
// (another reason to make it index-only is that table->read_set is // not set appropriately and non-index-only lookup will not read the value) - bool save_keyread_only= m_keyread_only; + const bool save_keyread_only= m_keyread_only; m_keyread_only= true; if (!index_last(table->record[0])) @@ -3838,7 +4106,8 @@ void ha_rocksdb::update_auto_incr_val() longlong new_val; field= table->key_info[table->s->next_number_index].key_part[0].field; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + my_bitmap_map* const old_map= + dbug_tmp_use_all_columns(table, table->read_set); new_val= field->val_int() + 1; dbug_tmp_restore_column_map(table->read_set, old_map); @@ -3854,12 +4123,12 @@ void ha_rocksdb::update_auto_incr_val() int ha_rocksdb::load_hidden_pk_value() { - int save_active_index= active_index; + const int save_active_index= active_index; active_index= m_tbl_def->m_key_count - 1; - uint8 save_table_status= table->status; + const uint8 save_table_status= table->status; - Rdb_transaction *tx= get_or_create_tx(table->in_use); - bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); + const bool is_new_snapshot= !tx->has_snapshot(); // Do a lookup. 
if (!index_last(table->record[0])) @@ -3901,13 +4170,12 @@ int ha_rocksdb::load_hidden_pk_value() longlong ha_rocksdb::update_hidden_pk_val() { DBUG_ASSERT(has_hidden_pk(table)); - longlong new_val; - new_val= m_tbl_def->m_hidden_pk_val++; + const longlong new_val= m_tbl_def->m_hidden_pk_val++; return new_val; } /* Get the id of the hidden pk id from m_last_rowkey */ -int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id) +int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) { DBUG_ASSERT(hidden_pk_id != nullptr); DBUG_ASSERT(table != nullptr); @@ -3921,7 +4189,7 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id) return 1; const int length= Field_longlong::PACK_LENGTH; - const uchar *from= reinterpret_cast(reader.read(length)); + const uchar* from= reinterpret_cast(reader.read(length)); if (from == nullptr) { return 1; /* Mem-comparable image doesn't have enough bytes */ @@ -3940,15 +4208,16 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id) */ void Rdb_open_tables_map::release_table_handler( - Rdb_table_handler *table_handler) + Rdb_table_handler* const table_handler) { mysql_mutex_lock(&m_mutex); + DBUG_ASSERT(table_handler != nullptr); DBUG_ASSERT(table_handler->m_ref_count > 0); if (!--table_handler->m_ref_count) { // Last rereference was released. Tear down the hash entry. 
- auto ret __attribute__((__unused__)) = + const auto ret __attribute__((__unused__)) = my_hash_delete(&m_hash, reinterpret_cast(table_handler)); DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted my_core::thr_lock_delete(&table_handler->m_thr_lock); @@ -3959,19 +4228,19 @@ void Rdb_open_tables_map::release_table_handler( } -static handler* rocksdb_create_handler(my_core::handlerton *hton, - my_core::TABLE_SHARE *table_arg, - my_core::MEM_ROOT *mem_root) +static handler* rocksdb_create_handler(my_core::handlerton* const hton, + my_core::TABLE_SHARE* const table_arg, + my_core::MEM_ROOT* const mem_root) { return new (mem_root) ha_rocksdb(hton, table_arg); } -ha_rocksdb::ha_rocksdb(my_core::handlerton *hton, - my_core::TABLE_SHARE *table_arg) +ha_rocksdb::ha_rocksdb(my_core::handlerton* const hton, + my_core::TABLE_SHARE* const table_arg) : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr), - m_scan_it_skips_bloom(false), m_tbl_def(nullptr), - m_pk_descr(nullptr), m_key_descr_arr(nullptr), + m_scan_it_skips_bloom(false), m_scan_it_snapshot(nullptr), + m_tbl_def(nullptr), m_pk_descr(nullptr), m_key_descr_arr(nullptr), m_pk_can_be_decoded(false), m_maybe_unpack_info(false), m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr), @@ -3979,11 +4248,14 @@ ha_rocksdb::ha_rocksdb(my_core::handlerton *hton, m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), m_sk_packed_tuple_old(nullptr), m_pack_buffer(nullptr), m_lock_rows(RDB_LOCK_NONE), - m_keyread_only(FALSE), m_encoder_arr(nullptr), + m_keyread_only(FALSE), m_bulk_load_tx(nullptr), m_encoder_arr(nullptr), m_row_checksums_checked(0), m_in_rpl_delete_rows(false), m_in_rpl_update_rows(false), m_force_skip_unique_check(false) -{} +{ + // TODO(alexyang): create a valid PSI_mutex_key for this mutex + mysql_mutex_init(0, &m_bulk_load_mutex, MY_MUTEX_INIT_FAST); +} static const char *ha_rocksdb_exts[] = { @@ -4003,10 +4275,10 @@ bool ha_rocksdb::same_table(const ha_rocksdb& other) 
const bool ha_rocksdb::init_with_fields() { - uint pk= table_share->primary_key; + const uint pk= table_share->primary_key; if (pk != MAX_KEY) { - uint key_parts= table_share->key_info[pk].user_defined_key_parts; + const uint key_parts= table_share->key_info[pk].user_defined_key_parts; check_keyread_allowed(pk /*PK*/, key_parts-1, true); } else @@ -4028,8 +4300,8 @@ bool ha_rocksdb::init_with_fields() void ha_rocksdb::convert_record_to_storage_format( const rocksdb::Slice& pk_packed_slice, - Rdb_string_writer *pk_unpack_info, - rocksdb::Slice *packed_rec) + Rdb_string_writer* const pk_unpack_info, + rocksdb::Slice* const packed_rec) { DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info); m_storage_record.length(0); @@ -4040,9 +4312,7 @@ void ha_rocksdb::convert_record_to_storage_format( // If a primary key may have non-empty unpack_info for certain values, // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block // itself was prepared in Rdb_key_def::pack_record. - if (m_maybe_unpack_info && - m_pk_descr->m_kv_format_version >= - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + if (m_maybe_unpack_info) { m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), pk_unpack_info->get_current_pos()); @@ -4056,10 +4326,10 @@ void ha_rocksdb::convert_record_to_storage_format( continue; } - Field *field= table->field[i]; + Field* const field= table->field[i]; if (m_encoder_arr[i].maybe_null()) { - char *data= (char*)m_storage_record.ptr(); + char* const data= (char*)m_storage_record.ptr(); if (field->is_null()) { data[m_encoder_arr[i].m_null_offset]|= m_encoder_arr[i].m_null_mask; @@ -4072,7 +4342,7 @@ void ha_rocksdb::convert_record_to_storage_format( { my_core::Field_blob *blob= (my_core::Field_blob*)field; /* Get the number of bytes needed to store length*/ - uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + const uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; /* Store the length of the value */ 
m_storage_record.append(reinterpret_cast(blob->ptr), length_bytes); @@ -4084,7 +4354,7 @@ void ha_rocksdb::convert_record_to_storage_format( } else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) { - Field_varstring* field_var= (Field_varstring*)field; + Field_varstring* const field_var= (Field_varstring*)field; uint data_len; /* field_var->length_bytes is 1 or 2 */ if (field_var->length_bytes == 1) @@ -4102,17 +4372,17 @@ void ha_rocksdb::convert_record_to_storage_format( else { /* Copy the field data */ - uint len= field->pack_length_in_rec(); + const uint len= field->pack_length_in_rec(); m_storage_record.append(reinterpret_cast(field->ptr), len); } } - if (should_store_checksums()) + if (should_store_row_debug_checksums()) { - uint32_t key_crc32= my_core::crc32(0, + const uint32_t key_crc32= my_core::crc32(0, rdb_slice_to_uchar_ptr(&pk_packed_slice), pk_packed_slice.size()); - uint32_t val_crc32= my_core::crc32( + const uint32_t val_crc32= my_core::crc32( 0, rdb_mysql_str_to_uchar_str(&m_storage_record), m_storage_record.length()); uchar key_crc_buf[RDB_CHECKSUM_SIZE]; @@ -4136,7 +4406,7 @@ void ha_rocksdb::convert_record_to_storage_format( @detail Two special cases when we still unpack all fields: - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE). - - When @@rocksdb_verify_checksums is ON (In this mode, we need to read all + - When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we need to read all fields to find whether there is a row checksum at the end. We could skip the fields instead of decoding them, but currently we do decoding.) 
@@ -4159,7 +4429,7 @@ void ha_rocksdb::setup_read_decoders() continue; } - if (m_lock_rows == RDB_LOCK_WRITE || m_verify_checksums || + if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || bitmap_is_set(table->read_set, table->field[i]->field_index)) { // We will need to decode this field @@ -4232,8 +4502,9 @@ void dbug_modify_key_varchar8(String &on_disk_rec) #endif -int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, - uchar * buf) +int ha_rocksdb::convert_record_from_storage_format( + const rocksdb::Slice* const key, + uchar* const buf) { DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", dbug_append_garbage_at_end(m_retrieved_record);); @@ -4242,8 +4513,8 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3", dbug_modify_rec_varchar12(m_retrieved_record);); - rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(), - m_retrieved_record.size()); + const rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(), + m_retrieved_record.size()); return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); } @@ -4272,15 +4543,16 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, other Error inpacking the data */ -int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, - const rocksdb::Slice *value, - uchar * buf) +int ha_rocksdb::convert_record_from_storage_format( + const rocksdb::Slice* const key, + const rocksdb::Slice* const value, + uchar* const buf) { DBUG_ASSERT(key != nullptr); DBUG_ASSERT(buf != nullptr); Rdb_string_reader reader(value); - my_ptrdiff_t ptr_diff= buf - table->record[0]; + const my_ptrdiff_t ptr_diff= buf - table->record[0]; /* Decode PK fields from the key @@ -4301,8 +4573,7 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, return HA_ERR_INTERNAL_ERROR; } - if (m_maybe_unpack_info && m_pk_descr->m_kv_format_version >= - 
Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + if (m_maybe_unpack_info) { unpack_info= reader.read(RDB_UNPACK_HEADER_SIZE); @@ -4329,10 +4600,10 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, { const Rdb_field_encoder* const field_dec= it->m_field_enc; const bool decode= it->m_decode; - bool isNull = field_dec->maybe_null() && + const bool isNull = field_dec->maybe_null() && ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0); - Field *field= table->field[field_dec->m_field_index]; + Field* const field= table->field[field_dec->m_field_index]; /* Skip the bytes we need to skip */ if (it->m_skip && !reader.read(it->m_skip)) @@ -4363,9 +4634,9 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, if (field_dec->m_field_type == MYSQL_TYPE_BLOB) { - my_core::Field_blob *blob= (my_core::Field_blob*)field; + my_core::Field_blob* const blob= (my_core::Field_blob*)field; /* Get the number of bytes needed to store length*/ - uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + const uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; blob->move_field_offset(ptr_diff); @@ -4378,8 +4649,9 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, memcpy(blob->ptr, data_len_str, length_bytes); - uint32 data_len= blob->get_length((uchar*)data_len_str, length_bytes, - table->s->db_low_byte_first); + const uint32 data_len= blob->get_length((uchar*)data_len_str, + length_bytes, + table->s->db_low_byte_first); const char *blob_ptr; if (!(blob_ptr= reader.read(data_len))) { @@ -4398,7 +4670,7 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) { - Field_varstring* field_var= (Field_varstring*)field; + Field_varstring* const field_var= (Field_varstring*)field; const char *data_len_str; if (!(data_len_str= reader.read(field_var->length_bytes))) return 
HA_ERR_INTERNAL_ERROR; @@ -4431,7 +4703,7 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, else { const char *data_bytes; - uint len= field_dec->m_pack_length_in_rec; + const uint len= field_dec->m_pack_length_in_rec; if (len > 0) { if ((data_bytes= reader.read(len)) == nullptr) @@ -4444,7 +4716,7 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, } } - if (m_verify_checksums) + if (m_verify_row_debug_checksums) { if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE && reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) @@ -4454,9 +4726,9 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, uint32_t stored_val_chksum= rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE)); - uint32_t computed_key_chksum= + const uint32_t computed_key_chksum= my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size()); - uint32_t computed_val_chksum= + const uint32_t computed_val_chksum= my_core::crc32(0, rdb_slice_to_uchar_ptr(value), value->size() - RDB_CHECKSUM_CHUNK_SIZE); @@ -4488,55 +4760,19 @@ int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key, } -void ha_rocksdb::get_storage_type(Rdb_field_encoder *encoder, uint kp) +void ha_rocksdb::get_storage_type(Rdb_field_encoder* const encoder, + const uint &kp) { - Field *field= table->key_info[table->s->primary_key].key_part[kp].field; - - if (field->real_type() == MYSQL_TYPE_NEWDECIMAL) + // STORE_SOME uses unpack_info. + if (m_pk_descr->has_unpack_info(kp)) { - // Index-only is supported for DECIMAL columns. - // A DECIMAL value can be restored from its mem-comparable form. - // This works for both the old data format and the new data format. - if (m_pk_descr->m_kv_format_version >= - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) - { - // New format, don't store Decimal value in the row. - encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; - } - else - { - // Old format. 
Pretend there's no way to unpack the decimal from - // its mem-comparable form. - encoder->m_storage_type= Rdb_field_encoder::STORE_ALL; - } - return; + DBUG_ASSERT(m_pk_descr->can_unpack(kp)); + encoder->m_storage_type= Rdb_field_encoder::STORE_SOME; + m_maybe_unpack_info= true; } - - if (m_pk_descr->m_kv_format_version >= - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1) + else if (m_pk_descr->can_unpack(kp)) { - // STORE_SOME uses unpack_info, so our key must be of version - // higher than PRIMARY_FORMAT_VERSION_UPDATE1 to use this - // feature. - if (m_pk_descr->has_unpack_info(kp)) - { - DBUG_ASSERT(m_pk_descr->can_unpack(kp)); - encoder->m_storage_type= Rdb_field_encoder::STORE_SOME; - m_maybe_unpack_info= true; - } - else if (m_pk_descr->can_unpack(kp)) - { - encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; - } - } - else - { - // For old versions, we can only store none if there is no - // unpack_info (and it's unpackable). - if (m_pk_descr->can_unpack(kp) && !m_pk_descr->has_unpack_info(kp)) - { - encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; - } + encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; } } @@ -4564,7 +4800,7 @@ void ha_rocksdb::setup_field_converters() for (i= 0; i < table->s->fields; i++) { - Field *field= table->field[i]; + Field* const field= table->field[i]; m_encoder_arr[i].m_storage_type= Rdb_field_encoder::STORE_ALL; /* @@ -4580,7 +4816,7 @@ void ha_rocksdb::setup_field_converters() if (!has_hidden_pk(table) && field->part_of_key.is_set(table->s->primary_key)) { - KEY *pk_info= &table->key_info[table->s->primary_key]; + KEY* const pk_info= &table->key_info[table->s->primary_key]; for (uint kp= 0; kp < pk_info->user_defined_key_parts; kp++) { /* key_part->fieldnr is counted from 1 */ @@ -4621,13 +4857,14 @@ void ha_rocksdb::setup_field_converters() m_null_bytes_in_rec= null_bytes; } -int ha_rocksdb::alloc_key_buffers(const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) +int ha_rocksdb::alloc_key_buffers(const 
TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ENTER("ha_rocksdb::alloc_key_buffers"); DBUG_ASSERT(m_pk_tuple == nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); - std::shared_ptr* kd_arr= tbl_def_arg->m_key_descr_arr; + std::shared_ptr* const kd_arr= tbl_def_arg->m_key_descr_arr; uint key_len= 0; m_pk_descr= kd_arr[pk_index(table_arg, tbl_def_arg)]; @@ -4651,7 +4888,7 @@ int ha_rocksdb::alloc_key_buffers(const TABLE* table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - uint pack_key_len= m_pk_descr->max_storage_fmt_length(); + const uint pack_key_len= m_pk_descr->max_storage_fmt_length(); m_pk_packed_tuple= reinterpret_cast(my_malloc(pack_key_len, MYF(0))); if (m_pk_packed_tuple == nullptr) { @@ -4668,7 +4905,7 @@ int ha_rocksdb::alloc_key_buffers(const TABLE* table_arg, // TODO: move this into get_table_handler() ?? kd_arr[i]->setup(table_arg, tbl_def_arg); - uint packed_len= kd_arr[i]->max_storage_fmt_length(); + const uint packed_len= kd_arr[i]->max_storage_fmt_length(); if (packed_len > max_packed_sk_len) { max_packed_sk_len= packed_len; @@ -4716,7 +4953,7 @@ void ha_rocksdb::free_key_buffers() m_pack_buffer= nullptr; } -void ha_rocksdb::set_use_read_free_rpl(const char* whitelist) +void ha_rocksdb::set_use_read_free_rpl(const char* const whitelist) { DBUG_ASSERT(whitelist != nullptr); @@ -4734,7 +4971,7 @@ void ha_rocksdb::set_use_read_free_rpl(const char* whitelist) m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename()); } -void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) +void ha_rocksdb::set_skip_unique_check_tables(const char* const whitelist) { DBUG_ASSERT(whitelist != nullptr); @@ -4752,7 +4989,7 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist) m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename()); } -int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) +int ha_rocksdb::open(const char* const name, int mode, uint test_if_locked) { 
DBUG_ENTER("ha_rocksdb::open"); @@ -4806,6 +5043,18 @@ int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(err); } + /* + init_with_fields() is used to initialize table flags based on the field + definitions in table->field[]. + It is called by open_binary_frm(), but that function calls the method for + a temporary ha_rocksdb object which is later destroyed. + + If we are here in ::open(), then init_with_fields() has not been called + for this object. Call it ourselves, we want all member variables to be + properly initialized. + */ + init_with_fields(); + setup_field_converters(); info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); @@ -4818,7 +5067,7 @@ int ha_rocksdb::open(const char *name, int mode, uint test_if_locked) Initialize the necessary variables for them: */ - m_verify_checksums= false; + m_verify_row_debug_checksums= false; /* TODO: move the following to where TABLE_SHARE is opened: */ if (table->found_next_number_field) @@ -4858,7 +5107,7 @@ int ha_rocksdb::close(void) DBUG_RETURN(0); } -bool ha_rocksdb::get_error_message(const int error, String *buf) +bool ha_rocksdb::get_error_message(const int error, String* const buf) { static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST, "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST"); @@ -4869,7 +5118,7 @@ bool ha_rocksdb::get_error_message(const int error, String *buf) DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST); DBUG_ASSERT(buf != nullptr); - Rdb_transaction* tx = get_tx_from_thd(ha_thd()); + Rdb_transaction* const tx = get_tx_from_thd(ha_thd()); bool temp_error = false; switch (error) { @@ -4901,9 +5150,9 @@ static const std::set RDB_INDEX_COLLATIONS= {&my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin}; -static bool rdb_is_index_collation_supported(const my_core::Field *field) +static bool rdb_is_index_collation_supported(const my_core::Field* const field) { - my_core::enum_field_types type= field->real_type(); + const my_core::enum_field_types type= field->real_type(); 
/* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING || type == MYSQL_TYPE_BLOB) @@ -4930,11 +5179,11 @@ static bool rdb_is_index_collation_supported(const my_core::Field *field) 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. */ -int ha_rocksdb::create_key_defs(const TABLE *table_arg, - Rdb_tbl_def *tbl_def_arg, - const TABLE *old_table_arg /* = nullptr */, - const Rdb_tbl_def *old_tbl_def_arg - /* = nullptr */) +int ha_rocksdb::create_key_defs(const TABLE* const table_arg, + Rdb_tbl_def* const tbl_def_arg, + const TABLE* const old_table_arg /* = nullptr */, + const Rdb_tbl_def* const old_tbl_def_arg + /* = nullptr */) const { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); @@ -5011,8 +5260,9 @@ int ha_rocksdb::create_key_defs(const TABLE *table_arg, 0 - Ok other - error */ -int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, - std::array* cfs) +int ha_rocksdb::create_cfs(const TABLE* const table_arg, + Rdb_tbl_def* const tbl_def_arg, + std::array* const cfs) const { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); @@ -5043,7 +5293,7 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, !rdb_collation_exceptions->matches(tablename_sys)) { std::string collation_err; - for (auto coll : RDB_INDEX_COLLATIONS) + for (const auto &coll : RDB_INDEX_COLLATIONS) { if (collation_err != "") { @@ -5066,8 +5316,8 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, index comment has Column Family name. If there was no comment, we get NULL, and it means use the default column family. 
*/ - const char *comment = get_key_comment(i, table_arg, tbl_def_arg); - const char *key_name = get_key_name(i, table_arg, tbl_def_arg); + const char* const comment = get_key_comment(i, table_arg, tbl_def_arg); + const char* const key_name = get_key_name(i, table_arg, tbl_def_arg); if (looks_like_per_index_cf_typo(comment)) { @@ -5112,11 +5362,11 @@ int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. */ -int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg, - Rdb_tbl_def *tbl_def_arg, - const TABLE *old_table_arg, - const Rdb_tbl_def *old_tbl_def_arg, - const std::array& cfs) +int ha_rocksdb::create_inplace_key_defs(const TABLE* const table_arg, + Rdb_tbl_def* const tbl_def_arg, + const TABLE* const old_table_arg, + const Rdb_tbl_def* const old_tbl_def_arg, + const std::array& cfs) const { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); @@ -5124,31 +5374,30 @@ int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg, DBUG_ENTER("create_key_def"); - std::shared_ptr* old_key_descr= + std::shared_ptr* const old_key_descr= old_tbl_def_arg->m_key_descr_arr; - std::shared_ptr* new_key_descr= + std::shared_ptr* const new_key_descr= tbl_def_arg->m_key_descr_arr; - std::unordered_map old_key_pos = + const std::unordered_map old_key_pos = get_old_key_positions(table_arg, tbl_def_arg, old_table_arg, old_tbl_def_arg); uint i; for (i= 0; i < tbl_def_arg->m_key_count; i++) { - auto it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg)); + const auto &it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg)); if (it != old_key_pos.end()) { /* Found matching index in old table definition, so copy it over to the new one created. 
*/ - const std::shared_ptr& okd= - old_key_descr[it->second]; + const Rdb_key_def& okd= *old_key_descr[it->second]; uint16 index_dict_version= 0; uchar index_type= 0; uint16 kv_version= 0; - GL_INDEX_ID gl_index_id= okd->get_gl_index_id(); + const GL_INDEX_ID gl_index_id= okd.get_gl_index_id(); if (!dict_manager.get_index_info(gl_index_id, &index_dict_version, &index_type, &kv_version)) { @@ -5166,15 +5415,15 @@ int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg, itself. */ new_key_descr[i]= std::make_shared( - okd->get_index_number(), + okd.get_index_number(), i, - okd->get_cf(), + okd.get_cf(), index_dict_version, index_type, kv_version, - okd->m_is_reverse_cf, - okd->m_is_auto_cf, - okd->m_name.c_str(), + okd.m_is_reverse_cf, + okd.m_is_auto_cf, + okd.m_name.c_str(), dict_manager.get_stats(gl_index_id)); } else if (create_key_def(table_arg, i, tbl_def_arg, @@ -5191,10 +5440,10 @@ int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg, } std::unordered_map ha_rocksdb::get_old_key_positions( - const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg, - const TABLE* old_table_arg, - const Rdb_tbl_def* old_tbl_def_arg) + const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg, + const TABLE* const old_table_arg, + const Rdb_tbl_def* const old_tbl_def_arg) const { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(old_table_arg != nullptr); @@ -5203,7 +5452,7 @@ std::unordered_map ha_rocksdb::get_old_key_positions( DBUG_ENTER("get_old_key_positions"); - std::shared_ptr* old_key_descr= + std::shared_ptr* const old_key_descr= old_tbl_def_arg->m_key_descr_arr; std::unordered_map old_key_pos; std::unordered_map new_key_pos; @@ -5231,14 +5480,14 @@ std::unordered_map ha_rocksdb::get_old_key_positions( CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB; ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; */ - const KEY* old_key = &old_table_arg->key_info[i]; - auto it = new_key_pos.find(old_key->name); + const KEY* const 
old_key = &old_table_arg->key_info[i]; + const auto &it = new_key_pos.find(old_key->name); if (it == new_key_pos.end()) { continue; } - KEY* new_key = &table_arg->key_info[it->second]; + KEY* const new_key = &table_arg->key_info[it->second]; if (!compare_key_parts(old_key, new_key)) { @@ -5250,7 +5499,8 @@ std::unordered_map ha_rocksdb::get_old_key_positions( } /* Check two keys to ensure that key parts within keys match */ -int ha_rocksdb::compare_key_parts(const KEY* old_key, const KEY* new_key) +int ha_rocksdb::compare_key_parts(const KEY* const old_key, + const KEY* const new_key) const { DBUG_ASSERT(old_key != nullptr); DBUG_ASSERT(new_key != nullptr); @@ -5293,17 +5543,17 @@ int ha_rocksdb::compare_key_parts(const KEY* old_key, const KEY* new_key) 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. */ -int ha_rocksdb::create_key_def(const TABLE *table_arg, uint i, - const Rdb_tbl_def* tbl_def_arg, - std::shared_ptr* new_key_def, - const struct key_def_cf_info& cf_info) +int ha_rocksdb::create_key_def(const TABLE* const table_arg, const uint &i, + const Rdb_tbl_def* const tbl_def_arg, + std::shared_ptr* const new_key_def, + const struct key_def_cf_info& cf_info) const { DBUG_ENTER("create_key_def"); DBUG_ASSERT(new_key_def != nullptr); DBUG_ASSERT(*new_key_def == nullptr); - uint index_id= ddl_manager.get_and_update_next_number(&dict_manager); - uint16_t index_dict_version= Rdb_key_def::INDEX_INFO_VERSION_LATEST; + const uint index_id= ddl_manager.get_and_update_next_number(&dict_manager); + const uint16_t index_dict_version= Rdb_key_def::INDEX_INFO_VERSION_LATEST; uchar index_type; uint16_t kv_version; @@ -5316,24 +5566,16 @@ int ha_rocksdb::create_key_def(const TABLE *table_arg, uint i, { index_type= Rdb_key_def::INDEX_TYPE_PRIMARY; uint16 pk_latest_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; - DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL", - {pk_latest_version= - Rdb_key_def::PRIMARY_FORMAT_VERSION_INITIAL; - }); 
kv_version= pk_latest_version; } else { index_type= Rdb_key_def::INDEX_TYPE_SECONDARY; uint16 sk_latest_version= Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; - DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL", - {sk_latest_version= - Rdb_key_def::SECONDARY_FORMAT_VERSION_INITIAL; - }); kv_version= sk_latest_version; } - const char *key_name = get_key_name(i, table_arg, m_tbl_def); + const char* const key_name = get_key_name(i, table_arg, m_tbl_def); *new_key_def= std::make_shared( index_id, i, cf_info.cf_handle, index_dict_version, index_type, kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name); @@ -5342,7 +5584,7 @@ int ha_rocksdb::create_key_def(const TABLE *table_arg, uint i, } int rdb_normalize_tablename(const std::string& tablename, - std::string* strbuf) + std::string* const strbuf) { DBUG_ASSERT(strbuf != nullptr); @@ -5368,7 +5610,7 @@ int rdb_normalize_tablename(const std::string& tablename, Check to see if the user's original statement includes foreign key references */ -bool ha_rocksdb::contains_foreign_key(THD* thd) +bool ha_rocksdb::contains_foreign_key(THD* const thd) { bool success; const char* str = thd_query_string(thd)->str; @@ -5431,9 +5673,9 @@ bool ha_rocksdb::contains_foreign_key(THD* thd) @return 0 on success, non-zero on failure to split */ int rdb_split_normalized_tablename(const std::string& fullname, - std::string* db, - std::string* table, - std::string* partition) + std::string* const db, + std::string* const table, + std::string* const partition) { DBUG_ASSERT(!fullname.empty()); @@ -5458,7 +5700,7 @@ int rdb_split_normalized_tablename(const std::string& fullname, dotpos++; - size_t partpos = fullname.find(RDB_PARTITION_STR, dotpos, + const size_t partpos = fullname.find(RDB_PARTITION_STR, dotpos, strlen(RDB_PARTITION_STR)); if (partpos != std::string::npos) @@ -5503,14 +5745,14 @@ int rdb_split_normalized_tablename(const std::string& fullname, ha_create_table() in handle.cc */ -int ha_rocksdb::create(const char *name, 
TABLE *table_arg, - HA_CREATE_INFO *create_info) +int ha_rocksdb::create(const char* const name, TABLE* const table_arg, + HA_CREATE_INFO* const create_info) { int res; std::string str; DBUG_ENTER("ha_rocksdb::create"); - THD *thd = my_core::thd_get_current_thd(); + THD* const thd = my_core::thd_get_current_thd(); if (get_table_if_exists(name)) { if (thd->lex->sql_command == SQLCOM_TRUNCATE) { @@ -5564,8 +5806,8 @@ int ha_rocksdb::create(const char *name, TABLE *table_arg, } bool write_err= false; - std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch* const batch= wb.get(); /* Create table/key descriptions and put them into the data dictionary */ m_tbl_def= new Rdb_tbl_def(str); @@ -5642,7 +5884,7 @@ error: bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) const { bool res= true; - KEY *key_info= &table_share->key_info[inx]; + KEY* const key_info= &table_share->key_info[inx]; Rdb_field_packing dummy1; res= dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, @@ -5666,7 +5908,7 @@ bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) cons } } - uint pk= table_share->primary_key; + const uint pk= table_share->primary_key; if (inx == pk && all_parts && part + 1 == table_share->key_info[pk].user_defined_key_parts) { @@ -5676,12 +5918,11 @@ bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) cons return res; } -int ha_rocksdb::read_key_exact(const std::shared_ptr& kd, - rocksdb::Iterator *iter, - bool full_key_match, +int ha_rocksdb::read_key_exact(const Rdb_key_def& kd, + rocksdb::Iterator* const iter, + const bool &full_key_match, const rocksdb::Slice& key_slice) const { - DBUG_ASSERT(kd != nullptr); DBUG_ASSERT(iter != nullptr); /* @@ -5689,7 +5930,7 @@ int ha_rocksdb::read_key_exact(const std::shared_ptr& kd, index_tuple= lookup_tuple. lookup_tuple may be a prefix of the index. 
*/ - if (kd->m_is_reverse_cf) + if (kd.m_is_reverse_cf) { if (!full_key_match) { @@ -5700,7 +5941,7 @@ int ha_rocksdb::read_key_exact(const std::shared_ptr& kd, } } - if (!iter->Valid() || !kd->value_matches_prefix(iter->key(), key_slice)) + if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) { /* Got a record that is not equal to the lookup value, or even a record @@ -5711,20 +5952,18 @@ int ha_rocksdb::read_key_exact(const std::shared_ptr& kd, return 0; } -int ha_rocksdb::read_before_key(const std::shared_ptr& kd, - bool full_key_match, +int ha_rocksdb::read_before_key(const Rdb_key_def& kd, + const bool &full_key_match, const rocksdb::Slice& key_slice) { - DBUG_ASSERT(kd != nullptr); - /* We are looking for record with the biggest t.key such that t.key < lookup_tuple. */ - if (kd->m_is_reverse_cf) + if (kd.m_is_reverse_cf) { if (m_scan_it->Valid() && full_key_match && - kd->value_matches_prefix(m_scan_it->key(), key_slice)) + kd.value_matches_prefix(m_scan_it->key(), key_slice)) { /* We are using full key and we've hit an exact match */ m_scan_it->Next(); @@ -5741,12 +5980,10 @@ int ha_rocksdb::read_before_key(const std::shared_ptr& kd, return m_scan_it->Valid() ? 
0 : HA_ERR_KEY_NOT_FOUND; } -int ha_rocksdb::read_after_key(const std::shared_ptr& kd, - bool full_key_match, +int ha_rocksdb::read_after_key(const Rdb_key_def& kd, + const bool &full_key_match, const rocksdb::Slice& key_slice) { - DBUG_ASSERT(kd != nullptr); - /* We are looking for the first record such that @@ -5755,7 +5992,7 @@ int ha_rocksdb::read_after_key(const std::shared_ptr& kd, with HA_READ_AFTER_KEY, $GT = '>', with HA_READ_KEY_OR_NEXT, $GT = '>=' */ - if (kd->m_is_reverse_cf) + if (kd.m_is_reverse_cf) { if (!m_scan_it->Valid()) { @@ -5769,7 +6006,7 @@ int ha_rocksdb::read_after_key(const std::shared_ptr& kd, - when using full extended key and when we've got an exact match */ if (!full_key_match || - !kd->value_matches_prefix(m_scan_it->key(), key_slice)) + !kd.value_matches_prefix(m_scan_it->key(), key_slice)) { m_scan_it->Prev(); } @@ -5780,13 +6017,13 @@ int ha_rocksdb::read_after_key(const std::shared_ptr& kd, } int ha_rocksdb::position_to_correct_key( - const std::shared_ptr& kd, - enum ha_rkey_function find_flag, - bool full_key_match, - const uchar* key, - key_part_map keypart_map, + const Rdb_key_def& kd, + const enum ha_rkey_function &find_flag, + const bool &full_key_match, + const uchar* const key, + const key_part_map &keypart_map, const rocksdb::Slice& key_slice, - bool* move_forward) + bool* const move_forward) { int rc= 0; @@ -5799,7 +6036,7 @@ int ha_rocksdb::position_to_correct_key( case HA_READ_BEFORE_KEY: *move_forward= false; rc= read_before_key(kd, full_key_match, key_slice); - if (rc == 0 && !kd->covers_key(m_scan_it->key())) + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc= HA_ERR_KEY_NOT_FOUND; @@ -5808,7 +6045,7 @@ int ha_rocksdb::position_to_correct_key( case HA_READ_AFTER_KEY: case HA_READ_KEY_OR_NEXT: rc= read_after_key(kd, full_key_match, key_slice); - if (rc == 0 && !kd->covers_key(m_scan_it->key())) + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record 
we've got is not from this index */ rc= HA_ERR_KEY_NOT_FOUND; @@ -5835,21 +6072,21 @@ int ha_rocksdb::position_to_correct_key( if (rc == 0) { const rocksdb::Slice& rkey= m_scan_it->key(); - if (!kd->covers_key(rkey)) + if (!kd.covers_key(rkey)) { /* The record we've got is not from this index */ rc= HA_ERR_KEY_NOT_FOUND; } else if (find_flag == HA_READ_PREFIX_LAST) { - uint size = kd->pack_index_tuple(table, m_pack_buffer, + uint size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, keypart_map); rocksdb::Slice lookup_tuple( reinterpret_cast(m_sk_packed_tuple), size); // We need to compare the key we've got with the original search prefix. - if (!kd->value_matches_prefix(rkey, lookup_tuple)) + if (!kd.value_matches_prefix(rkey, lookup_tuple)) { rc= HA_ERR_KEY_NOT_FOUND; } @@ -5864,12 +6101,12 @@ int ha_rocksdb::position_to_correct_key( return rc; } -int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr& kd, - enum ha_rkey_function find_flag, +int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def& kd, + const enum ha_rkey_function &find_flag, const rocksdb::Slice& slice, - int bytes_changed_by_succ, - const key_range *end_key, - uint* end_key_packed_size) + const int &bytes_changed_by_succ, + const key_range* const end_key, + uint* const end_key_packed_size) { if (find_flag == HA_READ_KEY_EXACT) return slice.size(); @@ -5877,7 +6114,7 @@ int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr& kd, if (find_flag == HA_READ_PREFIX_LAST) { /* - We have made the kd->successor(m_sk_packed_tuple) call above. + We have made the kd.successor(m_sk_packed_tuple) call above. The slice is at least Rdb_key_def::INDEX_NUMBER_SIZE bytes long. 
*/ @@ -5886,7 +6123,7 @@ int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr& kd, if (end_key) { - *end_key_packed_size= kd->pack_index_tuple(table, m_pack_buffer, + *end_key_packed_size= kd.pack_index_tuple(table, m_pack_buffer, m_end_key_packed_tuple, end_key->key, end_key->keypart_map); @@ -5914,13 +6151,13 @@ int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr& kd, return Rdb_key_def::INDEX_NUMBER_SIZE; } -int ha_rocksdb::read_row_from_primary_key(uchar *buf) +int ha_rocksdb::read_row_from_primary_key(uchar* const buf) { DBUG_ASSERT(buf != nullptr); int rc; const rocksdb::Slice& rkey= m_scan_it->key(); - uint pk_size= rkey.size(); + const uint pk_size= rkey.size(); const char *pk_data= rkey.data(); memcpy(m_pk_packed_tuple, pk_data, pk_size); @@ -5941,11 +6178,10 @@ int ha_rocksdb::read_row_from_primary_key(uchar *buf) return rc; } -int ha_rocksdb::read_row_from_secondary_key(uchar *buf, - const std::shared_ptr& kd, bool move_forward) +int ha_rocksdb::read_row_from_secondary_key(uchar* const buf, + const Rdb_key_def& kd, bool move_forward) { DBUG_ASSERT(buf != nullptr); - DBUG_ASSERT(kd != nullptr); int rc= 0; uint pk_size; @@ -5954,7 +6190,7 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *buf, { /* Get the key columns and primary key value */ const rocksdb::Slice& rkey= m_scan_it->key(); - pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey, + pk_size= kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); const rocksdb::Slice& value= m_scan_it->value(); if (pk_size == RDB_INVALID_KEY_LEN) @@ -5968,14 +6204,14 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *buf, } else { - if (kd->m_is_reverse_cf) + if (kd.m_is_reverse_cf) move_forward= !move_forward; rc= find_icp_matching_index_rec(move_forward, buf); if (!rc) { const rocksdb::Slice& rkey= m_scan_it->key(); - pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey, + pk_size= kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); if (pk_size == 
RDB_INVALID_KEY_LEN) { @@ -6006,20 +6242,21 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *buf, available. In this case, we should just do a primary key lookup. */ int ha_rocksdb::try_keyonly_read_from_sk(uchar* buf, - const std::shared_ptr& kd, + const Rdb_key_def& kd, const rocksdb::Slice& key, const rocksdb::Slice& value, const uint rowid_size) { - int res= kd->unpack_record(table, buf, &key, &value, m_verify_checksums); + const int res= + kd.unpack_record(table, buf, &key, &value, m_verify_row_debug_checksums); uint size= rowid_size; if (res == UNPACK_INFO_MISSING) { // If we can't unpack, then do a PK lookup. if (rowid_size == RDB_INVALID_KEY_LEN) { - size= kd->get_primary_key_tuple( - table, m_pk_descr, &key, m_pk_packed_tuple); + size= kd.get_primary_key_tuple( + table, *m_pk_descr, &key, m_pk_packed_tuple); } return get_row_by_rowid(buf, m_pk_packed_tuple, size); } @@ -6094,7 +6331,7 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const m_scan_it points at the index key-value pair that we should read the (pk,row) pair for. 
*/ -int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) +int ha_rocksdb::secondary_index_read(const int keyno, uchar* const buf) { DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(table != nullptr); @@ -6114,8 +6351,8 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) int rc; //TODO: We could here check if we have ran out of range we're scanning - uint size= m_key_descr_arr[keyno]->get_primary_key_tuple( - table, m_pk_descr, &key, m_pk_packed_tuple); + const uint size= m_key_descr_arr[keyno]->get_primary_key_tuple( + table, *m_pk_descr, &key, m_pk_packed_tuple); if (size == RDB_INVALID_KEY_LEN) { return HA_ERR_INTERNAL_ERROR; @@ -6126,7 +6363,7 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { - rc= try_keyonly_read_from_sk(buf, m_key_descr_arr[keyno], + rc= try_keyonly_read_from_sk(buf, *m_key_descr_arr[keyno], key, m_scan_it->value(), size); } else @@ -6154,8 +6391,8 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf) ha_rocksdb::read_range_first passes end_key to ha_rocksdb::index_read_map_impl function. */ -int ha_rocksdb::read_range_first(const key_range *start_key, - const key_range *end_key, +int ha_rocksdb::read_range_first(const key_range* const start_key, + const key_range* const end_key, bool eq_range_arg, bool sorted) { int result; @@ -6207,7 +6444,7 @@ int ha_rocksdb::read_range_first(const key_range *start_key, } -int ha_rocksdb::index_read_map(uchar *buf, const uchar *key, +int ha_rocksdb::index_read_map(uchar* const buf, const uchar* const key, key_part_map keypart_map, enum ha_rkey_function find_flag) { @@ -6226,17 +6463,17 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key, On range scan, equal condition length is MIN(start_key, end_key) of the rocksdb::Slice expression. 
*/ -int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, +int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, key_part_map keypart_map, enum ha_rkey_function find_flag, - const key_range *end_key) + const key_range* end_key) { int rc= 0; DBUG_ENTER("ha_rocksdb::index_read_map"); ha_statistic_increment(&SSV::ha_read_key_count); - const std::shared_ptr& kd= m_key_descr_arr[active_index]; - uint actual_key_parts= kd->get_key_parts(); + const Rdb_key_def& kd= *m_key_descr_arr[active_index]; + const uint actual_key_parts= kd.get_key_parts(); bool using_full_key= is_using_full_key(keypart_map, actual_key_parts); if (!end_key) @@ -6253,8 +6490,9 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, Equality lookup over primary key, using full tuple. This is a special case, use DB::Get. */ - uint size= kd->pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple, - key, keypart_map); + const uint size= kd.pack_index_tuple( + table, m_pack_buffer, m_pk_packed_tuple, + key, keypart_map); rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); if (!rc) { @@ -6275,15 +6513,15 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, { key_part_map tmp_map= (key_part_map(1) << table->key_info[active_index].user_defined_key_parts) - 1; - packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + packed_size= kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, tmp_map); if (table->key_info[active_index].user_defined_key_parts != - kd->get_key_parts()) + kd.get_key_parts()) using_full_key= false; } else { - packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + packed_size= kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, keypart_map); } @@ -6313,24 +6551,24 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, find_flag == HA_READ_AFTER_KEY) { /* See below */ - bytes_changed_by_succ= kd->successor(m_sk_packed_tuple, packed_size); + 
bytes_changed_by_succ= kd.successor(m_sk_packed_tuple, packed_size); } rocksdb::Slice slice( reinterpret_cast(m_sk_packed_tuple), packed_size); uint end_key_packed_size= 0; - uint eq_cond_len= calc_eq_cond_len(kd, find_flag, slice, - bytes_changed_by_succ, end_key, - &end_key_packed_size); + const uint eq_cond_len= calc_eq_cond_len(kd, find_flag, slice, + bytes_changed_by_succ, end_key, + &end_key_packed_size); bool use_all_keys= false; if (find_flag == HA_READ_KEY_EXACT && - my_count_bits(keypart_map) == kd->get_key_parts()) + my_count_bits(keypart_map) == kd.get_key_parts()) use_all_keys= true; - Rdb_transaction *tx= get_or_create_tx(table->in_use); - bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); + const bool is_new_snapshot= !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) for (;;) @@ -6413,14 +6651,14 @@ int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key, other - error code */ -int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf) +int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, + uchar* const buf) { DBUG_ASSERT(buf != nullptr); if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) { - const std::shared_ptr& kd= m_key_descr_arr[active_index]; - DBUG_ASSERT(kd != nullptr); + const Rdb_key_def& kd= *m_key_descr_arr[active_index]; while (1) { @@ -6429,9 +6667,9 @@ int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf) table->status= STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } - rocksdb::Slice rkey= m_scan_it->key(); + const rocksdb::Slice rkey= m_scan_it->key(); - if (!kd->covers_key(rkey)) + if (!kd.covers_key(rkey)) { table->status= STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; @@ -6439,22 +6677,22 @@ int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf) if (m_sk_match_prefix) { - rocksdb::Slice 
prefix((const char*)m_sk_match_prefix, + const rocksdb::Slice prefix((const char*)m_sk_match_prefix, m_sk_match_length); - if (!kd->value_matches_prefix(rkey, prefix)) + if (!kd.value_matches_prefix(rkey, prefix)) { table->status= STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } } - rocksdb::Slice value= m_scan_it->value(); + const rocksdb::Slice value= m_scan_it->value(); if (try_keyonly_read_from_sk(buf, kd, rkey, value, RDB_INVALID_KEY_LEN)) { return HA_ERR_INTERNAL_ERROR; } - enum icp_result icp_status= check_index_cond(); + const enum icp_result icp_status= check_index_cond(); if (icp_status == ICP_NO_MATCH) { if (move_forward) @@ -6488,26 +6726,26 @@ int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf) } -int ha_rocksdb::index_read_last_map(uchar * buf, const uchar * key, +int ha_rocksdb::index_read_last_map(uchar* const buf, const uchar* const key, key_part_map keypart_map) { return index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST); } -int ha_rocksdb::check(THD* thd, HA_CHECK_OPT* check_opt) +int ha_rocksdb::check(THD* const thd, HA_CHECK_OPT* const check_opt) { - uint pk= pk_index(table, m_tbl_def); + const uint pk= pk_index(table, m_tbl_def); String rowkey_copy; String sec_key_copy; - const char* table_name = table->s->table_name.str; + const char* const table_name = table->s->table_name.str; // Only when debugging: don't use snapshot when reading // Rdb_transaction *tx= get_or_create_tx(table->in_use); // tx->snapshot= nullptr; - bool save_verify_checksums= m_verify_checksums; - m_verify_checksums= true; + const bool save_verify_row_debug_checksums= m_verify_row_debug_checksums; + m_verify_row_debug_checksums= true; /* For each secondary index, check that we can get a PK value from it */ // NO_LINT_DEBUG @@ -6606,18 +6844,20 @@ int ha_rocksdb::check(THD* thd, HA_CHECK_OPT* check_opt) print_and_error: { std::string buf; - buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), 1000); + buf = rdb_hexdump(rowkey_copy.ptr(), 
rowkey_copy.length(), + RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, buf.c_str()); buf= rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), - 1000); + RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: record: %s", table_name, buf.c_str()); - buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), 1000); + buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), + RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: index: %s", table_name, buf.c_str()); @@ -6646,7 +6886,7 @@ print_and_error: } extra(HA_EXTRA_NO_KEYREAD); - m_verify_checksums= save_verify_checksums; + m_verify_row_debug_checksums= save_verify_row_debug_checksums; /* TODO: we should check also for PK records that are missing in the secondary indexes. @@ -6655,14 +6895,14 @@ print_and_error: */ return HA_ADMIN_OK; error: - m_verify_checksums= save_verify_checksums; + m_verify_row_debug_checksums= save_verify_row_debug_checksums; ha_index_or_rnd_end(); extra(HA_EXTRA_NO_KEYREAD); return HA_ADMIN_CORRUPT; } -static void dbug_dump_str(FILE *out, const char *str, int len) +static void dbug_dump_str(FILE* const out, const char* const str, int len) { fprintf(out, "\""); for (int i= 0; i < len; i++) @@ -6682,9 +6922,9 @@ static void dbug_dump_str(FILE *out, const char *str, int len) dbug_dump_database(rdb); */ -void dbug_dump_database(rocksdb::DB *db) +void dbug_dump_database(rocksdb::DB* const db) { - FILE *out= fopen("/tmp/rocksdb.dump", "wt"); + FILE* const out= fopen("/tmp/rocksdb.dump", "wt"); if (!out) return; @@ -6702,6 +6942,30 @@ void dbug_dump_database(rocksdb::DB *db) fclose(out); } +rocksdb::Status ha_rocksdb::get_for_update( + Rdb_transaction* const tx, + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key, + std::string* const value) const +{ + DBUG_ASSERT(m_lock_rows != RDB_LOCK_NONE); + const bool exclusive = m_lock_rows != RDB_LOCK_READ; + + 
rocksdb::Status s= tx->get_for_update(column_family, key, value, exclusive); + + // If we have a lock conflict and we are running in READ COMMITTTED mode + // release and reacquire the snapshot and then retry the get_for_update(). + if (s.IsBusy() && !s.IsDeadlock() && + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) + { + tx->release_snapshot(); + tx->acquire_snapshot(false); + + s= tx->get_for_update(column_family, key, value, exclusive); + } + + return s; +} /* Given a rowid (i.e. packed PK) as a parameter, get the record. @@ -6711,7 +6975,7 @@ void dbug_dump_database(rocksdb::DB *db) other - HA_ERR error code. */ -int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, +int ha_rocksdb::get_row_by_rowid(uchar* const buf, const char* const rowid, const uint rowid_size) { DBUG_ASSERT(buf != nullptr); @@ -6723,7 +6987,7 @@ int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, rocksdb::Slice key_slice(rowid, rowid_size); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid"); @@ -6747,11 +7011,13 @@ int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, s= tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); } else - s= tx->get_for_update(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + { + s= get_for_update(tx, m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + } if (!s.IsNotFound() && !s.ok()) { - DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def)); + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); } found= !s.IsNotFound(); @@ -6776,7 +7042,7 @@ int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, } -int ha_rocksdb::index_next(uchar *buf) +int ha_rocksdb::index_next(uchar* const buf) { bool moves_forward= true; ha_statistic_increment(&SSV::ha_read_next_count); @@ -6792,7 +7058,7 @@ int 
ha_rocksdb::index_next(uchar *buf) } -int ha_rocksdb::index_prev(uchar *buf) +int ha_rocksdb::index_prev(uchar* const buf) { bool moves_forward= false; ha_statistic_increment(&SSV::ha_read_prev_count); @@ -6809,7 +7075,7 @@ int ha_rocksdb::index_prev(uchar *buf) } -int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward) +int ha_rocksdb::index_next_with_direction(uchar* const buf, bool move_forward) { int rc; DBUG_ENTER("ha_rocksdb::index_next"); @@ -6840,7 +7106,7 @@ int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward) } -int ha_rocksdb::index_first(uchar *buf) +int ha_rocksdb::index_first(uchar* const buf) { m_sk_match_prefix= nullptr; ha_statistic_increment(&SSV::ha_read_first_count); @@ -6852,7 +7118,7 @@ int ha_rocksdb::index_first(uchar *buf) } -int ha_rocksdb::index_last(uchar *buf) +int ha_rocksdb::index_last(uchar* const buf) { m_sk_match_prefix= nullptr; ha_statistic_increment(&SSV::ha_read_last_count); @@ -6895,7 +7161,7 @@ int ha_rocksdb::index_last(uchar *buf) */ -int ha_rocksdb::index_first_intern(uchar *buf) +int ha_rocksdb::index_first_intern(uchar* const buf) { DBUG_ASSERT(buf != nullptr); @@ -6915,27 +7181,27 @@ int ha_rocksdb::index_first_intern(uchar *buf) DBUG_ASSERT(key != nullptr); - const std::shared_ptr& kd= m_key_descr_arr[active_index]; - if (kd->m_is_reverse_cf) + const Rdb_key_def& kd= *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { - kd->get_supremum_key(key, &key_size); + kd.get_supremum_key(key, &key_size); } else { - kd->get_infimum_key(key, &key_size); + kd.get_infimum_key(key, &key_size); } rocksdb::Slice index_key((const char*)key, key_size); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); - bool is_new_snapshot= !tx->has_snapshot(); + const bool is_new_snapshot= !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. 
it did not exist prior to this) for (;;) { - setup_scan_iterator(kd, &index_key, false, !kd->m_is_reverse_cf, + setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); m_skip_scan_it_next_call= true; @@ -6998,7 +7264,7 @@ int ha_rocksdb::index_first_intern(uchar *buf) So, need to: it->Seek(n) || it->SeekToLast(); it->Prev(); */ -int ha_rocksdb::index_last_intern(uchar *buf) +int ha_rocksdb::index_last_intern(uchar* const buf) { DBUG_ASSERT(buf != nullptr); @@ -7018,19 +7284,19 @@ int ha_rocksdb::index_last_intern(uchar *buf) DBUG_ASSERT(key != nullptr); - const std::shared_ptr& kd= m_key_descr_arr[active_index]; - if (kd->m_is_reverse_cf) + const Rdb_key_def& kd= *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { - kd->get_infimum_key(key, &key_size); + kd.get_infimum_key(key, &key_size); } else { - kd->get_supremum_key(key, &key_size); + kd.get_supremum_key(key, &key_size); } rocksdb::Slice index_key((const char*)key, key_size); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); bool is_new_snapshot= !tx->has_snapshot(); @@ -7038,7 +7304,7 @@ int ha_rocksdb::index_last_intern(uchar *buf) // snapshot here (i.e. it did not exist prior to this) for (;;) { - setup_scan_iterator(kd, &index_key, false, kd->m_is_reverse_cf, + setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); if (!m_scan_it->Valid()) @@ -7097,7 +7363,7 @@ void ha_rocksdb::unlock_row() { if (m_lock_rows != RDB_LOCK_NONE) { - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); tx->release_lock(m_pk_descr->get_cf(), std::string(m_last_rowkey.ptr(), m_last_rowkey.length())); } @@ -7109,14 +7375,14 @@ void ha_rocksdb::unlock_row() - If the index is PRIMARY KEY, and if all of the columns of the table are covered by the PRIMARY KEY, SingleDelete can be used. 
*/ -bool ha_rocksdb::can_use_single_delete(uint index) +bool ha_rocksdb::can_use_single_delete(const uint &index) const { return (index != pk_index(table, m_tbl_def) || (!has_hidden_pk(table) && table->key_info[index].actual_key_parts == table->s->fields)); } -bool ha_rocksdb::skip_unique_check() +bool ha_rocksdb::skip_unique_check() const { /* We want to skip unique checks if: @@ -7147,7 +7413,7 @@ bool ha_rocksdb::commit_in_the_middle() @retval true if bulk commit failed @retval false if bulk commit was skipped or succeeded */ -bool ha_rocksdb::do_bulk_commit(Rdb_transaction *tx) +bool ha_rocksdb::do_bulk_commit(Rdb_transaction* const tx) { DBUG_ASSERT(tx != nullptr); return commit_in_the_middle() && @@ -7161,7 +7427,7 @@ bool ha_rocksdb::do_bulk_commit(Rdb_transaction *tx) does not contain a primary key. (In which case we generate a hidden 'auto-incremented' pk.) */ -bool ha_rocksdb::has_hidden_pk(const TABLE* table) +bool ha_rocksdb::has_hidden_pk(const TABLE* const table) const { DBUG_ASSERT(table != nullptr); return Rdb_key_def::table_has_hidden_pk(table); @@ -7171,8 +7437,8 @@ bool ha_rocksdb::has_hidden_pk(const TABLE* table) Returns true if given index number is a hidden_pk. - This is used when a table is created with no primary key. 
*/ -bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) +bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); @@ -7183,8 +7449,8 @@ bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* table_arg, } /* Returns index of primary key */ -uint ha_rocksdb::pk_index(const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) +uint ha_rocksdb::pk_index(const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); @@ -7196,8 +7462,8 @@ uint ha_rocksdb::pk_index(const TABLE* table_arg, } /* Returns true if given index number is a primary key */ -bool ha_rocksdb::is_pk(const uint index, const TABLE* table_arg, - const Rdb_tbl_def *tbl_def_arg) +bool ha_rocksdb::is_pk(const uint index, const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); @@ -7207,8 +7473,9 @@ bool ha_rocksdb::is_pk(const uint index, const TABLE* table_arg, is_hidden_pk(index, table_arg, tbl_def_arg); } -const char* ha_rocksdb::get_key_name(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) +const char* ha_rocksdb::get_key_name(const uint index, + const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); @@ -7221,8 +7488,8 @@ const char* ha_rocksdb::get_key_name(const uint index, const TABLE* table_arg, } const char* ha_rocksdb::get_key_comment(const uint index, - const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) + const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); @@ -7234,7 +7501,7 @@ const char* ha_rocksdb::get_key_comment(const uint index, return table_arg->key_info[index].comment.str; } -int 
ha_rocksdb::write_row(uchar *buf) +int ha_rocksdb::write_row(uchar* const buf) { DBUG_ENTER("ha_rocksdb::write_row"); DBUG_ASSERT(buf == table->record[0]); @@ -7253,7 +7520,7 @@ int ha_rocksdb::write_row(uchar *buf) } } - int rv= update_write_row(nullptr, buf, skip_unique_check()); + const int rv= update_write_row(nullptr, buf, skip_unique_check()); if (rv == 0) { @@ -7273,18 +7540,18 @@ int ha_rocksdb::write_row(uchar *buf) called without setting m_last_rowkey. This function sets m_last_rowkey for Read Free Replication. */ -void ha_rocksdb::set_last_rowkey(const uchar *old_data) +void ha_rocksdb::set_last_rowkey(const uchar* const old_data) { if (old_data && use_read_free_rpl()) { - int old_pk_size= m_pk_descr->pack_record( + const int old_pk_size= m_pk_descr->pack_record( table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false); m_last_rowkey.copy((const char*)m_pk_packed_tuple, old_pk_size, &my_charset_bin); } } -int ha_rocksdb::get_pk_for_update(struct update_row_info* row_info) +int ha_rocksdb::get_pk_for_update(struct update_row_info* const row_info) { int size; @@ -7327,9 +7594,10 @@ int ha_rocksdb::get_pk_for_update(struct update_row_info* row_info) return 0; } -int ha_rocksdb::check_and_lock_unique_pk(uint key_id, +int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, const struct update_row_info& row_info, - bool* found, bool* pk_changed) + bool* const found, + bool* const pk_changed) { DBUG_ASSERT(found != nullptr); DBUG_ASSERT(pk_changed != nullptr); @@ -7374,22 +7642,21 @@ int ha_rocksdb::check_and_lock_unique_pk(uint key_id, 2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty) -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite) */ - rocksdb::Status s= row_info.tx->get_for_update(m_pk_descr->get_cf(), - row_info.new_pk_slice, - &m_retrieved_record); + const rocksdb::Status s= get_for_update(row_info.tx, m_pk_descr->get_cf(), + row_info.new_pk_slice, &m_retrieved_record); if (!s.ok() && !s.IsNotFound()) { 
return row_info.tx->set_status_error(table->in_use, s, - m_key_descr_arr[key_id], m_tbl_def); + *m_key_descr_arr[key_id], m_tbl_def); } *found= !s.IsNotFound(); return 0; } -int ha_rocksdb::check_and_lock_sk(uint key_id, - const struct update_row_info& row_info, - bool* found) const +int ha_rocksdb::check_and_lock_sk(const uint &key_id, + const struct update_row_info& row_info, + bool* const found) const { DBUG_ASSERT(found != nullptr); *found= false; @@ -7417,7 +7684,7 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, return 0; } - const std::shared_ptr& kd= m_key_descr_arr[key_id]; + const Rdb_key_def& kd= *m_key_descr_arr[key_id]; /* Calculate the new key for obtaining the lock @@ -7425,9 +7692,9 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, For unique secondary indexes, the key used for locking does not include the extended fields. */ - int size= kd->pack_record(table, m_pack_buffer, row_info.new_data, - m_sk_packed_tuple, nullptr, false, 0, - user_defined_key_parts, &n_null_fields); + int size= kd.pack_record(table, m_pack_buffer, row_info.new_data, + m_sk_packed_tuple, nullptr, false, 0, + user_defined_key_parts, &n_null_fields); if (n_null_fields > 0) { /* @@ -7437,8 +7704,8 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, return 0; } - rocksdb::Slice new_slice= rocksdb::Slice((const char*)m_sk_packed_tuple, - size); + const rocksdb::Slice new_slice= rocksdb::Slice((const char*)m_sk_packed_tuple, + size); /* For UPDATEs, if the key has changed, we need to obtain a lock. 
INSERTs @@ -7446,11 +7713,11 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, */ if (row_info.old_data != nullptr) { - size= kd->pack_record(table, m_pack_buffer, row_info.old_data, + size= kd.pack_record(table, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old, nullptr, false, row_info.hidden_pk_id, user_defined_key_parts); - rocksdb::Slice old_slice= rocksdb::Slice( + const rocksdb::Slice old_slice= rocksdb::Slice( (const char*)m_sk_packed_tuple_old, size); /* @@ -7474,7 +7741,7 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, the latest committed data. */ - bool all_parts_used= (user_defined_key_parts == kd->get_key_parts()); + const bool all_parts_used= (user_defined_key_parts == kd.get_key_parts()); /* This iterator seems expensive since we need to allocate and free @@ -7487,24 +7754,24 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, The bloom filter may need to be disabled for this lookup. */ - bool total_order_seek= + const bool total_order_seek= !can_use_bloom_filter(ha_thd(), kd, new_slice, all_parts_used, - is_ascending(m_key_descr_arr[key_id], + is_ascending(*m_key_descr_arr[key_id], HA_READ_KEY_EXACT)); - bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); + const bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); /* psergey-todo: we just need to take lock, lookups not needed: */ std::string dummy_value; - rocksdb::Status s= row_info.tx->get_for_update(kd->get_cf(), new_slice, - &dummy_value); + const rocksdb::Status s= get_for_update(row_info.tx, kd.get_cf(), new_slice, + &dummy_value); if (!s.ok() && !s.IsNotFound()) { return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); } - rocksdb::Iterator *iter= row_info.tx->get_iterator(kd->get_cf(), + rocksdb::Iterator* const iter= row_info.tx->get_iterator(kd.get_cf(), total_order_seek, fill_cache, true /* read current data */, @@ -7523,7 +7790,7 @@ int ha_rocksdb::check_and_lock_sk(uint key_id, int ha_rocksdb::check_uniqueness_and_lock( const struct update_row_info& row_info, - 
bool* pk_changed) + bool* const pk_changed) { /* Go through each index and determine if the index has uniqueness @@ -7562,20 +7829,22 @@ int ha_rocksdb::check_uniqueness_and_lock( return 0; } -int ha_rocksdb::bulk_load_key(Rdb_transaction* tx, - const std::shared_ptr& kd, +int ha_rocksdb::bulk_load_key(Rdb_transaction* const tx, + const Rdb_key_def& kd, const rocksdb::Slice& key, const rocksdb::Slice& value) { - rocksdb::ColumnFamilyHandle* cf= kd->get_cf(); + rocksdb::ColumnFamilyHandle* const cf= kd.get_cf(); DBUG_ASSERT(cf != nullptr); if (m_sst_info == nullptr) { m_sst_info= std::make_shared(rdb, m_table_handler->m_table_name, - kd->get_name(), cf, rocksdb_db_options); + kd.get_name(), cf, rocksdb_db_options, + THDVAR(ha_thd(), trace_sst_api)); tx->start_bulk_load(this); + m_bulk_load_tx= tx; } DBUG_ASSERT(m_sst_info != nullptr); @@ -7596,6 +7865,19 @@ int ha_rocksdb::finalize_bulk_load() { int rc= 0; + /* Skip if there are no possible ongoing bulk loads */ + if (m_sst_info == nullptr && m_bulk_load_tx == nullptr) + { + return rc; + } + + mysql_mutex_lock(&m_bulk_load_mutex); + + /* + We need this check because it's possible that m_sst_info has been + flushed and cleared by another thread by the time the mutex has been + acquired. 
+ */ if (m_sst_info != nullptr) { rc= m_sst_info->commit(); @@ -7609,29 +7891,27 @@ int ha_rocksdb::finalize_bulk_load() } m_sst_info= nullptr; - - Rdb_transaction *tx= get_or_create_tx(ha_thd()); - if (tx != nullptr) - { - tx->end_bulk_load(this); - } + m_bulk_load_tx->end_bulk_load(this); + m_bulk_load_tx= nullptr; } + mysql_mutex_unlock(&m_bulk_load_mutex); return rc; } -int ha_rocksdb::update_pk(const std::shared_ptr& kd, +int ha_rocksdb::update_pk(const Rdb_key_def& kd, const struct update_row_info& row_info, - bool pk_changed) + const bool &pk_changed) { - uint key_id= kd->get_keyno(); - bool hidden_pk= is_hidden_pk(key_id, table, m_tbl_def); + const uint key_id= kd.get_keyno(); + const bool hidden_pk= is_hidden_pk(key_id, table, m_tbl_def); if (!hidden_pk && pk_changed) { /* The old key needs to be deleted. */ - rocksdb::Status s= delete_or_singledelete(key_id, row_info.tx, kd->get_cf(), + const rocksdb::Status s= delete_or_singledelete( + key_id, row_info.tx, kd.get_cf(), row_info.old_pk_slice); if (!s.ok()) { @@ -7650,7 +7930,7 @@ int ha_rocksdb::update_pk(const std::shared_ptr& kd, &value_slice); int rc= 0; - auto cf= m_pk_descr->get_cf(); + const auto cf= m_pk_descr->get_cf(); if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) && !hidden_pk) { @@ -7680,7 +7960,7 @@ int ha_rocksdb::update_pk(const std::shared_ptr& kd, } else { - auto s= row_info.tx->put(cf, row_info.new_pk_slice, value_slice); + const auto s= row_info.tx->put(cf, row_info.new_pk_slice, value_slice); if (!s.ok()) { if (s.IsBusy()) @@ -7691,7 +7971,7 @@ int ha_rocksdb::update_pk(const std::shared_ptr& kd, } else { - rc = row_info.tx->set_status_error(table->in_use, s, m_pk_descr, + rc = row_info.tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def); } } @@ -7700,8 +7980,8 @@ int ha_rocksdb::update_pk(const std::shared_ptr& kd, return rc; } -int ha_rocksdb::update_sk(const TABLE* table_arg, - const std::shared_ptr& kd, +int ha_rocksdb::update_sk(const TABLE* const 
table_arg, + const Rdb_key_def& kd, const struct update_row_info& row_info) { int new_packed_size; @@ -7711,7 +7991,7 @@ int ha_rocksdb::update_sk(const TABLE* table_arg, rocksdb::Slice new_value_slice; rocksdb::Slice old_key_slice; - uint key_id = kd->get_keyno(); + const uint key_id = kd.get_keyno(); /* Can skip updating this key if none of the key fields have changed. */ @@ -7720,20 +8000,20 @@ int ha_rocksdb::update_sk(const TABLE* table_arg, return 0; } - bool store_checksums= should_store_checksums(); + const bool store_row_debug_checksums= should_store_row_debug_checksums(); - new_packed_size= kd->pack_record(table_arg, m_pack_buffer, row_info.new_data, + new_packed_size= kd.pack_record(table_arg, m_pack_buffer, row_info.new_data, m_sk_packed_tuple, &m_sk_tails, - store_checksums, + store_row_debug_checksums, row_info.hidden_pk_id); if (row_info.old_data != nullptr) { // The old value - old_packed_size= kd->pack_record(table_arg, m_pack_buffer, + old_packed_size= kd.pack_record(table_arg, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old, &m_sk_tails_old, - store_checksums, + store_row_debug_checksums, row_info.hidden_pk_id); /* @@ -7770,7 +8050,7 @@ int ha_rocksdb::update_sk(const TABLE* table_arg, reinterpret_cast(m_sk_packed_tuple_old), old_packed_size); - row_info.tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(), + row_info.tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), old_key_slice); } @@ -7800,19 +8080,19 @@ int ha_rocksdb::update_sk(const TABLE* table_arg, reinterpret_cast(m_sk_tails.ptr()), m_sk_tails.get_current_pos()); - write_batch->Put(kd->get_cf(), new_key_slice, new_value_slice); + write_batch->Put(kd.get_cf(), new_key_slice, new_value_slice); return 0; } int ha_rocksdb::update_indexes(const struct update_row_info& row_info, - bool pk_changed) + const bool &pk_changed) { int rc; for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++) { - const std::shared_ptr& kd= m_key_descr_arr[key_id]; + const Rdb_key_def& 
kd= *m_key_descr_arr[key_id]; if (is_pk(key_id, table, m_tbl_def)) { rc= update_pk(kd, row_info, pk_changed); @@ -7831,8 +8111,8 @@ int ha_rocksdb::update_indexes(const struct update_row_info& row_info, return 0; } -int ha_rocksdb::update_write_row(const uchar *old_data, - const uchar *new_data, +int ha_rocksdb::update_write_row(const uchar* const old_data, + const uchar* const new_data, const bool skip_unique_check) { bool pk_changed= false; @@ -7908,21 +8188,20 @@ int ha_rocksdb::update_write_row(const uchar *old_data, */ void ha_rocksdb::setup_scan_iterator( - const std::shared_ptr& kd, - rocksdb::Slice *slice, + const Rdb_key_def& kd, + rocksdb::Slice* const slice, const bool use_all_keys, const bool is_ascending, const uint eq_cond_len) { - DBUG_ASSERT(kd != nullptr); DBUG_ASSERT(slice != nullptr); DBUG_ASSERT(slice->size() >= eq_cond_len); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); bool skip_bloom= true; - rocksdb::Slice eq_cond(slice->data(), eq_cond_len); + const rocksdb::Slice eq_cond(slice->data(), eq_cond_len); if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) { skip_bloom= false; @@ -7951,8 +8230,21 @@ void ha_rocksdb::setup_scan_iterator( */ if (!m_scan_it) { - bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); - m_scan_it= tx->get_iterator(kd->get_cf(), skip_bloom, fill_cache); + const bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); + if (commit_in_the_middle()) + { + DBUG_ASSERT(m_scan_it_snapshot == nullptr); + m_scan_it_snapshot= rdb->GetSnapshot(); + + auto read_opts = rocksdb::ReadOptions(); + read_opts.total_order_seek = true; // TODO: set based on WHERE conditions + read_opts.snapshot= m_scan_it_snapshot; + m_scan_it= rdb->NewIterator(read_opts, kd.get_cf()); + } + else + { + m_scan_it= tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache); + } m_scan_it_skips_bloom= skip_bloom; } /* @@ -7962,6 +8254,17 @@ void 
ha_rocksdb::setup_scan_iterator( m_scan_it->Seek(*slice); } +void ha_rocksdb::release_scan_iterator() +{ + delete m_scan_it; + m_scan_it= nullptr; + + if (m_scan_it_snapshot) + { + rdb->ReleaseSnapshot(m_scan_it_snapshot); + m_scan_it_snapshot= nullptr; + } +} void ha_rocksdb::setup_iterator_for_rnd_scan() { @@ -7974,7 +8277,7 @@ void ha_rocksdb::setup_iterator_for_rnd_scan() rocksdb::Slice table_key((const char*)m_pk_packed_tuple, key_size); - setup_scan_iterator(m_pk_descr, &table_key); + setup_scan_iterator(*m_pk_descr, &table_key); m_skip_scan_it_next_call= true; } @@ -7983,7 +8286,7 @@ int ha_rocksdb::rnd_init(bool scan) { DBUG_ENTER("ha_rocksdb::rnd_init"); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); setup_read_decoders(); if (scan) @@ -8005,7 +8308,7 @@ int ha_rocksdb::rnd_init(bool scan) } -int ha_rocksdb::rnd_next(uchar *buf) +int ha_rocksdb::rnd_next(uchar* const buf) { int rc; ha_statistic_increment(&SSV::ha_read_rnd_next_count); @@ -8033,7 +8336,7 @@ int ha_rocksdb::rnd_next(uchar *buf) /* See also secondary_index_read(). 
*/ -int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward) +int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) { int rc; DBUG_ENTER("ha_rocksdb::rnd_next"); @@ -8054,65 +8357,79 @@ int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward) DBUG_RETURN(HA_ERR_END_OF_FILE); } - if (m_skip_scan_it_next_call) + for ( ; ; ) { - m_skip_scan_it_next_call= false; - } - else - { - if (move_forward) - m_scan_it->Next(); /* this call cannot fail */ + if (m_skip_scan_it_next_call) + { + m_skip_scan_it_next_call= false; + } else - m_scan_it->Prev(); /* this call cannot fail */ - } + { + if (move_forward) + m_scan_it->Next(); /* this call cannot fail */ + else + m_scan_it->Prev(); /* this call cannot fail */ + } + + if (!m_scan_it->Valid()) + { + rc= HA_ERR_END_OF_FILE; + break; + } - if (m_scan_it->Valid()) - { /* check if we're out of this table */ - rocksdb::Slice key= m_scan_it->key(); + const rocksdb::Slice key= m_scan_it->key(); if (!m_pk_descr->covers_key(key)) { rc= HA_ERR_END_OF_FILE; + break; + } + + if (m_lock_rows != RDB_LOCK_NONE) + { + /* + Lock the row we've just read. + + Now we call get_for_update which will 1) Take a lock and 2) Will fail + if the row was deleted since the snapshot was taken. + */ + Rdb_transaction* const tx= get_or_create_tx(table->in_use); + DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); + const rocksdb::Status s= get_for_update(tx, m_pk_descr->get_cf(), key, + &m_retrieved_record); + if (s.IsNotFound() && + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) + { + // This occurs if we accessed a row, tried to lock it, failed, + // released and reacquired the snapshot (because of READ COMMITTED + // mode) and the row was deleted by someone else in the meantime. + // If so, we just want to move on to the next row. 
+ continue; + } + + if (!s.ok()) + { + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, + m_tbl_def)); + } + + // If we called get_for_update() use the value from that call not from + // the iterator as it may be stale since we don't have a snapshot + // when m_lock_rows is not RDB_LOCK_NONE. + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc= convert_record_from_storage_format(&key, buf); } else { - if (m_lock_rows != RDB_LOCK_NONE) - { - /* - Lock the row we've just read. - - Now we call get_for_update which will 1) Take a lock and 2) Will fail - if the row was deleted since the snapshot was taken. - */ - Rdb_transaction *tx= get_or_create_tx(table->in_use); - DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); - rocksdb::Status s= tx->get_for_update(m_pk_descr->get_cf(), key, - &m_retrieved_record); - if (!s.ok()) - { - DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, - m_tbl_def)); - } - - // If we called get_for_update() use the value from that call not from - // the iterator as it may be stale since we don't have a snapshot - // when m_lock_rows is not RDB_LOCK_NONE. 
- m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); - rc= convert_record_from_storage_format(&key, buf); - } - else - { - // Use the value from the iterator - rocksdb::Slice value= m_scan_it->value(); - m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); - rc= convert_record_from_storage_format(&key, &value, buf); - } - - table->status= 0; + // Use the value from the iterator + rocksdb::Slice value= m_scan_it->value(); + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc= convert_record_from_storage_format(&key, &value, buf); } + + table->status= 0; + break; } - else - rc= HA_ERR_END_OF_FILE; if (!rc) { stats.rows_read++; @@ -8135,7 +8452,7 @@ int ha_rocksdb::rnd_end() int ha_rocksdb::index_init(uint idx, bool sorted) { - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); setup_read_decoders(); @@ -8160,7 +8477,7 @@ int ha_rocksdb::index_end() } -int ha_rocksdb::delete_all_rows(ha_rows* nrows) +int ha_rocksdb::delete_all_rows(ha_rows* const nrows) { DBUG_ENTER("ha_rocksdb::delete_all_rows"); /* @@ -8187,7 +8504,7 @@ int ha_rocksdb::truncate() The row is either table->record[0] or table->record[1]. 
(Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)" */ -int ha_rocksdb::delete_row(const uchar *buf) +int ha_rocksdb::delete_row(const uchar* const buf) { DBUG_ENTER("ha_rocksdb::delete_row"); @@ -8195,15 +8512,15 @@ int ha_rocksdb::delete_row(const uchar *buf) set_last_rowkey(buf); rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction* const tx= get_or_create_tx(table->in_use); - uint index= pk_index(table, m_tbl_def); + const uint index= pk_index(table, m_tbl_def); rocksdb::Status s= delete_or_singledelete(index, tx, m_pk_descr->get_cf(), key_slice); if (!s.ok()) { - DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def)); + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); } longlong hidden_pk_id= 0; @@ -8217,13 +8534,13 @@ int ha_rocksdb::delete_row(const uchar *buf) if (!is_pk(i, table, m_tbl_def)) { int packed_size; - const std::shared_ptr& kd= m_key_descr_arr[i]; - packed_size= kd->pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, + const Rdb_key_def& kd= *m_key_descr_arr[i]; + packed_size= kd.pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, nullptr, false, hidden_pk_id); rocksdb::Slice secondary_key_slice( reinterpret_cast(m_sk_packed_tuple), packed_size); /* Deleting on secondary key doesn't need any locks: */ - tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(), + tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), secondary_key_slice); } } @@ -8241,9 +8558,9 @@ int ha_rocksdb::delete_row(const uchar *buf) rocksdb::Status ha_rocksdb::delete_or_singledelete(uint index, - Rdb_transaction *tx, - rocksdb::ColumnFamilyHandle* column_family, - const rocksdb::Slice& key) + Rdb_transaction* const tx, + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key) { if (can_use_single_delete(index)) return tx->single_delete(column_family, key); @@ -8339,7 +8656,7 @@ 
int ha_rocksdb::info(uint flag) { continue; } - KEY* k= &table->key_info[i]; + KEY* const k= &table->key_info[i]; for (uint j = 0; j < k->actual_key_parts; j++) { const Rdb_index_stats& k_stats= m_key_descr_arr[i]->m_stats; @@ -8381,7 +8698,7 @@ int ha_rocksdb::info(uint flag) DBUG_RETURN(0); } -void ha_rocksdb::position(const uchar *record) +void ha_rocksdb::position(const uchar* const record) { DBUG_ENTER("ha_rocksdb::position"); @@ -8402,7 +8719,7 @@ void ha_rocksdb::position(const uchar *record) read set (this may potentially disable index-only access when PK column cannot be restored from its mem-comparable form in the secondary indexes). */ - uint packed_size= m_pk_descr->pack_record(table, m_pack_buffer, record, + const uint packed_size= m_pk_descr->pack_record(table, m_pack_buffer, record, ref, nullptr, false, hidden_pk_id); /* @@ -8416,7 +8733,7 @@ void ha_rocksdb::position(const uchar *record) } -int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos) +int ha_rocksdb::rnd_pos(uchar* const buf, uchar* const pos) { int rc; size_t len; @@ -8462,15 +8779,15 @@ void ha_rocksdb::calc_updated_indexes() for (uint keynr= 0; keynr < table->s->keys; keynr++) { - const std::shared_ptr& kd= m_key_descr_arr[keynr]; + const Rdb_key_def& kd= *m_key_descr_arr[keynr]; /* Walk over all key parts, including the "extended key" suffix */ - uint key_parts= kd->get_key_parts(); + const uint key_parts= kd.get_key_parts(); for (uint kp= 0; kp < key_parts; kp++) { if (has_hidden_pk(table) && kp + 1 == key_parts) break; - Field *field= kd->get_table_field_for_part_no(table, kp); + Field* const field= kd.get_table_field_for_part_no(table, kp); if (bitmap_is_set(table->write_set, field->field_index)) { m_update_scope.set_bit(keynr); @@ -8482,7 +8799,7 @@ void ha_rocksdb::calc_updated_indexes() } -int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) +int ha_rocksdb::update_row(const uchar* const old_data, uchar* const new_data) { DBUG_ENTER("ha_rocksdb::update_row"); 
DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); @@ -8494,8 +8811,7 @@ int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) DBUG_ASSERT(new_data == table->record[0]); ha_statistic_increment(&SSV::ha_update_count); - int rv; - rv= update_write_row(old_data, new_data, false); + const int rv= update_write_row(old_data, new_data, false); if (rv == 0) { @@ -8507,7 +8823,7 @@ int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data) /* The following function was copied from ha_blackhole::store_lock: */ -THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, +THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { @@ -8533,9 +8849,9 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, The following logic was copied directly from ha_innobase::store_lock_with_x_type() in storage/innobase/handler/ha_innodb.cc and causes MyRocks to leave - locks in place on rows that are in a table that is not beint updated. + locks in place on rows that are in a table that is not being updated. 
*/ - uint sql_command = my_core::thd_sql_command(thd); + const uint sql_command = my_core::thd_sql_command(thd); if ((lock_type == TL_READ && in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || can_hold_read_locks_on_select(thd, lock_type)) @@ -8593,14 +8909,29 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd, DBUG_RETURN(to); } -void ha_rocksdb::read_thd_vars(THD *thd) +void ha_rocksdb::read_thd_vars(THD* const thd) { - m_store_checksums= THDVAR(thd, store_checksums); - m_verify_checksums= THDVAR(thd, verify_checksums); + m_store_row_debug_checksums= THDVAR(thd, store_row_debug_checksums); + m_verify_row_debug_checksums= THDVAR(thd, verify_row_debug_checksums); m_checksums_pct= THDVAR(thd, checksums_pct); } -int ha_rocksdb::external_lock(THD *thd, int lock_type) +const char* ha_rocksdb::thd_rocksdb_tmpdir() +{ + const char* tmp_dir = THDVAR(ha_thd(), tmpdir); + + /* + We want to treat an empty string as nullptr, in these cases DDL operations + will use the default --tmpdir passed to mysql instead. + */ + if (tmp_dir != nullptr && *tmp_dir == '\0') { + tmp_dir= nullptr; + } + + return(tmp_dir); +} + +int ha_rocksdb::external_lock(THD* const thd, int lock_type) { int res= 0; DBUG_ENTER("ha_rocksdb::external_lock"); @@ -8621,7 +8952,7 @@ int ha_rocksdb::external_lock(THD *thd, int lock_type) if (lock_type == F_UNLCK) { - Rdb_transaction* tx= get_or_create_tx(thd); + Rdb_transaction* const tx= get_or_create_tx(thd); tx->io_perf_end_and_record(&m_io_perf); if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) @@ -8656,7 +8987,7 @@ int ha_rocksdb::external_lock(THD *thd, int lock_type) is, handler->start_stmt() is not called for INSERTs. So, we put this code here. 
*/ - Rdb_transaction* tx= get_or_create_tx(thd); + Rdb_transaction* const tx= get_or_create_tx(thd); read_thd_vars(thd); m_update_scope_is_valid= false; @@ -8726,9 +9057,9 @@ int ha_rocksdb::external_lock(THD *thd, int lock_type) */ -int ha_rocksdb::start_stmt(THD *thd, thr_lock_type lock_type) +int ha_rocksdb::start_stmt(THD* const thd, thr_lock_type lock_type) { - Rdb_transaction* tx= get_or_create_tx(thd); + Rdb_transaction* const tx= get_or_create_tx(thd); read_thd_vars(thd); rocksdb_register_tx(ht, thd, tx); tx->io_perf_start(&m_io_perf); @@ -8750,17 +9081,17 @@ rocksdb::Range get_range( } static rocksdb::Range get_range( - const std::shared_ptr& kd, + const Rdb_key_def& kd, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], int offset1, int offset2) { - return get_range(kd->get_index_number(), buf, offset1, offset2); + return get_range(kd.get_index_number(), buf, offset1, offset2); } -rocksdb::Range get_range(const std::shared_ptr& kd, +rocksdb::Range get_range(const Rdb_key_def& kd, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) { - if (kd->m_is_reverse_cf) + if (kd.m_is_reverse_cf) { return myrocks::get_range(kd, buf, 1, 0); } @@ -8771,9 +9102,9 @@ rocksdb::Range get_range(const std::shared_ptr& kd, } rocksdb::Range ha_rocksdb::get_range( - int i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) const + const int &i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) const { - return myrocks::get_range(m_key_descr_arr[i], buf); + return myrocks::get_range(*m_key_descr_arr[i], buf); } @@ -8801,7 +9132,7 @@ void Rdb_drop_index_thread::run() ? 
24*60*60 // no filtering : 60; // filtering - auto ret __attribute__((__unused__)) = mysql_cond_timedwait( + const auto ret __attribute__((__unused__)) = mysql_cond_timedwait( &m_signal_cond, &m_signal_mutex, &ts); if (m_stop) { break; @@ -8817,7 +9148,7 @@ void Rdb_drop_index_thread::run() rocksdb::ReadOptions read_opts; read_opts.total_order_seek = true; // disable bloom filter - for (auto d : indices) { + for (const auto d : indices) { uint32 cf_flags= 0; if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) { @@ -8828,12 +9159,13 @@ void Rdb_drop_index_thread::run() } rocksdb::ColumnFamilyHandle* cfh= cf_manager.get_cf(d.cf_id); DBUG_ASSERT(cfh); - bool is_reverse_cf= cf_flags & Rdb_key_def::REVERSE_CF_FLAG; + const bool is_reverse_cf= cf_flags & Rdb_key_def::REVERSE_CF_FLAG; bool index_removed= false; uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; rdb_netbuf_store_uint32(key_buf, d.index_id); - rocksdb::Slice key = rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice + key = rocksdb::Slice((char*)key_buf, sizeof(key_buf)); uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf?1:0, is_reverse_cf?0:1); @@ -8904,7 +9236,7 @@ void Rdb_drop_index_thread::run() } -Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* tablename) +Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* const tablename) { DBUG_ASSERT(tablename != nullptr); @@ -8926,14 +9258,14 @@ Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* tablename) tablename points to line in form "./dbname/tablename". 
*/ -int ha_rocksdb::delete_table(const char *tablename) +int ha_rocksdb::delete_table(const char* const tablename) { DBUG_ENTER("ha_rocksdb::delete_table"); - std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch* const batch= wb.get(); /* Find the table in the hash */ - Rdb_tbl_def *tbl = get_table_if_exists(tablename); + Rdb_tbl_def* const tbl = get_table_if_exists(tablename); if (!tbl) DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch); @@ -8952,13 +9284,14 @@ int ha_rocksdb::delete_table(const char *tablename) } -void ha_rocksdb::remove_rows(Rdb_tbl_def *tbl) +void ha_rocksdb::remove_rows(Rdb_tbl_def* const tbl) { - rocksdb::WriteOptions wo= rdb_get_rocksdb_write_options(handler::ha_thd()); + const rocksdb::WriteOptions wo= + rdb_get_rocksdb_write_options(handler::ha_thd()); rocksdb::ReadOptions opts; opts.total_order_seek= true; - rocksdb::Iterator* it= rdb->NewIterator(opts); + rocksdb::Iterator* const it= rdb->NewIterator(opts); char key_buf[MAX_KEY_LENGTH]; uint key_len; @@ -8969,15 +9302,15 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def *tbl) */ for (uint i= 0; i < tbl->m_key_count ; i++) { - const std::shared_ptr& kd= tbl->m_key_descr_arr[i]; - kd->get_infimum_key(reinterpret_cast(key_buf), &key_len); + const Rdb_key_def& kd= *tbl->m_key_descr_arr[i]; + kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); - rocksdb::Slice table_key(key_buf, key_len); + const rocksdb::Slice table_key(key_buf, key_len); it->Seek(table_key); while (it->Valid()) { - rocksdb::Slice key= it->key(); - if (!kd->covers_key(key)) + const rocksdb::Slice key= it->key(); + if (!kd.covers_key(key)) { break; } @@ -8993,7 +9326,7 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def *tbl) } -int ha_rocksdb::rename_table(const char *from, const char *to) +int ha_rocksdb::rename_table(const char* const from, const char* const to) { 
DBUG_ASSERT(from != nullptr); DBUG_ASSERT(to != nullptr); @@ -9051,8 +9384,8 @@ int ha_rocksdb::rename_table(const char *from, const char *to) } } - std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch* const batch= wb.get(); dict_manager.lock(); write_err= ddl_manager.rename(from_str, to_str, batch) || dict_manager.commit(batch); @@ -9072,7 +9405,7 @@ int ha_rocksdb::rename_table(const char *from, const char *to) */ -bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *info, +bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO* const info, uint table_changes) { DBUG_ENTER("ha_rocksdb::check_if_incompatible_data"); @@ -9110,8 +9443,8 @@ int ha_rocksdb::extra(enum ha_extra_function operation) Given a starting key and an ending key, estimate the number of rows that will exist between the two keys. */ -ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, - key_range *max_key) +ha_rows ha_rocksdb::records_in_range(uint inx, key_range* const min_key, + key_range* const max_key) { DBUG_ENTER("ha_rocksdb::records_in_range"); ha_rows ret = THDVAR(ha_thd(), records_in_range); @@ -9119,38 +9452,37 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, DBUG_RETURN(ret); } if (table->force_index) { - ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range); + const ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range); if (force_rows) { DBUG_RETURN(force_rows); } } - const std::shared_ptr& kd= m_key_descr_arr[inx]; - DBUG_ASSERT(kd); + const Rdb_key_def& kd= *m_key_descr_arr[inx]; uint size1=0; if (min_key) { - size1 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + size1 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, min_key->key, min_key->keypart_map); if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV || min_key->flag == HA_READ_PREFIX_LAST || min_key->flag == HA_READ_AFTER_KEY) 
{ - kd->successor(m_sk_packed_tuple, size1); + kd.successor(m_sk_packed_tuple, size1); } } else { - kd->get_infimum_key(m_sk_packed_tuple, &size1); + kd.get_infimum_key(m_sk_packed_tuple, &size1); } uint size2=0; if (max_key) { - size2 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old, + size2 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old, max_key->key, max_key->keypart_map); if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV || max_key->flag == HA_READ_PREFIX_LAST || max_key->flag == HA_READ_AFTER_KEY) { - kd->successor(m_sk_packed_tuple_old, size2); + kd.successor(m_sk_packed_tuple_old, size2); } // pad the upper key with FFFFs to make sure it is more than the lower if (size1 > size2) { @@ -9158,11 +9490,11 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, size2 = size1; } } else { - kd->get_supremum_key(m_sk_packed_tuple_old, &size2); + kd.get_supremum_key(m_sk_packed_tuple_old, &size2); } - rocksdb::Slice slice1((const char*) m_sk_packed_tuple, size1); - rocksdb::Slice slice2((const char*) m_sk_packed_tuple_old, size2); + const rocksdb::Slice slice1((const char*) m_sk_packed_tuple, size1); + const rocksdb::Slice slice2((const char*) m_sk_packed_tuple_old, size2); // slice1 >= slice2 means no row will match if (slice1.compare(slice2) >= 0) @@ -9171,15 +9503,15 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, } rocksdb::Range r( - kd->m_is_reverse_cf ? slice2 : slice1, - kd->m_is_reverse_cf ? slice1 : slice2 + kd.m_is_reverse_cf ? slice2 : slice1, + kd.m_is_reverse_cf ? 
slice1 : slice2 ); uint64_t sz=0; - auto disk_size = kd->m_stats.m_actual_disk_size; + auto disk_size = kd.m_stats.m_actual_disk_size; if (disk_size == 0) - disk_size = kd->m_stats.m_data_size; - auto rows = kd->m_stats.m_rows; + disk_size = kd.m_stats.m_data_size; + auto rows = kd.m_stats.m_rows; if (rows == 0 || disk_size == 0) { rows= 1; @@ -9187,7 +9519,7 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, } // Getting statistics, including from Memtables rdb->GetApproximateSizes( - kd->get_cf(), + kd.get_cf(), &r, 1, &sz, true); ret = rows*sz/disk_size; @@ -9217,7 +9549,7 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key, } -void ha_rocksdb::update_create_info(HA_CREATE_INFO *create_info) +void ha_rocksdb::update_create_info(HA_CREATE_INFO* const create_info) { if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { @@ -9239,7 +9571,7 @@ void ha_rocksdb::update_create_info(HA_CREATE_INFO *create_info) column family, the first row of the next index id should be the last row of the previous index id. 
*/ -int ha_rocksdb::optimize(THD *thd, HA_CHECK_OPT* check_opt) +int ha_rocksdb::optimize(THD* const thd, HA_CHECK_OPT* const check_opt) { int rc= 0; for (uint i= 0; i < table->s->keys; i++) @@ -9257,24 +9589,23 @@ int ha_rocksdb::optimize(THD *thd, HA_CHECK_OPT* check_opt) return rc; } -int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) +int ha_rocksdb::calculate_stats(const TABLE* const table_arg, THD* const thd, + HA_CHECK_OPT* const check_opt) { - DBUG_ENTER("ha_rocksdb::analyze"); - - if (!table) - DBUG_RETURN(1); + DBUG_ENTER("ha_rocksdb::calculate_stats"); // find per column family key ranges which need to be queried std::unordered_map> ranges; std::unordered_set ids_to_check; - std::vector buf(table->s->keys * 2 * Rdb_key_def::INDEX_NUMBER_SIZE); - for (uint i = 0; i < table->s->keys; i++) + std::vector buf(table_arg->s->keys * 2 * + Rdb_key_def::INDEX_NUMBER_SIZE); + for (uint i = 0; i < table_arg->s->keys; i++) { - auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; - const std::shared_ptr& kd= m_key_descr_arr[i]; - ranges[kd->get_cf()].push_back(get_range(i, bufp)); - ids_to_check.insert(kd->get_gl_index_id()); + const auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; + const Rdb_key_def& kd= *m_key_descr_arr[i]; + ranges[kd.get_cf()].push_back(get_range(i, bufp)); + ids_to_check.insert(kd.get_gl_index_id()); } // for analyze statements, force flush on memtable to get accurate cardinality @@ -9292,8 +9623,8 @@ int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) rocksdb::TablePropertiesCollection props; for (auto it : ranges) { - auto old_size __attribute__((__unused__)) = props.size(); - auto status = rdb->GetPropertiesOfTablesInRange( + const auto old_size __attribute__((__unused__)) = props.size(); + const auto status = rdb->GetPropertiesOfTablesInRange( it.first, &it.second[0], it.second.size(), &props); DBUG_ASSERT(props.size() >= old_size); if (!status.ok()) @@ -9303,13 +9634,13 @@ int ha_rocksdb::analyze(THD* thd, 
HA_CHECK_OPT* check_opt) int num_sst= 0; // group stats per index id std::unordered_map stats; - for (auto it : ids_to_check) + for (const auto &it : ids_to_check) { // Initialize the stats to 0. If there are no files that contain // this gl_index_id, then 0 should be stored for the cached stats. stats[it] = Rdb_index_stats(it); } - for (auto it : props) + for (const auto &it : props) { std::vector sst_stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); @@ -9317,7 +9648,7 @@ int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) sst_stats is a list of index statistics for indexes that have entries in the current SST file. */ - for (auto it1 : sst_stats) + for (const auto &it1 : sst_stats) { /* Only update statistics for indexes that belong to this SQL table. @@ -9330,8 +9661,8 @@ int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end()) continue; - const std::shared_ptr& kd= - ddl_manager.find(it1.m_gl_index_id); + auto kd= ddl_manager.safe_find(it1.m_gl_index_id); + DBUG_ASSERT(kd != nullptr); stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length()); } num_sst++; @@ -9344,10 +9675,20 @@ int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt) DBUG_RETURN(0); } -void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment, +int ha_rocksdb::analyze(THD* const thd, HA_CHECK_OPT* const check_opt) +{ + DBUG_ENTER("ha_rocksdb::analyze"); + + if (!table) + DBUG_RETURN(1); + + DBUG_RETURN(calculate_stats(table, thd, check_opt)); +} + +void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values) + ulonglong* const first_value, + ulonglong* const nb_reserved_values) { /* MySQL has a somewhat complicated way of handling the auto-increment value. 
@@ -9364,7 +9705,49 @@ void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment, m_auto_incr_val this should be safe - if we had to grab a mutex, doing an actual reserve of some values might be a better solution. */ - *first_value= m_tbl_def->m_auto_incr_val++; + DEBUG_SYNC(ha_thd(), "rocksdb.autoinc_vars"); + + if (off > inc) + { + off= 1; + } + + // Local variable reference to simplify code below + std::atomic& auto_incr= m_tbl_def->m_auto_incr_val; + + if (inc == 1 && off == 1) + { + // Optimization for the standard case where we are always simply + // incrementing from the last position + *first_value= auto_incr++; + } + else + { + // The next value can be more complicated if either `inc` or 'off' is not 1 + longlong new_val; + longlong last_val= auto_incr; + + // Loop until we can correctly update the atomic value + do + { + // Calculate the next value in the auto increment series: + // offset + N * increment + // where N is 0, 1, 2, ... + // + // For further information please visit: + // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html + new_val= ((last_val + (inc - off) - 1) / inc) * inc + off; + + // Attempt to store the new value (plus 1 since m_auto_incr_val contains + // the next available value) into the atomic value. If the current + // value no longer matches what we have in 'last_val' this will fail and + // we will repeat the loop (`last_val` will automatically get updated + // with the current value). 
+ } while (!auto_incr.compare_exchange_weak(last_val, new_val + 1)); + + *first_value= new_val; + } + *nb_reserved_values= 1; } @@ -9374,9 +9757,9 @@ void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment, /* Debugger help function */ static char dbug_item_print_buf[512]; -const char *dbug_print_item(Item *item) +const char *dbug_print_item(Item* const item) { - char *buf= dbug_item_print_buf; + char* const buf= dbug_item_print_buf; String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin); str.length(0); if (!item) @@ -9412,7 +9795,7 @@ const char *dbug_print_item(Item *item) Part of condition we couldn't check (always nullptr). */ -class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* idx_cond) +class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* const idx_cond) { DBUG_ASSERT(keyno != MAX_KEY); DBUG_ASSERT(idx_cond != nullptr); @@ -9444,7 +9827,7 @@ class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* idx_cond) and return it) */ -enum icp_result ha_rocksdb::check_index_cond() +enum icp_result ha_rocksdb::check_index_cond() const { DBUG_ASSERT(pushed_idx_cond); DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY); @@ -9470,16 +9853,14 @@ enum icp_result ha_rocksdb::check_index_cond() @param kd @param find_flag */ -bool ha_rocksdb::is_ascending(const std::shared_ptr& kd, +bool ha_rocksdb::is_ascending(const Rdb_key_def& kd, enum ha_rkey_function find_flag) const { - DBUG_ASSERT(kd != nullptr); - bool is_ascending; switch (find_flag) { case HA_READ_KEY_EXACT: { - is_ascending = !kd->m_is_reverse_cf; + is_ascending = !kd.m_is_reverse_cf; break; } case HA_READ_PREFIX: @@ -9490,7 +9871,7 @@ bool ha_rocksdb::is_ascending(const std::shared_ptr& kd, case HA_READ_KEY_OR_NEXT: case HA_READ_AFTER_KEY: { - is_ascending = !kd->m_is_reverse_cf; + is_ascending = !kd.m_is_reverse_cf; break; } case HA_READ_KEY_OR_PREV: @@ -9498,7 +9879,7 @@ bool ha_rocksdb::is_ascending(const std::shared_ptr& kd, case HA_READ_PREFIX_LAST: case 
HA_READ_PREFIX_LAST_OR_PREV: { - is_ascending = kd->m_is_reverse_cf; + is_ascending = kd.m_is_reverse_cf; break; } default: @@ -9513,7 +9894,7 @@ bool ha_rocksdb::is_ascending(const std::shared_ptr& kd, my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( TABLE* /* altered_table */, - my_core::Alter_inplace_info *ha_alter_info) + my_core::Alter_inplace_info* const ha_alter_info) { DBUG_ENTER("check_if_supported_alter"); @@ -9557,24 +9938,24 @@ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( @retval false Success */ bool ha_rocksdb::prepare_inplace_alter_table( - TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info) + TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info) { DBUG_ENTER("prepare_inplace_alter_table"); - uint n_dropped_keys= ha_alter_info->index_drop_count; - uint n_added_keys= ha_alter_info->index_add_count; - uint old_n_keys= m_tbl_def->m_key_count; + const uint n_dropped_keys= ha_alter_info->index_drop_count; + const uint n_added_keys= ha_alter_info->index_add_count; + const uint old_n_keys= m_tbl_def->m_key_count; uint new_n_keys= altered_table->s->keys; if (has_hidden_pk(altered_table)) { new_n_keys += 1; } - std::shared_ptr* old_key_descr= m_tbl_def->m_key_descr_arr; - std::shared_ptr* new_key_descr= + std::shared_ptr* const old_key_descr= m_tbl_def->m_key_descr_arr; + std::shared_ptr* const new_key_descr= new std::shared_ptr[new_n_keys]; - Rdb_tbl_def* new_tdef= new Rdb_tbl_def(m_tbl_def->full_tablename()); + Rdb_tbl_def* const new_tdef= new Rdb_tbl_def(m_tbl_def->full_tablename()); new_tdef->m_key_descr_arr= new_key_descr; new_tdef->m_key_count= new_n_keys; new_tdef->m_auto_incr_val= @@ -9610,7 +9991,7 @@ bool ha_rocksdb::prepare_inplace_alter_table( /* Determine which(if any) key definition(s) need to be dropped */ for (i = 0; i < ha_alter_info->index_drop_count; i++) { - const KEY* key = ha_alter_info->index_drop_buffer[i]; + const KEY* const 
key = ha_alter_info->index_drop_buffer[i]; for (j = 0; j < old_n_keys; j++) { if (!old_key_descr[j]->m_name.compare(key->name)) @@ -9667,14 +10048,14 @@ bool ha_rocksdb::prepare_inplace_alter_table( @retval true Error @retval false Success */ -bool ha_rocksdb::inplace_alter_table(TABLE* altered_table, - my_core::Alter_inplace_info *ha_alter_info) +bool ha_rocksdb::inplace_alter_table(TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info) { /* Do nothing for now */ DBUG_ENTER("inplace_alter_table"); DBUG_ASSERT(ha_alter_info->handler_ctx); - Rdb_inplace_alter_ctx* ctx= + Rdb_inplace_alter_ctx* const ctx= static_cast (ha_alter_info->handler_ctx); if (ha_alter_info->handler_flags & my_core::Alter_inplace_info::ADD_INDEX) @@ -9703,16 +10084,16 @@ bool ha_rocksdb::inplace_alter_table(TABLE* altered_table, /** Scan the Primary Key index entries and populate the new secondary keys. */ -int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, +int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, const std::unordered_set>& indexes) { DBUG_ENTER("ha_rocksdb::inplace_populate_sk"); - std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch* const batch= wb.get(); /* Update the data dictionary */ std::unordered_set create_index_ids; - for (auto& index : indexes) + for (const auto& index : indexes) { create_index_ids.insert(index->get_gl_index_id()); } @@ -9722,15 +10103,54 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, const bool hidden_pk_exists = has_hidden_pk(table); int res= 0; - Rdb_transaction *tx; - ulonglong rdb_merge_buf_size= THDVAR(ha_thd(), merge_buf_size); - ulonglong rdb_merge_combine_read_size= THDVAR(ha_thd(), + Rdb_transaction *tx= get_or_create_tx(table->in_use); + + /* + There is one specific scenario where m_sst_info may not be nullptr. 
This + happens if the handler we're using happens to be the handler where the PK + bulk load was done on. The sequence of events that lead to this is as + follows (T1 is PK bulk load, T2 is SK alter table): + + T1: Execute last INSERT statement + T1: Return TABLE and handler object back to Table_cache_manager + T1: Close connection + T2: Execute ALTER statement + T2: Take same TABLE/handler from Table_cache_manager + T2: Call closefrm which will call finalize_bulk_load on every other open + table/handler *except* the one it's on. + T2: Acquire stale snapshot of PK + T1: Call finalize_bulk_load + + This is rare because usually, closefrm will call the destructor (and thus + finalize_bulk_load) on the handler where PK bulk load is done. However, if + the thread ids of the bulk load thread and the alter thread differ by a + multiple of table_cache_instances (8 by default), then they hash to the + same bucket in Table_cache_manager and the alter thread will not not call + the destructor on the handler it is holding. Thus, its m_sst_info will not + be nullptr. + + At this point, it is safe to refresh the snapshot because we know all other + open handlers have been closed at this point, and the one we're on is the + only one left. 
+ */ + if (m_sst_info != nullptr) + { + if ((res= finalize_bulk_load())) + { + DBUG_RETURN(res); + } + tx->commit(); + } + + const ulonglong rdb_merge_buf_size= THDVAR(ha_thd(), merge_buf_size); + const ulonglong rdb_merge_combine_read_size= THDVAR(ha_thd(), merge_combine_read_size); - for (auto& index : indexes) + for (const auto& index : indexes) { const rocksdb::Comparator* index_comp= index->get_cf()->GetComparator(); - Rdb_index_merge rdb_merge(rdb_merge_buf_size, rdb_merge_combine_read_size, + Rdb_index_merge rdb_merge(thd_rocksdb_tmpdir(), rdb_merge_buf_size, + rdb_merge_combine_read_size, index_comp); if ((res= rdb_merge.init())) @@ -9743,7 +10163,7 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, as the pk index position may have changed in the case of hidden primary keys. */ - uint pk= pk_index(table, m_tbl_def); + const uint pk= pk_index(table, m_tbl_def); ha_index_init(pk, true); /* Scan each record in the primary key in order */ @@ -9761,15 +10181,16 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, } /* Create new secondary index entry */ - int new_packed_size= index->pack_record(new_table_arg, m_pack_buffer, + const int new_packed_size= index->pack_record( + new_table_arg, m_pack_buffer, table->record[0], m_sk_packed_tuple, &m_sk_tails, - should_store_checksums(), + should_store_row_debug_checksums(), hidden_pk_id); - rocksdb::Slice key= rocksdb::Slice( + const rocksdb::Slice key= rocksdb::Slice( reinterpret_cast(m_sk_packed_tuple), new_packed_size); - rocksdb::Slice val= rocksdb::Slice( + const rocksdb::Slice val= rocksdb::Slice( reinterpret_cast(m_sk_tails.ptr()), m_sk_tails.get_current_pos()); @@ -9792,7 +10213,6 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, DBUG_RETURN(res); } - tx= get_or_create_tx(table->in_use); ha_index_end(); /* @@ -9806,7 +10226,7 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, /* Insert key and slice to SST via SSTFileWriter API. 
*/ - if ((res= bulk_load_key(tx, index, merge_key, merge_val))) + if ((res= bulk_load_key(tx, *index, merge_key, merge_val))) { break; } @@ -9874,11 +10294,11 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg, @retval false Success */ bool ha_rocksdb::commit_inplace_alter_table( - my_core::TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info, + my_core::TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info, bool commit) { - Rdb_inplace_alter_ctx* ctx0= + Rdb_inplace_alter_ctx* const ctx0= static_cast (ha_alter_info->handler_ctx); DBUG_ENTER("commit_inplace_alter_table"); @@ -9954,8 +10374,8 @@ bool ha_rocksdb::commit_inplace_alter_table( my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | my_core::Alter_inplace_info::ADD_INDEX)) { - std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict_manager.begin(); + rocksdb::WriteBatch* const batch= wb.get(); std::unordered_set create_index_ids; m_tbl_def= ctx0->m_new_tdef; @@ -9965,12 +10385,13 @@ bool ha_rocksdb::commit_inplace_alter_table( dict_manager.lock(); for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { - Rdb_inplace_alter_ctx* ctx= static_cast (*pctx); + Rdb_inplace_alter_ctx* const ctx= + static_cast (*pctx); /* Mark indexes to be dropped */ dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch); - for (auto& index : ctx->m_added_indexes) + for (const auto& index : ctx->m_added_indexes) { create_index_ids.insert(index->get_gl_index_id()); } @@ -9998,6 +10419,20 @@ bool ha_rocksdb::commit_inplace_alter_table( /* Mark ongoing create indexes as finished/remove from data dictionary */ dict_manager.finish_indexes_operation(create_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + /* + We need to recalculate the index stats here manually. 
The reason is that + the secondary index does not exist inside + m_index_num_to_keydef until it is committed to the data dictionary, which + prevents us from updating the stats normally as the ddl_manager cannot + find the proper gl_index_ids yet during adjust_stats calls. + */ + if (calculate_stats(altered_table, nullptr, nullptr)) + { + /* Failed to update index statistics, should never happen */ + DBUG_ASSERT(0); + } + rdb_drop_idx_thread.signal(); } @@ -10059,7 +10494,6 @@ struct rocksdb_status_counters_t { uint64_t number_multiget_bytes_read; uint64_t number_deletes_filtered; uint64_t number_merge_failures; - uint64_t sequence_number; uint64_t bloom_filter_prefix_checked; uint64_t bloom_filter_prefix_useful; uint64_t number_reseeks_iteration; @@ -10116,7 +10550,6 @@ DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ) DEF_SHOW_FUNC(number_multiget_bytes_read, NUMBER_MULTIGET_BYTES_READ) DEF_SHOW_FUNC(number_deletes_filtered, NUMBER_FILTERED_DELETES) DEF_SHOW_FUNC(number_merge_failures, NUMBER_MERGE_FAILURES) -DEF_SHOW_FUNC(sequence_number, SEQUENCE_NUMBER) DEF_SHOW_FUNC(bloom_filter_prefix_checked, BLOOM_FILTER_PREFIX_CHECKED) DEF_SHOW_FUNC(bloom_filter_prefix_useful, BLOOM_FILTER_PREFIX_USEFUL) DEF_SHOW_FUNC(number_reseeks_iteration, NUMBER_OF_RESEEKS_IN_ITERATION) @@ -10209,7 +10642,6 @@ static SHOW_VAR rocksdb_status_vars[]= { DEF_STATUS_VAR(number_multiget_bytes_read), DEF_STATUS_VAR(number_deletes_filtered), DEF_STATUS_VAR(number_merge_failures), - DEF_STATUS_VAR(sequence_number), DEF_STATUS_VAR(bloom_filter_prefix_checked), DEF_STATUS_VAR(bloom_filter_prefix_useful), DEF_STATUS_VAR(number_reseeks_iteration), @@ -10232,6 +10664,9 @@ static SHOW_VAR rocksdb_status_vars[]= { DEF_STATUS_VAR_PTR("snapshot_conflict_errors", &rocksdb_snapshot_conflict_errors, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("wal_group_syncs", + &rocksdb_wal_group_syncs, + SHOW_LONGLONG), DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, 
SHOW_LONGLONG), DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, SHOW_LONGLONG), @@ -10254,48 +10689,61 @@ static SHOW_VAR rocksdb_status_vars[]= { void Rdb_background_thread::run() { + // How many seconds to wait till flushing the WAL next time. + const int WAKE_UP_INTERVAL = 1; + timespec ts_next_sync; clock_gettime(CLOCK_REALTIME, &ts_next_sync); - ts_next_sync.tv_sec++; + ts_next_sync.tv_sec += WAKE_UP_INTERVAL; for (;;) { - // wait for 1 second or until we received a condition to stop the thread + // Wait until the next timeout or until we receive a signal to stop the + // thread. Request to stop the thread should only be triggered when the + // storage engine is being unloaded. mysql_mutex_lock(&m_signal_mutex); - auto ret __attribute__((__unused__)) = mysql_cond_timedwait( - &m_signal_cond, &m_signal_mutex, &ts_next_sync); - // make sure that no program error is returned + const auto ret __attribute__((__unused__)) = mysql_cond_timedwait( + &m_signal_cond, &m_signal_mutex, &ts_next_sync); + + // Check that we receive only the expected error codes. DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); - bool local_stop= m_stop; - bool local_save_stats= m_save_stats; + const bool local_stop= m_stop; + const bool local_save_stats= m_save_stats; reset(); mysql_mutex_unlock(&m_signal_mutex); if (local_stop) { + // If we're here then that's because condition variable was signaled by + // another thread and we're shutting down. Break out the loop to make + // sure that shutdown thread can proceed. break; } + // This path should be taken only when the timer expired. + DBUG_ASSERT(ret == ETIMEDOUT); + if (local_save_stats) { ddl_manager.persist_stats(); } - // Flush the WAL if need be but don't do it more frequent - // than once per second timespec ts; clock_gettime(CLOCK_REALTIME, &ts); - if (ts.tv_sec - ts_next_sync.tv_sec >= 1) + + // Flush the WAL. 
+ if (rdb && rocksdb_background_sync) { - if (rdb && rocksdb_background_sync) - { - DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); - rocksdb::Status s= rdb->SyncWAL(); - if (!s.ok()) - rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); + DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); + const rocksdb::Status s= rdb->SyncWAL(); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); } - ts_next_sync.tv_sec= ts.tv_sec + 1; } + + // Set the next timestamp for mysql_cond_timedwait() (which ends up calling + // pthread_cond_timedwait()) to wait on. + ts_next_sync.tv_sec= ts.tv_sec + WAKE_UP_INTERVAL; } // save remaining stats which might've left unsaved @@ -10322,7 +10770,7 @@ void Rdb_background_thread::run() This is aware of extended keys. */ bool can_use_bloom_filter(THD *thd, - const std::shared_ptr& kd, + const Rdb_key_def& kd, const rocksdb::Slice &eq_cond, const bool use_all_keys, bool is_ascending) @@ -10334,7 +10782,7 @@ bool can_use_bloom_filter(THD *thd, return can_use; } - rocksdb::Options opt = rdb->GetOptions(kd->get_cf()); + rocksdb::Options opt = rdb->GetOptions(kd.get_cf()); if (opt.prefix_extractor) { /* @@ -10379,7 +10827,7 @@ bool can_use_bloom_filter(THD *thd, } /* For modules that need access to the global data structures */ -rocksdb::DB *rdb_get_rocksdb_db() +rocksdb::TransactionDB *rdb_get_rocksdb_db() { return rdb; } @@ -10395,8 +10843,8 @@ rocksdb::BlockBasedTableOptions& rdb_get_table_options() } -int rdb_get_table_perf_counters(const char *tablename, - Rdb_perf_counters *counters) +int rdb_get_table_perf_counters(const char* const tablename, + Rdb_perf_counters* const counters) { DBUG_ASSERT(counters != nullptr); DBUG_ASSERT(tablename != nullptr); @@ -10436,6 +10884,14 @@ void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type) status.ToString().c_str()); break; } + case RDB_IO_ERROR_GENERAL: + { + sql_print_error("RocksDB: Failed on I/O - status %d, %s", + status.code(), status.ToString().c_str()); + 
sql_print_error("RocksDB: Aborting on I/O error."); + abort_with_stack_traces(); + break; + } default: DBUG_ASSERT(0); break; @@ -10463,7 +10919,7 @@ void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type) break; } default: - sql_print_warning("RocksDB: Failed to write to RocksDB " + sql_print_warning("RocksDB: Failed to read/write in RocksDB " "- status %d, %s", status.code(), status.ToString().c_str()); break; @@ -10489,15 +10945,15 @@ Rdb_binlog_manager *rdb_get_binlog_manager(void) void rocksdb_set_compaction_options( - my_core::THD* thd __attribute__((__unused__)), - my_core::st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr, - const void* save) + my_core::THD* const thd __attribute__((__unused__)), + my_core::st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr, + const void* const save) { if (var_ptr && save) { *(uint64_t*)var_ptr = *(const uint64_t*) save; } - Rdb_compact_params params = { + const Rdb_compact_params params = { (uint64_t)rocksdb_compaction_sequential_deletes, (uint64_t)rocksdb_compaction_sequential_deletes_window, (uint64_t)rocksdb_compaction_sequential_deletes_file_size @@ -10508,14 +10964,14 @@ rocksdb_set_compaction_options( } void rocksdb_set_table_stats_sampling_pct( - my_core::THD* thd __attribute__((__unused__)), - my_core::st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr __attribute__((__unused__)), - const void* save) + my_core::THD* const thd __attribute__((__unused__)), + my_core::st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr __attribute__((__unused__)), + const void* const save) { mysql_mutex_lock(&rdb_sysvars_mutex); - uint32_t new_val= *static_cast(save); + const uint32_t new_val= *static_cast(save); if (new_val != rocksdb_table_stats_sampling_pct) { rocksdb_table_stats_sampling_pct = new_val; @@ -10540,12 +10996,12 @@ void rocksdb_set_table_stats_sampling_pct( */ void rocksdb_set_rate_limiter_bytes_per_sec( - 
my_core::THD* thd, - my_core::st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr __attribute__((__unused__)), - const void* save) + my_core::THD* const thd, + my_core::st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr __attribute__((__unused__)), + const void* const save) { - uint64_t new_val= *static_cast(save); + const uint64_t new_val= *static_cast(save); if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) { /* @@ -10567,7 +11023,7 @@ rocksdb_set_rate_limiter_bytes_per_sec( } } -void rdb_set_collation_exception_list(const char *exception_list) +void rdb_set_collation_exception_list(const char* const exception_list) { DBUG_ASSERT(rdb_collation_exceptions != nullptr); @@ -10579,12 +11035,12 @@ void rdb_set_collation_exception_list(const char *exception_list) } void -rocksdb_set_collation_exception_list(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save) +rocksdb_set_collation_exception_list(THD* const thd, + struct st_mysql_sys_var* const var, + void* const var_ptr, + const void* const save) { - const char* val = *static_cast(save); + const char* const val = *static_cast(save); rdb_set_collation_exception_list(val); @@ -10592,16 +11048,16 @@ rocksdb_set_collation_exception_list(THD* thd, } void -rocksdb_set_bulk_load(THD* thd, - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr, - const void* save) +rocksdb_set_bulk_load(THD* const thd, + struct st_mysql_sys_var* const var __attribute__((__unused__)), + void* const var_ptr, + const void* const save) { Rdb_transaction*& tx= get_tx_from_thd(thd); if (tx != nullptr) { - int rc= tx->finish_bulk_load(); + const int rc= tx->finish_bulk_load(); if (rc != 0) { // NO_LINT_DEBUG @@ -10695,5 +11151,7 @@ myrocks::rdb_i_s_perf_context_global, myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, -myrocks::rdb_i_s_index_file_map +myrocks::rdb_i_s_index_file_map, +myrocks::rdb_i_s_lock_info, 
+myrocks::rdb_i_s_trx_info mysql_declare_plugin_end; diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 8f63050e74c..a15ae6dd443 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -62,6 +62,30 @@ namespace myrocks { +/* + * class for exporting transaction information for + * information_schema.rocksdb_trx + */ +struct Rdb_trx_info { + std::string name; + ulonglong trx_id; + ulonglong write_count; + ulonglong lock_count; + int timeout_sec; + std::string state; + std::string waiting_key; + ulonglong waiting_cf_id; + int is_replication; + int skip_trx_api; + int read_only; + int deadlock_detect; + int num_ongoing_bulk_load; + ulong thread_id; + std::string query_str; +}; + +std::vector rdb_get_all_trx_info(); + /* This is - the name of the default Column Family (the CF which stores indexes which @@ -160,7 +184,7 @@ const char * const PER_INDEX_CF_NAME = "$per_index_cf"; #define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) #define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS -inline bool looks_like_per_index_cf_typo(const char *name) +inline bool looks_like_per_index_cf_typo(const char* const name) { return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME)); } @@ -237,13 +261,19 @@ enum operation_type { ROWS_MAX }; +#if defined(HAVE_SCHED_GETCPU) +#define RDB_INDEXER get_sched_indexer_t +#else +#define RDB_INDEXER thread_id_indexer_t +#endif + /* Global statistics struct used inside MyRocks */ struct st_global_stats { - ib_counter_t rows[ROWS_MAX]; + ib_counter_t rows[ROWS_MAX]; // system_rows_ stats are only for system // tables. They are not counted in rows_* stats. 
- ib_counter_t system_rows[ROWS_MAX]; + ib_counter_t system_rows[ROWS_MAX]; }; /* Struct used for exporting status to MySQL */ @@ -270,8 +300,8 @@ namespace std { { std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const { - uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 | - (uint64_t) gl_index_id.index_id); + const uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 | + (uint64_t) gl_index_id.index_id); return std::hash()(val); } }; @@ -296,6 +326,8 @@ class ha_rocksdb: public my_core::handler /* Whether m_scan_it was created with skip_bloom=true */ bool m_scan_it_skips_bloom; + const rocksdb::Snapshot *m_scan_it_snapshot; + Rdb_tbl_def *m_tbl_def; /* Primary Key encoder from KeyTupleFormat to StorageFormat */ @@ -407,42 +439,46 @@ class ha_rocksdb: public my_core::handler /* SST information used for bulk loading the primary key */ std::shared_ptr m_sst_info; + Rdb_transaction *m_bulk_load_tx; + /* Mutex to protect finalizing bulk load */ + mysql_mutex_t m_bulk_load_mutex; /* MySQL index number for duplicate key error */ int m_dupp_errkey; - int create_key_defs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, - const TABLE *old_table_arg= nullptr, - const Rdb_tbl_def *old_tbl_def_arg= nullptr) + int create_key_defs(const TABLE* const table_arg, + Rdb_tbl_def* const tbl_def_arg, + const TABLE* const old_table_arg= nullptr, + const Rdb_tbl_def* const old_tbl_def_arg= nullptr) const __attribute__((__nonnull__(2, 3), __warn_unused_result__)); - int secondary_index_read(const int keyno, uchar *buf) + int secondary_index_read(const int keyno, uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); void setup_iterator_for_rnd_scan(); - void setup_scan_iterator(const std::shared_ptr& kd, - rocksdb::Slice *slice) + void setup_scan_iterator(const Rdb_key_def& kd, rocksdb::Slice* const slice) __attribute__((__nonnull__)) { setup_scan_iterator(kd, slice, false, false, 0); } - bool is_ascending(const std::shared_ptr& keydef, + bool 
is_ascending(const Rdb_key_def& keydef, enum ha_rkey_function find_flag) const __attribute__((__nonnull__, __warn_unused_result__)); - void setup_scan_iterator(const std::shared_ptr& kd, + void setup_scan_iterator(const Rdb_key_def& kd, rocksdb::Slice *slice, const bool use_all_keys, const bool is_ascending, const uint eq_cond_len) __attribute__((__nonnull__)); - void release_scan_iterator(void) - { - delete m_scan_it; - m_scan_it= nullptr; - } + void release_scan_iterator(void); - int get_row_by_rowid(uchar *buf, const char *rowid, + rocksdb::Status get_for_update(Rdb_transaction* const tx, + rocksdb::ColumnFamilyHandle* const column_family, + const rocksdb::Slice& key, + std::string* const value) const; + + int get_row_by_rowid(uchar* const buf, const char* const rowid, const uint rowid_size) __attribute__((__nonnull__, __warn_unused_result__)); - int get_row_by_rowid(uchar *buf, const uchar *rowid, + int get_row_by_rowid(uchar* const buf, const uchar* const rowid, const uint rowid_size) __attribute__((__nonnull__, __warn_unused_result__)) { @@ -454,21 +490,21 @@ class ha_rocksdb: public my_core::handler void load_auto_incr_value(); longlong update_hidden_pk_val(); int load_hidden_pk_value() __attribute__((__warn_unused_result__)); - int read_hidden_pk_id_from_rowkey(longlong *hidden_pk_id) + int read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) __attribute__((__nonnull__, __warn_unused_result__)); - bool can_use_single_delete(uint index) + bool can_use_single_delete(const uint &index) const __attribute__((__warn_unused_result__)); - bool skip_unique_check() __attribute__((__warn_unused_result__)); + bool skip_unique_check() const __attribute__((__warn_unused_result__)); void set_force_skip_unique_check(bool skip) override; bool commit_in_the_middle() __attribute__((__warn_unused_result__)); - bool do_bulk_commit(Rdb_transaction *tx) + bool do_bulk_commit(Rdb_transaction* const tx) __attribute__((__nonnull__, __warn_unused_result__)); - bool 
has_hidden_pk(const TABLE* table) + bool has_hidden_pk(const TABLE* const table) const __attribute__((__nonnull__, __warn_unused_result__)); - void update_row_stats(operation_type type); + void update_row_stats(const operation_type &type); - void set_last_rowkey(const uchar *old_data); + void set_last_rowkey(const uchar* const old_data); /* Array of table->s->fields elements telling how to store fields in the @@ -504,14 +540,15 @@ class ha_rocksdb: public my_core::handler */ uint m_null_bytes_in_rec; - void get_storage_type(Rdb_field_encoder *encoder, uint kp); + void get_storage_type(Rdb_field_encoder* const encoder, const uint &kp); void setup_field_converters(); - int alloc_key_buffers(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) + int alloc_key_buffers(const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) __attribute__((__nonnull__, __warn_unused_result__)); void free_key_buffers(); // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE - rocksdb::Range get_range(int i, uchar buf[]) const; + rocksdb::Range get_range(const int &i, uchar buf[]) const; /* Perf timers for data reads @@ -534,18 +571,20 @@ public: Controls whether writes include checksums. This is updated from the session variable at the start of each query. 
*/ - bool m_store_checksums; + bool m_store_row_debug_checksums; /* Same as above but for verifying checksums when reading */ - bool m_verify_checksums; + bool m_verify_row_debug_checksums; int m_checksums_pct; - ha_rocksdb(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg); + ha_rocksdb(my_core::handlerton* const hton, + my_core::TABLE_SHARE* const table_arg); ~ha_rocksdb() { int err __attribute__((__unused__)); err= finalize_bulk_load(); DBUG_ASSERT(err == 0); + mysql_mutex_destroy(&m_bulk_load_mutex); } /** @brief @@ -614,41 +653,44 @@ public: return true; } - bool should_store_checksums() const + bool should_store_row_debug_checksums() const { - return m_store_checksums && (rand() % 100 < m_checksums_pct); + return m_store_row_debug_checksums && (rand() % 100 < m_checksums_pct); } - int rename_table(const char *from, const char *to) + int rename_table(const char* const from, const char* const to) __attribute__((__nonnull__, __warn_unused_result__)); - int convert_record_from_storage_format(const rocksdb::Slice *key, - const rocksdb::Slice *value, - uchar *buf) + int convert_record_from_storage_format(const rocksdb::Slice* const key, + const rocksdb::Slice* const value, + uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); - int convert_record_from_storage_format(const rocksdb::Slice *key, - uchar *buf) + int convert_record_from_storage_format(const rocksdb::Slice* const key, + uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice, - Rdb_string_writer *pk_unpack_info, - rocksdb::Slice *packed_rec) + Rdb_string_writer* const pk_unpack_info, + rocksdb::Slice* const packed_rec) __attribute__((__nonnull__)); - static const char* get_key_name(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) + static const char* get_key_name(const uint index, + const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) 
__attribute__((__nonnull__, __warn_unused_result__)); - static const char* get_key_comment(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) + static const char* get_key_comment(const uint index, + const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) __attribute__((__nonnull__, __warn_unused_result__)); - static bool is_hidden_pk(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) + static bool is_hidden_pk(const uint index, const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) __attribute__((__nonnull__, __warn_unused_result__)); - static uint pk_index(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg) + static uint pk_index(const TABLE* const table_arg, + const Rdb_tbl_def* const tbl_def_arg) __attribute__((__nonnull__, __warn_unused_result__)); static bool is_pk(const uint index, const TABLE* table_arg, @@ -686,52 +728,53 @@ public: ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } /* At the moment, we're ok with default handler::index_init() implementation. 
*/ - int index_read_map(uchar * buf, const uchar * key, + int index_read_map(uchar* const buf, const uchar* const key, key_part_map keypart_map, enum ha_rkey_function find_flag); __attribute__((__warn_unused_result__)); - int index_read_map_impl(uchar * buf, const uchar * key, + int index_read_map_impl(uchar* const buf, const uchar* const key, key_part_map keypart_map, enum ha_rkey_function find_flag, - const key_range *end_key) + const key_range* end_key) __attribute__((__warn_unused_result__));; - int index_read_last_map(uchar * buf, const uchar * key, + int index_read_last_map(uchar* const buf, const uchar* const key, key_part_map keypart_map) __attribute__((__warn_unused_result__));; - int read_range_first(const key_range *start_key, - const key_range *end_key, + int read_range_first(const key_range* const start_key, + const key_range* const end_key, bool eq_range, bool sorted) __attribute__((__warn_unused_result__));; virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; } virtual double read_time(uint, uint, ha_rows rows) override; - int open(const char *name, int mode, uint test_if_locked) + int open(const char* const name, int mode, uint test_if_locked) __attribute__((__warn_unused_result__)); int close(void) __attribute__((__warn_unused_result__)); - int write_row(uchar *buf) __attribute__((__warn_unused_result__)); - int update_row(const uchar *old_data, uchar *new_data) + int write_row(uchar* const buf) __attribute__((__warn_unused_result__)); + int update_row(const uchar* const old_data, uchar* const new_data) + __attribute__((__warn_unused_result__)); + int delete_row(const uchar* const buf) __attribute__((__warn_unused_result__)); - int delete_row(const uchar *buf) __attribute__((__warn_unused_result__)); rocksdb::Status delete_or_singledelete(uint index, - Rdb_transaction *tx, - rocksdb::ColumnFamilyHandle* cf, + Rdb_transaction* const tx, + rocksdb::ColumnFamilyHandle* const cf, const rocksdb::Slice& key) 
__attribute__((__warn_unused_result__)); - int index_next(uchar *buf) __attribute__((__warn_unused_result__)); - int index_next_with_direction(uchar *buf, bool move_forward) + int index_next(uchar* const buf) __attribute__((__warn_unused_result__)); + int index_next_with_direction(uchar* const buf, bool move_forward) __attribute__((__warn_unused_result__)); - int index_prev(uchar *buf) __attribute__((__warn_unused_result__)); + int index_prev(uchar* const buf) __attribute__((__warn_unused_result__)); - int index_first(uchar *buf) __attribute__((__warn_unused_result__)); - int index_last(uchar *buf) __attribute__((__warn_unused_result__)); + int index_first(uchar* const buf) __attribute__((__warn_unused_result__)); + int index_last(uchar* const buf) __attribute__((__warn_unused_result__)); - class Item* idx_cond_push(uint keyno, class Item* idx_cond); + class Item* idx_cond_push(uint keyno, class Item* const idx_cond); /* Default implementation from cancel_pushed_idx_cond() suits us */ @@ -758,31 +801,32 @@ private: bool skip_unique_check; }; - int create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg, - std::array* cfs); + int create_cfs(const TABLE* const table_arg, Rdb_tbl_def* const tbl_def_arg, + std::array* const cfs) const; __attribute__((__nonnull__, __warn_unused_result__)); - int create_key_def(const TABLE *table_arg, uint i, - const Rdb_tbl_def* tbl_def_arg, - std::shared_ptr* new_key_def, - const struct key_def_cf_info& cf_info); + int create_key_def(const TABLE* const table_arg, const uint &i, + const Rdb_tbl_def* const tbl_def_arg, + std::shared_ptr* const new_key_def, + const struct key_def_cf_info& cf_info) const; __attribute__((__nonnull__, __warn_unused_result__)); - int create_inplace_key_defs(const TABLE *table_arg, - Rdb_tbl_def *tbl_def_arg, - const TABLE *old_table_arg, - const Rdb_tbl_def *old_tbl_def_arg, - const std::array& cfs); + int create_inplace_key_defs(const TABLE* const table_arg, + Rdb_tbl_def* vtbl_def_arg, + const TABLE* 
const old_table_arg, + const Rdb_tbl_def* const old_tbl_def_arg, + const std::array& cfs) const; __attribute__((__nonnull__, __warn_unused_result__)); std::unordered_map get_old_key_positions( const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg, const TABLE* old_table_arg, - const Rdb_tbl_def* old_tbl_def_arg) + const Rdb_tbl_def* old_tbl_def_arg) const __attribute__((__nonnull__)); - int compare_key_parts(const KEY* old_key, const KEY* new_key); + int compare_key_parts(const KEY* const old_key, + const KEY* const new_key) const; __attribute__((__nonnull__, __warn_unused_result__)); int index_first_intern(uchar *buf) @@ -790,89 +834,99 @@ private: int index_last_intern(uchar *buf) __attribute__((__nonnull__, __warn_unused_result__)); - enum icp_result check_index_cond(); - int find_icp_matching_index_rec(bool move_forward, uchar *buf) + enum icp_result check_index_cond() const; + int find_icp_matching_index_rec(const bool &move_forward, uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); void calc_updated_indexes(); - int update_write_row(const uchar *old_data, const uchar *new_data, + int update_write_row(const uchar* const old_data, const uchar* const new_data, const bool skip_unique_check) __attribute__((__warn_unused_result__)); - int get_pk_for_update(struct update_row_info* row_info); - int check_and_lock_unique_pk(uint key_id, + int get_pk_for_update(struct update_row_info* const row_info); + int check_and_lock_unique_pk(const uint &key_id, const struct update_row_info& row_info, - bool* found, bool* pk_changed) + bool* const found, bool* const pk_changed) __attribute__((__warn_unused_result__)); - int check_and_lock_sk(uint key_id, const struct update_row_info& row_info, - bool* found) const + int check_and_lock_sk(const uint &key_id, + const struct update_row_info& row_info, + bool* const found) const __attribute__((__warn_unused_result__)); int check_uniqueness_and_lock(const struct update_row_info& row_info, - bool* pk_changed) + 
bool* const pk_changed) __attribute__((__warn_unused_result__)); bool over_bulk_load_threshold(int* err) __attribute__((__warn_unused_result__)); - int bulk_load_key(Rdb_transaction* tx, - const std::shared_ptr& kd, + int bulk_load_key(Rdb_transaction* const tx, + const Rdb_key_def& kd, const rocksdb::Slice& key, const rocksdb::Slice& value) __attribute__((__nonnull__, __warn_unused_result__)); - int update_pk(const std::shared_ptr& kd, + int update_pk(const Rdb_key_def& kd, const struct update_row_info& row_info, - bool pk_changed) + const bool &pk_changed) __attribute__((__warn_unused_result__)); - int update_sk(const TABLE* table_arg, - const std::shared_ptr& kd, + int update_sk(const TABLE* const table_arg, + const Rdb_key_def& kd, const struct update_row_info& row_info) __attribute__((__warn_unused_result__)); - int update_indexes(const struct update_row_info& row_info, bool pk_changed) + int update_indexes(const struct update_row_info& row_info, + const bool &pk_changed) __attribute__((__warn_unused_result__)); - int read_key_exact(const std::shared_ptr& kd, - rocksdb::Iterator* iter, bool using_full_key, + int read_key_exact(const Rdb_key_def& kd, + rocksdb::Iterator* const iter, const bool &using_full_key, const rocksdb::Slice& key_slice) const __attribute__((__nonnull__, __warn_unused_result__)); - int read_before_key(const std::shared_ptr& kd, - bool using_full_key, const rocksdb::Slice& key_slice) + int read_before_key(const Rdb_key_def& kd, + const bool &using_full_key, + const rocksdb::Slice& key_slice) __attribute__((__nonnull__, __warn_unused_result__)); - int read_after_key(const std::shared_ptr& kd, - bool using_full_key, const rocksdb::Slice& key_slice) + int read_after_key(const Rdb_key_def& kd, + const bool &using_full_key, + const rocksdb::Slice& key_slice) __attribute__((__nonnull__, __warn_unused_result__)); - int position_to_correct_key(const std::shared_ptr& kd, - enum ha_rkey_function find_flag, - bool full_key_match, const uchar* key, - 
key_part_map keypart_map, + int position_to_correct_key(const Rdb_key_def& kd, + const enum ha_rkey_function &find_flag, + const bool &full_key_match, + const uchar* const key, + const key_part_map &keypart_map, const rocksdb::Slice& key_slice, - bool* move_forward) + bool* const move_forward) __attribute__((__warn_unused_result__)); - int read_row_from_primary_key(uchar* buf) + int read_row_from_primary_key(uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); - int read_row_from_secondary_key(uchar* buf, - const std::shared_ptr& kd, + int read_row_from_secondary_key(uchar* const buf, + const Rdb_key_def& kd, bool move_forward) __attribute__((__nonnull__, __warn_unused_result__)); int try_keyonly_read_from_sk(uchar* buf, - const std::shared_ptr& kd, + const Rdb_key_def& kd, const rocksdb::Slice& key, const rocksdb::Slice& value, uint rowid_size) __attribute__((__nonnull__, __warn_unused_result__)); - int calc_eq_cond_len(const std::shared_ptr& kd, - enum ha_rkey_function find_flag, - const rocksdb::Slice& slice, int bytes_changed_by_succ, - const key_range *end_key, uint* end_key_packed_size) + int calc_eq_cond_len(const Rdb_key_def& kd, + const enum ha_rkey_function &find_flag, + const rocksdb::Slice& slice, + const int &bytes_changed_by_succ, + const key_range* const end_key, + uint* const end_key_packed_size) __attribute__((__warn_unused_result__)); - Rdb_tbl_def* get_table_if_exists(const char* tablename) + Rdb_tbl_def* get_table_if_exists(const char* const tablename) __attribute__((__nonnull__, __warn_unused_result__)); - void read_thd_vars(THD *thd) + void read_thd_vars(THD* const thd) __attribute__((__nonnull__)); - bool contains_foreign_key(THD* thd) + const char* thd_rocksdb_tmpdir() __attribute__((__nonnull__, __warn_unused_result__)); - int inplace_populate_sk(const TABLE* table_arg, + bool contains_foreign_key(THD* const thd) + __attribute__((__nonnull__, __warn_unused_result__)); + + int inplace_populate_sk(const TABLE* const 
table_arg, const std::unordered_set>& indexes); public: @@ -892,12 +946,13 @@ public: int rnd_init(bool scan) __attribute__((__warn_unused_result__)); int rnd_end() __attribute__((__warn_unused_result__)); - int rnd_next(uchar *buf) __attribute__((__warn_unused_result__)); - int rnd_next_with_direction(uchar *buf, bool move_forward) + int rnd_next(uchar* const buf) __attribute__((__warn_unused_result__)); + int rnd_next_with_direction(uchar* const buf, bool move_forward) __attribute__((__warn_unused_result__)); - int rnd_pos(uchar *buf, uchar *pos) __attribute__((__warn_unused_result__)); - void position(const uchar *record); + int rnd_pos(uchar* const buf, uchar* const pos) + __attribute__((__warn_unused_result__)); + void position(const uchar* const record); int info(uint) override; /* This function will always return success, therefore no annotation related @@ -905,11 +960,11 @@ public: * required by the interface. */ int extra(enum ha_extra_function operation); - int start_stmt(THD *thd, thr_lock_type lock_type) + int start_stmt(THD* const thd, thr_lock_type lock_type) __attribute__((__warn_unused_result__)); - int external_lock(THD *thd, int lock_type) + int external_lock(THD* const thd, int lock_type) __attribute__((__warn_unused_result__)); - int delete_all_rows(ha_rows* nrows) __attribute__((__warn_unused_result__)); + int delete_all_rows(ha_rows* const nrows) __attribute__((__warn_unused_result__)); int truncate() __attribute__((__warn_unused_result__)); int reset() override @@ -919,64 +974,67 @@ public: return 0; } - int check(THD* thd, HA_CHECK_OPT* check_opt) + int check(THD* const thd, HA_CHECK_OPT* const check_opt) __attribute__((__warn_unused_result__)); - void remove_rows(Rdb_tbl_def *tbl); - ha_rows records_in_range(uint inx, key_range *min_key, - key_range *max_key) + void remove_rows(Rdb_tbl_def* const tbl); + ha_rows records_in_range(uint inx, key_range* const min_key, + key_range* const max_key) __attribute__((__warn_unused_result__)); - int 
delete_table(const char *from) __attribute__((__warn_unused_result__)); - int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) + int delete_table(const char* const from) __attribute__((__warn_unused_result__)); + int create(const char* const name, TABLE* const form, + HA_CREATE_INFO* const create_info) __attribute__((__warn_unused_result__)); - bool check_if_incompatible_data(HA_CREATE_INFO *info, + bool check_if_incompatible_data(HA_CREATE_INFO* const info, uint table_changes) __attribute__((__warn_unused_result__)); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + THR_LOCK_DATA **store_lock(THD* const thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) __attribute__((__warn_unused_result__)); - my_bool register_query_cache_table(THD *thd, char *table_key, + my_bool register_query_cache_table(THD* const thd, char* const table_key, uint key_length, - qc_engine_callback - *engine_callback, - ulonglong *engine_data) + qc_engine_callback* const engine_callback, + ulonglong* const engine_data) { /* Currently, we don't support query cache */ return FALSE; } - bool get_error_message(const int error, String *buf) + bool get_error_message(const int error, String* const buf) __attribute__((__nonnull__)); void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values); - void update_create_info(HA_CREATE_INFO *create_info); - int optimize(THD *thd, HA_CHECK_OPT *check_opt) + ulonglong* const first_value, + ulonglong* const nb_reserved_values); + void update_create_info(HA_CREATE_INFO* const create_info); + int optimize(THD* const thd, HA_CHECK_OPT* const check_opt) __attribute__((__warn_unused_result__)); - int analyze(THD* thd, HA_CHECK_OPT* check_opt) + int analyze(THD* const thd, HA_CHECK_OPT* const check_opt) + __attribute__((__warn_unused_result__)); + int calculate_stats(const TABLE* const table_arg, THD* const thd, + HA_CHECK_OPT* const check_opt) 
__attribute__((__warn_unused_result__)); enum_alter_inplace_result check_if_supported_inplace_alter( TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info) override; + my_core::Alter_inplace_info* const ha_alter_info) override; - bool prepare_inplace_alter_table(TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info); + bool prepare_inplace_alter_table(TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info); - bool inplace_alter_table(TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info); + bool inplace_alter_table(TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info); - bool commit_inplace_alter_table(TABLE *altered_table, - my_core::Alter_inplace_info *ha_alter_info, - bool commit); + bool commit_inplace_alter_table(TABLE* const altered_table, + my_core::Alter_inplace_info* const ha_alter_info, + bool commit); int finalize_bulk_load() __attribute__((__warn_unused_result__)); - void set_use_read_free_rpl(const char* whitelist); - void set_skip_unique_check_tables(const char* whitelist); + void set_use_read_free_rpl(const char* const whitelist); + void set_skip_unique_check_tables(const char* const whitelist); public: virtual void rpl_before_delete_rows() override; @@ -1000,10 +1058,10 @@ public: struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx { /* The new table definition */ - Rdb_tbl_def* m_new_tdef; + Rdb_tbl_def* const m_new_tdef; /* Stores the original key definitions */ - std::shared_ptr* m_old_key_descr; + std::shared_ptr* const m_old_key_descr; /* Stores the new key definitions */ std::shared_ptr* m_new_key_descr; @@ -1015,10 +1073,10 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx const uint m_new_n_keys; /* Stores the added key glids */ - std::unordered_set> m_added_indexes; + const std::unordered_set> m_added_indexes; /* Stores the dropped key glids */ - std::unordered_set m_dropped_index_ids; + const 
std::unordered_set m_dropped_index_ids; /* Stores number of keys to add */ const uint m_n_added_keys; diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h index df41d2de619..be7b5518423 100644 --- a/storage/rocksdb/ha_rocksdb_proto.h +++ b/storage/rocksdb/ha_rocksdb_proto.h @@ -24,13 +24,15 @@ /* RocksDB includes */ #include "rocksdb/table.h" +#include "rocksdb/utilities/transaction_db.h" namespace myrocks { enum RDB_IO_ERROR_TYPE { RDB_IO_ERROR_TX_COMMIT, RDB_IO_ERROR_DICT_COMMIT, - RDB_IO_ERROR_BG_THREAD + RDB_IO_ERROR_BG_THREAD, + RDB_IO_ERROR_GENERAL }; void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type); @@ -58,7 +60,7 @@ void rdb_queue_save_stats_request(); Access to singleton objects. */ -rocksdb::DB *rdb_get_rocksdb_db(); +rocksdb::TransactionDB *rdb_get_rocksdb_db(); class Rdb_cf_manager; Rdb_cf_manager& rdb_get_cf_manager(); diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h index a1333e8dfd4..dd6dd138e56 100644 --- a/storage/rocksdb/logger.h +++ b/storage/rocksdb/logger.h @@ -61,7 +61,7 @@ class Rdb_logger : public rocksdb::Logger Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap); } - void SetRocksDBLogger(std::shared_ptr logger) + void SetRocksDBLogger(const std::shared_ptr logger) { m_logger = logger; } diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc index bd9af241e5c..c23717c4fda 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc @@ -59,8 +59,9 @@ eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; connection con1; ---error ER_LOCK_DEADLOCK +--error 0,ER_LOCK_DEADLOCK reap; +--echo ERROR: $mysql_errno connection default; disconnect con1; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc 
b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc index 3e4f6350b79..b77a54e4360 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc @@ -64,8 +64,9 @@ DELETE FROM t0 WHERE id=190000; COMMIT; connection con1; ---error ER_LOCK_DEADLOCK +--error 0,ER_LOCK_DEADLOCK reap; +--echo ERROR: $mysql_errno COMMIT; connection default; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc index 4cb5cae15aa..9494146ba5c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc @@ -64,8 +64,9 @@ UPDATE t0 SET id=200001 WHERE id=190000; COMMIT; connection con1; ---error ER_LOCK_DEADLOCK +--error 0,ER_LOCK_DEADLOCK reap; +--echo ERROR: $mysql_errno COMMIT; connection default; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result new file mode 100644 index 00000000000..bfa06f88011 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result @@ -0,0 +1,44 @@ +# Disable for valgrind because this takes too long +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; +# 2PC enabled, MyRocks durability enabled +SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_write_sync=1; +## 2PC + durability + single thread +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 1000 then 'true' else 'false' end +true +## 2PC + 
durability + group commit +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end +true +# 2PC enabled, MyRocks durability disabled +SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_write_sync=0; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +# 2PC disabled, MyRocks durability enabled +SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_write_sync=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end 
+true +SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_write_sync=0; +DROP TABLE t1; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result index 4a707d3a6f4..a7d381fbdb1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -276,103 +276,121 @@ SELECT COUNT(*) FROM t1; COUNT(*) 100 DROP TABLE t1; -CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; -INSERT INTO t1 (a, b) VALUES (1, 5); -INSERT INTO t1 (a, b) VALUES (2, 6); -INSERT INTO t1 (a, b) VALUES (3, 7); -# crash_during_online_index_creation -flush logs; -SET SESSION debug="+d,crash_during_online_index_creation"; -ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; -ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_online_index_creation"; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` int(11) DEFAULT NULL, - KEY `ka` (`a`), - KEY `kab` (`a`,`b`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -CHECK TABLE t1; -Table Op Msg_type Msg_text -test.t1 check status OK -DROP TABLE t1; -CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; -# crash_during_index_creation_partition -flush logs; -SET SESSION debug="+d,crash_during_index_creation_partition"; -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; -ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_index_creation_partition"; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', - `j` int(11) DEFAULT NULL, - `k` int(11) DEFAULT NULL, - PRIMARY KEY (`i`), - KEY `j` (`j`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY KEY (i) -PARTITIONS 4 */ -ALTER TABLE t1 ADD INDEX kij(i,j), 
ALGORITHM=INPLACE; -SELECT * FROM t1 ORDER BY i LIMIT 10; -i j k -1 1 1 -2 2 2 -3 3 3 -4 4 4 -5 5 5 -6 6 6 -7 7 7 -8 8 8 -9 9 9 -10 10 10 -SELECT COUNT(*) FROM t1; -COUNT(*) -100 -DROP TABLE t1; -CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; -# crash_during_index_creation_partition -flush logs; -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; -# expected assertion failure from sql layer here for alter rollback -call mtr.add_suppression("Assertion `0' failed."); -call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; -ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', - `j` int(11) DEFAULT NULL, - `k` int(11) DEFAULT NULL, - PRIMARY KEY (`i`), - KEY `j` (`j`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY KEY (i) -PARTITIONS 4 */ -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', - `j` int(11) DEFAULT NULL, - `k` int(11) DEFAULT NULL, - PRIMARY KEY (`i`), - KEY `j` (`j`), - KEY `kij` (`i`,`j`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY KEY (i) -PARTITIONS 4 */ -SELECT COUNT(*) FROM t1; -COUNT(*) -100 -DROP TABLE t1; CREATE TABLE t1 (a INT, b TEXT); ALTER TABLE t1 ADD KEY kb(b(10)); ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (binary, latin1_bin, utf8_bin). 
ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; +set global rocksdb_bulk_load=1; +# Establish connection con1 (user=root) +# Switch to connection con1 +show global variables like 'rocksdb_bulk_load'; +Variable_name Value +rocksdb_bulk_load ON +show session variables like 'rocksdb_bulk_load'; +Variable_name Value +rocksdb_bulk_load ON +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,1); +# Disconnecting on con1 +# Establish connection con2 (user=root) +# Switch to connection con2 +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); +COUNT(*) +1 +DROP TABLE t1; +# Establish connection con1 (user=root) +# Establish connection con2 (user=root) +# Switch to connection con1 +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +set rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); +# Switch to connection con2 +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +0 +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); +COUNT(*) +1 +set global rocksdb_bulk_load=0; +DROP TABLE t1; +SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size; +SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size; +SET global rocksdb_strict_collation_check = off; +SET session rocksdb_merge_combine_read_size = 566; +SET session rocksdb_merge_buf_size = 336; +show variables like '%rocksdb_bulk_load%'; +Variable_name Value +rocksdb_bulk_load OFF +rocksdb_bulk_load_size 1000 +CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB; +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +ALTER TABLE t1 ADD INDEX 
ka(a), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(80) DEFAULT NULL, + KEY `ka` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > ""; +a +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +DROP TABLE t1; +SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size; +SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size; +SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +set global rocksdb_force_flush_memtable_now=1; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +larger +1 +larger +1 +Table Op Msg_type Msg_text +test.t1 analyze status OK +larger +1 +larger +1 +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +select 1300 < 1300 * 1.5 as "same"; +same +1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result new file mode 100644 index 00000000000..987b34948e8 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result @@ -0,0 +1,96 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +# crash_during_online_index_creation +flush logs; +SET SESSION debug="+d,crash_during_online_index_creation"; +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug="-d,crash_during_online_index_creation"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `ka` (`a`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION debug="+d,crash_during_index_creation_partition"; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug="-d,crash_during_index_creation_partition"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION 
debug="+d,myrocks_simulate_index_create_rollback"; +# expected assertion failure from sql layer here for alter rollback +call mtr.add_suppression("Assertion `0' failed."); +call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL DEFAULT '0', + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY KEY (i) +PARTITIONS 4 */ +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result deleted file mode 100644 index d15566f5a2c..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result +++ /dev/null @@ -1 +0,0 @@ - RocksDB: Can't disable allow_os_buffer if allow_mmap_reads is enabled diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result new file mode 100644 index 00000000000..aefef4fbf38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result @@ -0,0 +1,24 @@ +#--------------------------- +# two threads inserting simultaneously with increment > 1 +# Issue #390 +#--------------------------- +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) 
ENGINE=rocksdb; +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +INSERT INTO t1 VALUES(NULL); +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT'; +INSERT INTO t1 VALUES(NULL); +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT'; +INSERT INTO t1 VALUES(NULL); +SET debug_sync='now WAIT_FOR parked1'; +SET debug_sync='now WAIT_FOR parked2'; +SET debug_sync='now SIGNAL go'; +SET debug_sync='RESET'; +SELECT * FROM t1; +a +1 +3 +5 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result new file mode 100644 index 00000000000..652515c6c09 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result @@ -0,0 +1,53 @@ +#--------------------------- +# ten threads inserting simultaneously with increment > 1 +# Issue #390 +#--------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb; +SET auto_increment_increment = 100; +SET auto_increment_offset = 9 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 8 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 7 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 6 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 5 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 4 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 3 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 2 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 1 + 1; +SET auto_increment_increment = 100; +SET auto_increment_offset = 0 + 1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; 
+LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t1; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000000 +SELECT thr, COUNT(pk) FROM t1 GROUP BY thr; +thr COUNT(pk) +0 100000 +1 100000 +2 100000 +3 100000 +4 100000 +5 100000 +6 100000 +7 100000 +8 100000 +9 100000 +SELECT * FROM t1 ORDER BY pk INTO OUTFILE ; +All pk values matched their expected values +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result index 4440cb3ea8d..50b73a98111 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -17,6 +17,21 @@ LOAD DATA INFILE INTO TABLE t1; LOAD DATA INFILE INTO TABLE t2; LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned select 
count(pk) from t1; count(pk) 10000000 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result new file mode 100644 index 00000000000..4d64d12816f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS a; +create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb; +set rocksdb_bulk_load=1; +set rocksdb_commit_in_the_middle=1; +alter table a add index v (value) COMMENT 'cf_a'; +set rocksdb_bulk_load=0; +set rocksdb_commit_in_the_middle=0; +select count(*) from a force index(primary); +count(*) +100000 +select count(*) from a force index(v); +count(*) +100000 +DROP TABLE a; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result index 28c200ebf30..20ac751f582 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -16,13 +16,13 @@ pk col1 1 1 2 2 3 3 -set @tmp1=@@rocksdb_verify_checksums; -set rocksdb_verify_checksums=1; +set @tmp1=@@rocksdb_verify_row_debug_checksums; +set rocksdb_verify_row_debug_checksums=1; set session debug= "+d,myrocks_simulate_bad_row_read1"; select * from t1 where pk=1; ERROR HY000: Got error 122 from storage engine set session debug= "-d,myrocks_simulate_bad_row_read1"; -set rocksdb_verify_checksums=@tmp1; +set rocksdb_verify_row_debug_checksums=@tmp1; select * from t1 where pk=1; pk col1 1 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index.result b/storage/rocksdb/mysql-test/rocksdb/r/index.result index f61bad7c4a9..99390c8ceb2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/index.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/index.result @@ -40,3 +40,23 @@ t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE t1 1 a 1 a A # NULL NULL YES LSMTREE 
simple index on a ALTER TABLE t1 DROP KEY a; DROP TABLE t1; +# +# Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int not null, +a int not null, +b int not null, +primary key(pk), +key(a) +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; +# This must have type=range, index=a, and must not have 'Using filesort': +explain select * from t2 force index (a) where a=0 and pk>=3 order by pk; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using index condition +drop table t0,t1,t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result new file mode 100644 index 00000000000..d0f1221e472 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); +create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb; +insert into t2 values (1); +insert into t2 values (2); +set autocommit=0; +select * from t1 for update; +a +1 +2 +select * from t2 for update; +a +1 +2 +use information_schema; +select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key +from rocksdb_locks +left join rocksdb_ddl +on rocksdb_locks.column_family_id=rocksdb_ddl.column_family +order by rocksdb_ddl.cf; +cf transaction_id key +lock_info_cf2 _txn_id_ _key_ +lock_info_cf2 _txn_id_ _key_ +lock_into_cf1 _txn_id_ _key_ +lock_into_cf1 _txn_id_ _key_ +use test; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result 
b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result index 4b237dcb7aa..6df4d44f72b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result @@ -229,7 +229,7 @@ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR: 1213 DROP TABLE t0; ----------------------------------------------------------------------- @@ -244,7 +244,9 @@ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT * FROM t0 WHERE value > 0 FOR UPDATE; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +id value +190000 1 +ERROR: 0 DROP TABLE t0; ----------------------------------------------------------------------- @@ -293,7 +295,7 @@ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; DELETE FROM t0 WHERE id=190000; COMMIT; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR: 1213 COMMIT; DROP TABLE t0; @@ -313,7 +315,8 @@ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; DELETE FROM t0 WHERE id=190000; COMMIT; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +id value +ERROR: 0 COMMIT; DROP TABLE t0; @@ -333,7 +336,7 @@ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; BEGIN; UPDATE t0 SET id=200001 WHERE id=190000; COMMIT; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR: 1213 COMMIT; DROP TABLE t0; @@ -353,7 +356,8 @@ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN; UPDATE t0 SET id=200001 WHERE id=190000; COMMIT; -ERROR 40001: Deadlock found when trying to get lock; try restarting 
transaction +id value +ERROR: 0 COMMIT; DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result new file mode 100644 index 00000000000..27b1779627b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result @@ -0,0 +1,281 @@ +set optimizer_switch='index_merge_sort_union=off'; +create table t (a int, b int, c int, d int, e int, primary key(a, b, c, d), key(b, d)) engine=rocksdb; +analyze table t; +Table Op Msg_type Msg_text +test.t analyze status OK +show indexes from t; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t 0 PRIMARY 1 a A 100 NULL NULL LSMTREE +t 0 PRIMARY 2 b A 500 NULL NULL LSMTREE +t 0 PRIMARY 3 c A 2500 NULL NULL LSMTREE +t 0 PRIMARY 4 d A 2500 NULL NULL LSMTREE +t 1 b 1 b A 50 NULL NULL LSMTREE +t 1 b 2 d A 500 NULL NULL LSMTREE +set optimizer_switch = 'skip_scan=off'; +explain select b, d from t where d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, d from t where d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +rows_read +260 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select b, d from t where d > 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, d from t where d > 4; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +rows_read +1509 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +126 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select e from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select e from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where +rows_read +251 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +51 
+include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select e from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select e from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where +rows_read +251 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index +rows_read +502 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +102 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index +rows_read +753 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +153 +include/diff_tables.inc [temp_orig, temp_skip] +set 
optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +rows_read +204 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +44 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +rows_read +765 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +165 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using where; Using index +rows_read +51 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 
NULL # Using where; Using index for skip scan +rows_read +11 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a+1, b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a+1, b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +101 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +101 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = b and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = b and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan 
+rows_read +9 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=on'; +set optimizer_trace = 'enabled=on'; +explain select a, b, c, d from t where a = 5 and d < 3 order by b, c, d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%order_attribute_not_prefix_in_index%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and d >= 98 and e = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +select count(*) from information_schema.optimizer_trace where trace like '%query_references_nonkey_column%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or b = 2 or d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where ((a = 5 and b = 2) or a = 2) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +select 
count(*) from information_schema.optimizer_trace where trace like '%keypart_in_disjunctive_query%'; +count(*) +1 +explain select a, b, c, d from t where a > 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 4 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and (d >= 98 or d < 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%range_predicate_too_complex%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and b = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_predicate%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and c > 2 and d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%too_many_range_predicates%'; +count(*) +1 +explain select a, b, c, d from t where (a < 1 or a = 4 or a = 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%'; +count(*) +1 +set optimizer_trace = 'enabled=off'; +set optimizer_switch= 'skip_scan=off'; +drop table t; +set optimizer_switch='index_merge_sort_union=on'; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 
55388c65b99..b6a17d90221 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -862,10 +862,9 @@ rocksdb_advise_random_on_open ON rocksdb_allow_concurrent_memtable_write OFF rocksdb_allow_mmap_reads OFF rocksdb_allow_mmap_writes OFF -rocksdb_allow_os_buffer ON rocksdb_background_sync OFF rocksdb_base_background_compactions 1 -rocksdb_block_cache_size 8388608 +rocksdb_block_cache_size 536870912 rocksdb_block_restart_interval 16 rocksdb_block_size 4096 rocksdb_block_size_deviation 10 @@ -887,6 +886,7 @@ rocksdb_create_if_missing ON rocksdb_create_missing_column_families OFF rocksdb_datadir ./.rocksdb rocksdb_db_write_buffer_size 0 +rocksdb_deadlock_detect OFF rocksdb_debug_optimizer_no_zero_cardinality ON rocksdb_default_cf_options rocksdb_delete_obsolete_files_period_micros 21600000000 @@ -925,6 +925,7 @@ rocksdb_paranoid_checks ON rocksdb_pause_background_work ON rocksdb_perf_context_level 0 rocksdb_pin_l0_filter_and_index_blocks_in_cache ON +rocksdb_print_snapshot_conflict_queries OFF rocksdb_rate_limiter_bytes_per_sec 0 rocksdb_read_free_rpl_tables rocksdb_records_in_range 50 @@ -936,16 +937,20 @@ rocksdb_skip_fill_cache OFF rocksdb_skip_unique_check OFF rocksdb_skip_unique_check_tables .* rocksdb_stats_dump_period_sec 600 -rocksdb_store_checksums OFF +rocksdb_store_row_debug_checksums OFF rocksdb_strict_collation_check OFF rocksdb_strict_collation_exceptions rocksdb_table_cache_numshardbits 6 rocksdb_table_stats_sampling_pct 10 +rocksdb_tmpdir +rocksdb_trace_sst_api OFF rocksdb_unsafe_for_binlog OFF rocksdb_use_adaptive_mutex OFF +rocksdb_use_direct_reads OFF +rocksdb_use_direct_writes OFF rocksdb_use_fsync OFF rocksdb_validate_tables 1 -rocksdb_verify_checksums OFF +rocksdb_verify_row_debug_checksums OFF rocksdb_wal_bytes_per_sync 0 rocksdb_wal_dir rocksdb_wal_recovery_mode 2 @@ -985,7 +990,7 @@ insert into t49 values (1,10),(2,20); begin; update t49 set a = 100 where pk = 1; connect 
con1,localhost,root,,; -set rocksdb_lock_wait_timeout=5000; +set rocksdb_lock_wait_timeout=60; set @var1= to_seconds(now()); update t49 set a = 1000 where pk = 1; connect con2,localhost,root,,; @@ -993,9 +998,7 @@ kill query $con1_id; connection con1; ERROR 70100: Query execution was interrupted set @var2= to_seconds(now()); -"[Jay Edgar] I've updated this query to help determine why it is sometimes failing" -"(t13541934). If you get an error here (i.e. not 'passed') notify me." -select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result'; +select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result'; result passed connection default; @@ -1297,7 +1300,7 @@ insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables set @tmp1= @@rocksdb_max_row_locks; set rocksdb_max_row_locks= 20; update t1 set a=a+10; -ERROR HY000: Internal error: Operation aborted: Number of locks held by the transaction exceeded @@rocksdb_max_row_locks +ERROR HY000: Got error 196 'Number of locks held reached @@rocksdb_max_row_locks.' 
from ROCKSDB DROP TABLE t1; # # Test AUTO_INCREMENT behavior problem, @@ -1465,9 +1468,9 @@ rocksdb_number_superversion_acquires # rocksdb_number_superversion_cleanups # rocksdb_number_superversion_releases # rocksdb_rate_limit_delay_millis # -rocksdb_sequence_number # rocksdb_snapshot_conflict_errors # rocksdb_wal_bytes # +rocksdb_wal_group_syncs # rocksdb_wal_synced # rocksdb_write_other # rocksdb_write_self # @@ -1537,9 +1540,9 @@ ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_RATE_LIMIT_DELAY_MILLIS -ROCKSDB_SEQUENCE_NUMBER ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_WAL_BYTES +ROCKSDB_WAL_GROUP_SYNCS ROCKSDB_WAL_SYNCED ROCKSDB_WRITE_OTHER ROCKSDB_WRITE_SELF @@ -1611,9 +1614,9 @@ ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_RATE_LIMIT_DELAY_MILLIS -ROCKSDB_SEQUENCE_NUMBER ROCKSDB_SNAPSHOT_CONFLICT_ERRORS ROCKSDB_WAL_BYTES +ROCKSDB_WAL_GROUP_SYNCS ROCKSDB_WAL_SYNCED ROCKSDB_WRITE_OTHER ROCKSDB_WRITE_SELF @@ -2453,4 +2456,24 @@ a 10 11 DROP TABLE t1; +# +# Issue #411: Setting rocksdb_commit_in_the_middle commits transaction +# without releasing iterator +# +CREATE TABLE t1 (id1 bigint(20), +id2 bigint(20), +id3 bigint(20), +PRIMARY KEY (id1, id2, id3)) +DEFAULT CHARSET=latin1; +CREATE TABLE t2 (id1 bigint(20), +id2 bigint(20), +PRIMARY KEY (id1, id2)) +DEFAULT CHARSET=latin1; +set rocksdb_commit_in_the_middle=1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +set rocksdb_bulk_load_size = 100; +DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +SET rocksdb_commit_in_the_middle=0; +DROP TABLE t1, t2; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result 
index 09d251ccbe6..6c3d85b760c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result @@ -17,19 +17,19 @@ where option_type in ('WRITE_BUFFER_SIZE', 'MAX_BYTES_FOR_LEVEL_MULTIPLIER') order by cf_name, option_type; cf_name option_type value -cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 cf1 TARGET_FILE_SIZE_BASE 1048576 cf1 WRITE_BUFFER_SIZE 12582912 -cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 cf2 TARGET_FILE_SIZE_BASE 1048576 cf2 WRITE_BUFFER_SIZE 12582912 -default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 default TARGET_FILE_SIZE_BASE 1048576 default WRITE_BUFFER_SIZE 12582912 -z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 z TARGET_FILE_SIZE_BASE 1048576 z WRITE_BUFFER_SIZE 12582912 -__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 __system__ TARGET_FILE_SIZE_BASE 1048576 __system__ WRITE_BUFFER_SIZE 12582912 @@ -42,19 +42,19 @@ where option_type in ('WRITE_BUFFER_SIZE', 'MAX_BYTES_FOR_LEVEL_MULTIPLIER') order by cf_name, option_type; cf_name option_type value -cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 cf1 TARGET_FILE_SIZE_BASE 2097152 cf1 WRITE_BUFFER_SIZE 8388608 -cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8.000000 cf2 TARGET_FILE_SIZE_BASE 1048576 cf2 WRITE_BUFFER_SIZE 16777216 -default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 default TARGET_FILE_SIZE_BASE 1048576 default WRITE_BUFFER_SIZE 12582912 -z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 z TARGET_FILE_SIZE_BASE 4194304 z WRITE_BUFFER_SIZE 12582912 -__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 __system__ 
TARGET_FILE_SIZE_BASE 1048576 __system__ WRITE_BUFFER_SIZE 12582912 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result index a8908edada5..87243c05e2a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result @@ -1,12 +1,12 @@ -set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; -set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; +set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums; +set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; drop table if exists t1,t2,t3; show variables like 'rocksdb_%checksum%'; Variable_name Value rocksdb_checksums_pct 100 -rocksdb_store_checksums OFF -rocksdb_verify_checksums OFF +rocksdb_store_row_debug_checksums OFF +rocksdb_verify_row_debug_checksums OFF create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t1 values (1,1,1),(2,2,2),(3,3,3); check table t1; @@ -19,7 +19,7 @@ test.t1 check status OK CHECKTABLE t1: ... 
3 index entries checked (0 had checksums) CHECKTABLE t1: 0 table records had checksums drop table t1; -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t2 values (1,1,1),(2,2,2),(3,3,3); check table t2; @@ -34,9 +34,9 @@ test.t2 check status OK # Now, make a table that has both rows with checksums and without create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t3 values (1,1,1),(2,2,2),(3,3,3); -set session rocksdb_store_checksums=off; +set session rocksdb_store_row_debug_checksums=off; update t3 set b=3 where a=2; -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; check table t3; Table Op Msg_type Msg_text test.t3 check status OK @@ -46,7 +46,7 @@ test.t3 check status OK CHECKTABLE t3: Checking index b CHECKTABLE t3: ... 3 index entries checked (2 had checksums) CHECKTABLE t3: 2 table records had checksums -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; set session rocksdb_checksums_pct=5; create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; check table t4; @@ -65,13 +65,13 @@ insert into mtr.test_suppressions values ('Data with incorrect checksum'); # 1. Start with mismatch in key checksum of the PK. set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; -set session rocksdb_verify_checksums=off; +set session rocksdb_verify_row_debug_checksums=off; select * from t3; pk a b 1 1 1 2 2 3 3 3 3 -set session rocksdb_verify_checksums=on; +set session rocksdb_verify_row_debug_checksums=on; select * from t3; ERROR HY000: Internal error: Record checksum mismatch select * from t4; @@ -79,13 +79,13 @@ ERROR HY000: Internal error: Record checksum mismatch set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; # 2. Continue with mismatch in pk value checksum. 
set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; -set session rocksdb_verify_checksums=off; +set session rocksdb_verify_row_debug_checksums=off; select * from t3; pk a b 1 1 1 2 2 3 3 3 3 -set session rocksdb_verify_checksums=on; +set session rocksdb_verify_row_debug_checksums=on; select * from t3; ERROR HY000: Internal error: Record checksum mismatch select * from t4; @@ -123,7 +123,7 @@ ERROR HY000: Internal error: Record checksum mismatch select a from t4 force index(a) where a<1000000; ERROR HY000: Internal error: Record checksum mismatch set session debug= "-d,myrocks_simulate_bad_key_checksum1"; -set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; -set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; +set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums; +set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums; set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result new file mode 100644 index 00000000000..043750f94f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result @@ -0,0 +1,54 @@ +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; +begin; +update r2 set value=100 where id=9; +begin; +update r1 set value=100 where id=8; +select * from r2 for update;; +select * from r1 for update; +ERROR 40001: Deadlock found when trying to get 
lock; try restarting transaction +rollback; +id value +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +rollback; +begin; +insert into t values (1); +begin; +insert into t values (2); +begin; +insert into t values (3); +select * from t where i = 2 for update; +select * from t where i = 3 for update; +select * from t; +i +3 +insert into t values (4), (1); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Statement should be rolled back +select * from t; +i +3 +rollback; +i +rollback; +i +rollback; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result new file mode 100644 index 00000000000..043750f94f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result @@ -0,0 +1,54 @@ +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; +begin; +update r2 set value=100 where id=9; +begin; +update r1 set value=100 where id=8; +select * from r2 for update;; +select * from r1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +id value +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +rollback; +begin; +insert into t values (1); +begin; +insert into t values (2); +begin; +insert into t values (3); +select * from t where i = 2 for update; +select * from t where i = 3 for update; +select * from t; 
+i +3 +insert into t values (4), (1); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Statement should be rolled back +select * from t; +i +3 +rollback; +i +rollback; +i +rollback; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result new file mode 100644 index 00000000000..f97da0099fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result @@ -0,0 +1,8 @@ +create table t1 (a int primary key, b int) engine=rocksdb; +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result new file mode 100644 index 00000000000..f97da0099fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result @@ -0,0 +1,8 @@ +create table t1 (a int primary key, b int) engine=rocksdb; +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result 
b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result index e4d11960e6e..a39fa0a429e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result @@ -7,6 +7,7 @@ pk 1 connect con1,localhost,root,,; connection con1; +call mtr.add_suppression("Got snapshot conflict errors"); ### Connection con1 set @@rocksdb_lock_wait_timeout=500; set autocommit=0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result new file mode 100644 index 00000000000..18c014b9b07 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 SELECT * FROM t1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SAVEPOINT a; +SELECT * FROM t1 ORDER BY pk; +a b pk +1 a 1 +2 b 2 +3 a 3 +ROLLBACK TO SAVEPOINT a; +SAVEPOINT a; +SELECT * FROM t2 ORDER BY pk; +a b pk +1 a 1 +2 b 2 +3 a 3 +ROLLBACK TO SAVEPOINT a; +ALTER TABLE t1 RENAME TO t3; +DROP TABLE t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result index e6433dcbeef..286ab7178cc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result @@ -13,11 +13,10 @@ SELECT a,b FROM t1 WHERE b='a'; a b 1 a 3 a -# -# Currently, SELECT ... 
LOCK IN SHARE MODE works like -# SELECT FOR UPDATE SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; -ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY +a b +1 a +3 a UPDATE t1 SET b='c' WHERE b='a'; ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY connection con1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result index 69b927ba5a8..7f1584938d6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -407,7 +407,8 @@ SNAPSHOTS --------- LIST OF SNAPSHOTS FOR EACH SESSION: ---SNAPSHOT, ACTIVE NUM sec -MySQL thread id TID, OS thread handle PTR +MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION +SHOW ENGINE rocksdb TRANSACTION STATUS lock count 0, write count 0 ----------------------------------------- END OF ROCKSDB TRANSACTION MONITOR OUTPUT diff --git a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result index 1798563f328..78344991360 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result @@ -21,8 +21,8 @@ index t3_1(b) comment 'rev:cf_t4' ) engine=rocksdb; SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; table_name table_rows -t2 4999 -t3 4999 +t2 1000 +t3 1000 SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1'; CASE WHEN table_rows < 100000 then 'true' else 'false' end true diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result new file mode 100644 index 00000000000..e07d750c413 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result @@ 
-0,0 +1,26 @@ +# If rocksdb_tmpdir is NULL or "", temporary file will be created in +# server configuration variable location(--tmpdir) +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +# Connection con1 +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +DROP TABLE t1; +# rocksdb_tmpdir with valid location. +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +set @tmpdir = @@global.tmpdir; +set global rocksdb_tmpdir = @tmpdir; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +# Connection con3 +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir MYSQL_TMP_DIR/mysqld.1 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +set global rocksdb_tmpdir=NULL; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result new file mode 100644 index 00000000000..ada2e127021 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +create table t1 (a int) engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); +set autocommit=0; +select * from t1 for update; +a +1 +2 +select * from information_schema.rocksdb_trx; +TRANSACTION_ID STATE NAME WRITE_COUNT LOCK_COUNT TIMEOUT_SEC WAITING_KEY WAITING_COLUMN_FAMILY_ID IS_REPLICATION SKIP_TRX_API READ_ONLY HAS_DEADLOCK_DETECTION NUM_ONGOING_BULKLOAD THREAD_ID QUERY +_TRX_ID_ STARTED _NAME_ 0 2 1 _KEY_ 0 0 0 0 0 0 _THREAD_ID_ select * from information_schema.rocksdb_trx +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result new file mode 100644 index 00000000000..1e0c7a5adbf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result 
@@ -0,0 +1,15 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +DROP TABLE IF EXISTS t1; +include/stop_slave.inc +create table t1 (a int) engine=rocksdb; +show variables like 'rocksdb_rpl_skip_tx_api'; +Variable_name Value +rocksdb_rpl_skip_tx_api ON +include/start_slave.inc +found +DROP TABLE t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result index 9ab00243b6b..4b720271a4e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result @@ -1,21 +1,3 @@ -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t (id int not null auto_increment primary key, -c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, -key sk (c)); -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -insert into t (c) values ('A'), ('b'), ('C'); -explain select c from t; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t index NULL sk 4 NULL # Using index -select c from t; -c -A -b -C -select c from t where c = 'a'; -c -A -drop table t; set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; create table t (id int not null auto_increment primary key, c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, @@ -91,7 +73,7 @@ c1 Asdf bbbb drop table t; -set session rocksdb_verify_checksums = on; +set session rocksdb_verify_row_debug_checksums = on; create table t (id int primary key, email varchar(100), KEY email_i 
(email(30))) engine=rocksdb default charset=latin1; insert into t values (1, ' a'); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result index 7a7a5c7638c..7397ff64ab1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result @@ -1,80 +1,4 @@ drop table if exists t1, t2; -# -# Check that DECIMAL PK -# -create table t0(a int); -insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -# First, make the server to create a dataset in the old format: -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t1 ( -pk1 decimal(32,16), -pk2 decimal(32,16), -pk3 decimal(32,16), -a smallint not null, -primary key(pk1, pk2, pk3) -); -insert into t1 -select -A.a, B.a, C.a, 1234 -from t0 A, t0 B, t0 C; -# -# Looking at the table size, one can tell that the data is stored using -# old format: -# -set global rocksdb_force_flush_memtable_now=1; -# Check the format version: -select table_name,index_name,kv_format_version -from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name='t1'; -table_name index_name kv_format_version -t1 PRIMARY 10 -flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -# Check that the new server reads the data in the old format: -select * from t1 order by pk1,pk2,pk3 limit 5; -pk1 pk2 pk3 a -0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 3.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 4.0000000000000000 1234 -# -# Ok, now, enable the new data format: -# -create table t2 ( -pk1 decimal(32,16), -pk2 decimal(32,16), -pk3 decimal(32,16), -a smallint not null, -primary 
key(pk1, pk2, pk3) -); -insert into t2 -select -A.a, B.a, C.a, 1234 -from t0 A, t0 B, t0 C; -set global rocksdb_force_flush_memtable_now=1; -larger -1 -# This should show the new PK data fromat -select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name='t2'; -table_name index_name kv_format_version -t2 PRIMARY 11 -# -# Check that the server is able to read BOTH the old and the new formats: -# -select * from t2 limit 3; -pk1 pk2 pk3 a -0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 -select * from t1 limit 3; -pk1 pk2 pk3 a -0.0000000000000000 0.0000000000000000 0.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 1.0000000000000000 1234 -0.0000000000000000 0.0000000000000000 2.0000000000000000 1234 -drop table t1,t2; -drop table t0; # # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. 
# (Decoding happens from the mem-comparable image in the index, regardless diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index 5c449da2b8f..3cb06bc3c9c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -727,9 +727,9 @@ index_name count email_i 1 drop table t; set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; -set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; +set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums; set global rocksdb_checksums_pct = 100; -set session rocksdb_verify_checksums = on; +set session rocksdb_verify_row_debug_checksums = on; create table t (id int primary key, email varchar(100), KEY email_i (email(30))); insert into t values (1, 'a'); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); @@ -740,4 +740,4 @@ index_name count email_i 1 drop table t; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; -set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; +set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result deleted file mode 100644 index de7608ebb1c..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result +++ /dev/null @@ -1,254 +0,0 @@ -drop table if exists t1,t2; -set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; -# -# Issue 257: Sort order for varchars is different between -# MyISAM/InnoDB vs MyRocks -# -create table t1 ( -pk varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci, -col1 varchar(64), -primary key (pk) -); -insert into t1 values ('a','a'); -insert into t1 values ('a ', 'a-space'); -ERROR 23000: 
Duplicate entry 'a ' for key 'PRIMARY' -insert into t1 values('b ', 'b-2x-space'); -insert into t1 values ('b', 'b'); -ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' -select pk, hex(pk), col1 from t1; -pk hex(pk) col1 -a 61 a -b 622020 b-2x-space -insert into t1 values ('a\t', 'a-tab'); -insert into t1 values ('a \t', 'a-space-tab'); -select pk, hex(pk), col1 from t1 order by pk; -pk hex(pk) col1 -a 6109 a-tab -a 612009 a-space-tab -a 61 a -b 622020 b-2x-space -# Try longer values -insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); -ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' -insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); -select * from t1; -pk col1 -a a-tab -a a-space-tab -a a -b b-2x-space -c c-10-x-space -drop table t1; -# Secondary index -create table t1 ( -pk int not null primary key, -col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci, -col2 varchar(64), -key (col1) -); -insert into t1 values (0, 'ab', 'a-b'); -insert into t1 values (1, 'a ', 'a-space'); -insert into t1 values (2, 'a', 'a'); -insert into t1 values (3, 'a \t', 'a-tab'); -# Must show 'using index' for latin1_bin and utf8_bin: -explain -select col1, hex(col1) from t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL col1 195 NULL # Using index -select col1, hex(col1) from t1; -col1 hex(col1) -a 61202009 -a 6120 -a 61 -ab 6162 -# Must show 'using index' for latin1_bin and utf8_bin: -explain -select col1, hex(col1) from t1 where col1 < 'b'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index -select col1, hex(col1) from t1 where col1 < 'b'; -col1 hex(col1) -a 61202009 -a 6120 -a 61 -ab 6162 -delete from t1; -insert into t1 values(10, '', 'empty'); -insert into t1 values(11, repeat(' ', 8), '8x-space'); -insert into t1 values(12, repeat(' ', 16), '16x-space'); -insert into t1 values(13, repeat(' ', 24), '24x-space'); 
-insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); -insert into t1 values(21, repeat(' ', 9), '9x-space'); -insert into t1 values(22, repeat(' ',17), '17x-space'); -insert into t1 values(23, repeat(' ',18), '18x-space'); -explain -select pk, col1, hex(col1), length(col1) from t1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 # NULL col1 195 NULL # Using index -select pk, col1, hex(col1), length(col1) from t1; -pk col1 hex(col1) length(col1) -10 0 -11 2020202020202020 8 -12 20202020202020202020202020202020 16 -13 202020202020202020202020202020202020202020202020 24 -21 202020202020202020 9 -22 2020202020202020202020202020202020 17 -23 202020202020202020202020202020202020 18 -14 a 2020202020202020202020202020202061 17 -drop table t1; -create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; -insert into t1 values (1, concat('a', repeat(' ', 300))); -insert into t1 values (2, concat('b', repeat(' ', 300))); -select pk,length(a) from t1 force index(a) where a < 'zz'; -pk length(a) -1 301 -2 301 -select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; -pk length(a) rtrim(a) -1 301 a -2 301 b -select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; -pk length(a) rtrim(a) -1 301 a -2 301 b -drop table t1; -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; -# -# Check backwards compatibility: -# -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -# Create the tables in the old format -create table t1 ( -pk varchar(64) collate latin1_bin, -col1 varchar(64), -primary key (pk) -); -insert into t1 values ('a','a'); -# The following will not produce an error: -insert into t1 values ('a ', 'a-space'); -select pk, hex(pk), col1 from t1; -pk hex(pk) col1 -a 61 a -a 6120 a-space -create table t2 ( -pk int not null primary key, -col1 varchar(64) collate latin1_bin, -col2 varchar(64), -unique key (col1) -); -insert into t2 values (0, 'ab', 'a-b'); -# The 
following will not produce an error: -insert into t2 values (1, 'a ', 'a-space'); -insert into t2 values (2, 'a', 'a'); -select pk, col1, hex(col1), col2 from t2; -pk col1 hex(col1) col2 -0 ab 6162 a-b -1 a 6120 a-space -2 a 61 a -# Check the format version: -select table_name,index_name,kv_format_version -from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); -table_name index_name kv_format_version -t1 PRIMARY 10 -t2 PRIMARY 10 -t2 col1 10 -flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -select pk, hex(pk), col1 from t1; -pk hex(pk) col1 -a 61 a -a 6120 a-space -select pk, col1, hex(col1), col2 from t2; -pk col1 hex(col1) col2 -0 ab 6162 a-b -1 a 6120 a-space -2 a 61 a -select pk, hex(pk), col1 from t1; -pk hex(pk) col1 -a 61 a -a 6120 a-space -select pk, col1, hex(col1), col2 from t2; -pk col1 hex(col1) col2 -0 ab 6162 a-b -1 a 6120 a-space -2 a 61 a -drop table t1,t2; -# -# General upgrade tests to see that they work. 
-# -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( -id int primary key, -col1 varchar(64) collate latin1_swedish_ci, -unique key (col1) -) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -insert into t2 values (1, 'a'); -insert into t2 values (2, 'b'); -insert into t2 values (3, 'c'); -insert into t2 values (4, 'c '); -select col1 from t2; -col1 -a -b -c -c -delete from t2 where id = 4; -alter table t2 engine=rocksdb; -select col1 from t2; -col1 -a -b -c -insert into t2 values (4, 'c '); -ERROR 23000: Duplicate entry 'c ' for key 'col1' -drop table t2; -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( -id int primary key, -col1 varchar(64) collate latin1_bin, -unique key (col1) -) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -insert into t2 values (1, 'a'); -insert into t2 values (2, 'b'); -insert into t2 values (3, 'c'); -insert into t2 values (4, 'c '); -select col1 from t2; -col1 -a -b -c -c -delete from t2 where id = 4; -alter table t2 engine=rocksdb; -select col1 from t2; -col1 -a -b -c -insert into t2 values (4, 'c '); -ERROR 23000: Duplicate entry 'c ' for key 'col1' -drop table t2; -# -# Check what happens when one tries to 'upgrade' to the new data format -# and causes a unique key violation: -# -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( -pk int not null primary key, -col1 varchar(64) collate latin1_bin, -col2 varchar(64), -unique key (col1) -); -insert into t2 values (1, 'a ', 'a-space'); -insert into t2 values (2, 'a', 'a'); -select * from t2; -pk col1 col2 -1 a a-space -2 a a -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -alter table t2 engine=rocksdb; -ERROR 23000: Duplicate entry 'a' for key 'col1' -drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result new file mode 100644 index 
00000000000..d5cfdee4f07 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result @@ -0,0 +1,2 @@ + RocksDB: Can't enable both use_direct_reads and allow_mmap_reads + RocksDB: Can't enable both use_direct_writes and allow_mmap_writes diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt new file mode 100644 index 00000000000..83ed8522e72 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt @@ -0,0 +1 @@ +--binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test new file mode 100644 index 00000000000..c806e46aa4d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test @@ -0,0 +1,64 @@ +--source include/have_rocksdb.inc +--source include/have_log_bin.inc + +--echo # Disable for valgrind because this takes too long +--source include/not_valgrind.inc + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; + +--echo # 2PC enabled, MyRocks durability enabled +SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_write_sync=1; + +--echo ## 2PC + durability + single thread +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +--echo ## 2PC + durability + group commit +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent 
--concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + +--echo # 2PC enabled, MyRocks durability disabled +SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_write_sync=0; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + +--echo # 2PC disabled, MyRocks durability enabled +SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_write_sync=1; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 
then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + + + + + + +SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_write_sync=0; +DROP TABLE t1; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test index e0d7a4465c8..7d3f4091bb4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -1,5 +1,4 @@ --source include/have_rocksdb.inc ---source include/have_debug.inc --disable_warnings drop table if exists t1; @@ -168,117 +167,6 @@ SELECT COUNT(*) FROM t1; DROP TABLE t1; -# -# test crash recovery -# - -CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; -INSERT INTO t1 (a, b) VALUES (1, 5); -INSERT INTO t1 (a, b) VALUES (2, 6); -INSERT INTO t1 (a, b) VALUES (3, 7); - ---echo # crash_during_online_index_creation -flush logs; - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_online_index_creation"; ---error 2013 -ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; - ---enable_reconnect ---source include/wait_until_connected_again.inc - -SET SESSION debug="-d,crash_during_online_index_creation"; - -SHOW CREATE TABLE t1; -CHECK TABLE t1; - -DROP TABLE t1; - -# -# Test crash recovery with partitioned tables -# -CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; - ---disable_query_log -let $max = 100; -let $i = 1; -while ($i <= $max) { - let $insert = INSERT INTO t1 VALUES ($i, $i, $i); - inc $i; - eval $insert; -} ---enable_query_log - ---echo # crash_during_index_creation_partition -flush logs; - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_index_creation_partition"; ---error 2013 -ALTER TABLE t1 ADD INDEX kij(i,j), 
ALGORITHM=INPLACE; - ---enable_reconnect ---source include/wait_until_connected_again.inc - -SET SESSION debug="-d,crash_during_index_creation_partition"; - -SHOW CREATE TABLE t1; - -# here, the index numbers should be higher because previously 4 index numbers -# were allocated for the partitioned table -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; - -SELECT * FROM t1 ORDER BY i LIMIT 10; -SELECT COUNT(*) FROM t1; - -DROP TABLE t1; - -# -# Test rollback on partitioned tables for inplace alter -# -CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; - ---disable_query_log -let $max = 100; -let $i = 1; -while ($i <= $max) { - let $insert = INSERT INTO t1 VALUES ($i, $i, $i); - inc $i; - eval $insert; -} ---enable_query_log - ---echo # crash_during_index_creation_partition -flush logs; - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; - ---echo # expected assertion failure from sql layer here for alter rollback -call mtr.add_suppression("Assertion `0' failed."); -call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); - ---error 2013 - -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; - ---enable_reconnect ---source include/wait_until_connected_again.inc - -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; - -SHOW CREATE TABLE t1; - -# here, the index numbers should be higher because previously 4 index numbers -# were allocated for the partitioned table -ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; - -SHOW CREATE TABLE t1; -SELECT COUNT(*) FROM t1; - -DROP TABLE t1; - # test failure in prepare phase (due to collation) CREATE TABLE t1 (a INT, b TEXT); @@ -287,4 +175,171 @@ ALTER TABLE t1 ADD KEY kb(b(10)); ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; +# make sure race condition between connection close and alter on another +# connection is handled + +set global rocksdb_bulk_load=1; + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); + +--echo # Switch to connection con1 +connection con1; + +show global variables like 'rocksdb_bulk_load'; +show session variables like 'rocksdb_bulk_load'; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,1); + +# Disconnect connection 1, this starts the code path that will call +# rocksdb_close_connection, ending the bulk load. 
+--echo # Disconnecting on con1 +disconnect con1; + +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); +--echo # Switch to connection con2 +connection con2; + +# when alter table happens, it tries to close all other TABLE instances +# when acquiring the exclusive lock for alter table (this happens in SQL layer) +# make sure bulk_load now handles this possible race condition properly +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); + +DROP TABLE t1; +disconnect con2; + +# make sure implicilty closing the alter from another session works + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); + +--echo # Switch to connection con1 +connection con1; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +set rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); + +--echo # Switch to connection con2 +connection con2; + +# here, the bulk load hasn't been completed yet, and we are in conn2 +# therefore select count returns 0 +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); + +# implicilty close the table from connection 2 +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); + +set global rocksdb_bulk_load=0; + +DROP TABLE t1; + +connection default; + + +SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size; +SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size; + +SET global rocksdb_strict_collation_check = off; +SET session rocksdb_merge_combine_read_size = 566; +SET session rocksdb_merge_buf_size = 336; + +show variables like '%rocksdb_bulk_load%'; +CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB; +INSERT INTO t1 (a) VALUES 
(REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +ALTER TABLE t1 ADD INDEX ka(a), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > ""; +DROP TABLE t1; + +SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size; +SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size; +SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check; + +# Test to make sure index statistics are updating properly +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now=1; + +--let $data_length_old = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) + +## uncomment to see the actual values +#--replace_column 8 # +#SHOW TABLE STATUS WHERE name LIKE 't1'; + +# Now do an alter and see what happens +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +--source include/restart_mysqld.inc +--source include/wait_until_connected_again.inc +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +analyze table t1; +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where 
table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +--source include/restart_mysqld.inc +--source include/wait_until_connected_again.inc +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +# verifying multiple analyze table won't change stats +--disable_query_log +let $max = 10; +let $i = 1; +while ($i <= $max) { + let $analyze = ANALYZE TABLE t1; + inc $i; + eval $analyze; +} +--enable_query_log + +--let $data_length_new2 = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--eval select $data_length_new2 < $data_length_new * 1.5 as "same" + + +--enable_query_log + +## uncomment to see the actual values +#--replace_column 8 # +#SHOW TABLE STATUS WHERE name LIKE 't1'; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test new file mode 100644 index 00000000000..ca9122bccd7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test @@ -0,0 +1,117 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# test crash recovery +# + +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +--echo # crash_during_online_index_creation +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,crash_during_online_index_creation"; +--error 2013 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; + 
+--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,crash_during_online_index_creation"; + +SHOW CREATE TABLE t1; +CHECK TABLE t1; + +DROP TABLE t1; + +# +# Test crash recovery with partitioned tables +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,crash_during_index_creation_partition"; +--error 2013 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,crash_during_index_creation_partition"; + +SHOW CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# +# Test rollback on partitioned tables for inplace alter +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; + +--echo # expected assertion failure from sql layer here for alter rollback +call mtr.add_suppression("Assertion `0' failed."); +call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); + +--error 2013 + +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; + +SHOW CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test index 2ad2c390d59..7e600224dcc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -1,5 +1,4 @@ --source include/have_rocksdb.inc ---source include/have_debug.inc --disable_warnings drop table if exists t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test deleted file mode 100644 index e3ac4307c54..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test +++ /dev/null @@ -1,30 +0,0 @@ ---source include/have_rocksdb.inc - -# Issue221 -# Turning on --rocksdb-allow-mmap-reads while having --rocksdb-allow-os-buffer -# off caused an assertion in RocksDB. 
Now it should not be allowed and the -# server will not start with that configuration - -# Write file to make mysql-test-run.pl expect the "crash", but don't restart -# the serve runtil it is told to ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect ---exec echo "wait" >$_expect_file_name -shutdown_server 10; - -# Clear the log ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err - -# Attempt to restart the server with invalid options ---exec echo "restart:--rocksdb_allow_os_buffer=0 --rocksdb_allow_mmap_reads=1" >$_expect_file_name ---sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart ---exec echo "restart:" >$_expect_file_name - -# Cleanup ---enable_reconnect ---source include/wait_until_connected_again.inc ---disable_reconnect - -# We should now have an error message ---exec grep "disable allow_os_buffer" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test new file mode 100644 index 00000000000..f801b2f683a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test @@ -0,0 +1,53 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +--echo #--------------------------- +--echo # two threads inserting simultaneously with increment > 1 +--echo # Issue #390 +--echo #--------------------------- + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# Set up connections +connect (con1, localhost, root,,); +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +# Insert one row to set up the conditions that caused the original failure +INSERT INTO t1 VALUES(NULL); + +connect (con2, localhost, root,,); +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; + +# Start each thread on an insert that will block waiting for a signal +connection 
con1; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT'; +send INSERT INTO t1 VALUES(NULL); + +connection con2; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT'; +send INSERT INTO t1 VALUES(NULL); + +# Wait for both threads to be at debug_sync point +connection default; +SET debug_sync='now WAIT_FOR parked1'; +SET debug_sync='now WAIT_FOR parked2'; + +# Signal both threads to continue +SET debug_sync='now SIGNAL go'; + +connection con1; +reap; + +connection con2; +reap; + +connection default; +SET debug_sync='RESET'; + +disconnect con1; +disconnect con2; + +SELECT * FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test new file mode 100644 index 00000000000..3c7d61aa15b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test @@ -0,0 +1,141 @@ +--source include/have_rocksdb.inc + +--echo #--------------------------- +--echo # ten threads inserting simultaneously with increment > 1 +--echo # Issue #390 +--echo #--------------------------- + +# Run 10 simulatenous threads each inserting 10,000 rows +let $num_threads = 10; +let $num_rows_per_thread = 100000; + +# Create the table with an AUTO_INCREMENT primary key and a separate colum +# to store which thread created the row +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb; + +# For each thread... +# 1) set up a connection +# 2) create a file that can be used for LOAD DATA INFILE ... 
+let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + # Set up connection + connect (con$i, localhost, root,,); + + # Set up the auto_increment_* variables for each thread + eval SET auto_increment_increment = 100; + eval SET auto_increment_offset = $i + 1; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`; + + # Pass variables into perl + let ROCKSDB_INFILE = $file; + let ROCKSDB_THREAD = `SELECT $i`; + let ROCKSDB_ROWS_PER_THREAD = `SELECT $num_rows_per_thread`; + + # Create a file to load + perl; + my $fn = $ENV{'ROCKSDB_INFILE'}; + my $thr = $ENV{'ROCKSDB_THREAD'}; + my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'}; + open(my $fh, '>>', $fn) || die "perl open($fn): $!"; + for (my $ii = 0; $ii < $num; $ii++) + { + print $fh "\\N\t$thr\n" + } + close($fh); + EOF +} + +# For each connection start the LOAD DATA INFILE in the background +connection default; +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + connection con$i; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`; + --disable_query_log + --echo LOAD DATA INFILE INTO TABLE t1; + send_eval LOAD DATA INFILE '$file' INTO TABLE t1; + --enable_query_log +} + +# Reap each connection's background result +connection default; +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + connection con$i; + reap; +} + +# Make sure we have the required number of rows +connection default; +SELECT COUNT(*) FROM t1; +SELECT thr, COUNT(pk) FROM t1 GROUP BY thr; + +# Cleanup the connection and file used for LOAD DATA INFILE +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + disconnect con$i; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", "$i", ".txt")`; + remove_file $file; +} + +# Validate each row. For each row, the created 'thr' column shows which +# thread created the row. The pk that was automatically generated should +# therefore match a certain pattern. 
For thread 0, the pk should be in +# the sequence [1, 101, 201, 301, ...]; for thread 1, it should be in the +# sequence [2, 102, 202, 302, ...], etc. The pk for each row should be +# smallest value in the sequence for thread 'thr' that is greater than +# the pk in the previous row. +let $file = `SELECT CONCAT(@@datadir, "test_export.txt")`; +--disable_query_log +--echo SELECT * FROM t1 ORDER BY pk INTO OUTFILE ; +eval SELECT * FROM t1 ORDER BY pk INTO OUTFILE "$file"; +--enable_query_log + +let ROCKSDB_OUTFILE = $file; + +perl; +my $fn = $ENV{'ROCKSDB_OUTFILE'}; +my $last_pk = 0; +open(my $fh, '<', $fn) || die "perl open($fn): $!"; +while (<$fh>) +{ + if ($_ =~ m/^(.*)\t(.*)$/) + { + my $pk = $1; + my $thr = $2; + + my $expected_pk = int($last_pk / 100) * 100 + ($thr + 1); + $expected_pk += 100 if $expected_pk <= $last_pk; + + if ($expected_pk != $pk) + { + die "Incorrect next pk ($pk); expected $expected_pk (previous: $last_pk)" + } + + $last_pk = $pk; + } + else + { + die "output file has incorrect format: $_"; + } +} +print stdout "All pk values matched their expected values\n"; +EOF + +remove_file $file; + +# Drop the table to finally clean up +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index cfa2f6ff747..c80b789ae37 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -73,6 +73,15 @@ eval LOAD DATA INFILE '$file' INTO TABLE t3; --enable_query_log set rocksdb_bulk_load=0; +# Make sure row count index stats are correct +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +ANALYZE TABLE t1, t2, t3; + +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + # Make sure all the data is there. 
select count(pk) from t1; select count(a) from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index c3fcaad776c..030c3e21454 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -155,7 +155,7 @@ DROP TABLE t2; # test invalid regex (missing end bracket) --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; ---exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --error ER_UNKNOWN_ERROR CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; @@ -168,7 +168,7 @@ DROP TABLE a, b; # test invalid regex (trailing escape) --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; ---exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 --error ER_UNKNOWN_ERROR CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="abc"; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test new file mode 100644 index 00000000000..02f45779b9a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS a; +--enable_warnings + +create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 100000) { + let $insert = INSERT INTO a VALUES($i, $i); + inc $i; + eval 
$insert; +} +--enable_query_log + +set rocksdb_bulk_load=1; +set rocksdb_commit_in_the_middle=1; +alter table a add index v (value) COMMENT 'cf_a'; +set rocksdb_bulk_load=0; +set rocksdb_commit_in_the_middle=0; +select count(*) from a force index(primary); +select count(*) from a force index(v); + +DROP TABLE a; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test index a9ee98dfda5..b0e74221702 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test @@ -21,13 +21,13 @@ insert into t1 values (1,1),(2,2),(3,3); select * from t1; -set @tmp1=@@rocksdb_verify_checksums; -set rocksdb_verify_checksums=1; +set @tmp1=@@rocksdb_verify_row_debug_checksums; +set rocksdb_verify_row_debug_checksums=1; set session debug= "+d,myrocks_simulate_bad_row_read1"; --error ER_GET_ERRNO select * from t1 where pk=1; set session debug= "-d,myrocks_simulate_bad_row_read1"; -set rocksdb_verify_checksums=@tmp1; +set rocksdb_verify_row_debug_checksums=@tmp1; select * from t1 where pk=1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.test b/storage/rocksdb/mysql-test/rocksdb/t/index.test index f12e7714bc3..e0c6175b397 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/index.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/index.test @@ -20,4 +20,25 @@ --source index.inc +--echo # +--echo # Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( + pk int not null, + a int not null, + b int not null, + primary key(pk), + key(a) +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; + +--echo # This must have type=range, index=a, and must 
not have 'Using filesort': +--replace_column 9 # +explain select * from t2 force index (a) where a=0 and pk>=3 order by pk; + +drop table t0,t1,t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt index 2d5e33057f6..a0bf5759ec4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt @@ -3,4 +3,5 @@ --rocksdb_override_cf_options=__system__={memtable=skip_list:16} --rocksdb_compaction_sequential_deletes=0 --rocksdb_compaction_sequential_deletes_window=0 +--rocksdb_allow_concurrent_memtable_write=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test new file mode 100644 index 00000000000..1b624cf38c0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test @@ -0,0 +1,31 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); + +create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb; +insert into t2 values (1); +insert into t2 values (2); + +set autocommit=0; +select * from t1 for update; +select * from t2 for update; + +use information_schema; +--replace_column 2 _txn_id_ 3 _key_ +select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key +from rocksdb_locks +left join rocksdb_ddl +on rocksdb_locks.column_family_id=rocksdb_ddl.column_family +order by rocksdb_ddl.cf; + +use test; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test new file mode 100644 index 00000000000..beccc8a6b8e --- /dev/null 
+++ b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test @@ -0,0 +1,3 @@ + +let $engine=rocksdb; +--source include/loose_index_scans.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index f612cb0997b..7ec15d157a7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -818,13 +818,17 @@ update t49 set a = 100 where pk = 1; --connect (con1,localhost,root,,) --let $con1_id = `SELECT CONNECTION_ID()` -set rocksdb_lock_wait_timeout=5000; +set rocksdb_lock_wait_timeout=60; set @var1= to_seconds(now()); send update t49 set a = 1000 where pk = 1; --connect (con2,localhost,root,,) --echo kill query \$con1_id; --disable_query_log +# If we immeditely kill the query - internally the condition broadcast can +# occur before the lock is waiting on the condition, thus the broadcast call +# is lost. Sleep 1 second to avoid this condition. +--sleep 1 eval kill query $con1_id; --enable_query_log --connection con1 @@ -833,10 +837,8 @@ eval kill query $con1_id; set @var2= to_seconds(now()); # We expect the time to kill query in con1 should be below -# rocksdb_lock_wait_timeout (5000). ---echo "[Jay Edgar] I've updated this query to help determine why it is sometimes failing" ---echo "(t13541934). If you get an error here (i.e. not 'passed') notify me." -select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result'; +# rocksdb_lock_wait_timeout (60). 
+select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result'; --connection default --disconnect con1 @@ -1119,7 +1121,7 @@ set @a=-1; insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; set @tmp1= @@rocksdb_max_row_locks; set rocksdb_max_row_locks= 20; ---error ER_INTERNAL_ERROR +--error ER_GET_ERRMSG update t1 set a=a+10; DROP TABLE t1; @@ -1922,4 +1924,42 @@ SHOW TABLE STATUS LIKE 't1'; SELECT * FROM t1; DROP TABLE t1; +--echo # +--echo # Issue #411: Setting rocksdb_commit_in_the_middle commits transaction +--echo # without releasing iterator +--echo # + +CREATE TABLE t1 (id1 bigint(20), + id2 bigint(20), + id3 bigint(20), + PRIMARY KEY (id1, id2, id3)) + DEFAULT CHARSET=latin1; + +CREATE TABLE t2 (id1 bigint(20), + id2 bigint(20), + PRIMARY KEY (id1, id2)) + DEFAULT CHARSET=latin1; + + +set rocksdb_commit_in_the_middle=1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +set rocksdb_bulk_load_size = 100; + +--disable_query_log +let $j = 10000; +while ($j) +{ + --eval insert into t1 (id1, id2, id3) values (0, $j, 0); + --eval insert into t2 (id1, id2) values (0, $j); + dec $j; +} +--enable_query_log + +DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0; + +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +SET rocksdb_commit_in_the_middle=0; +DROP TABLE t1, t2; + + SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index 1a0364ebaee..7e5c61386f2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -5,8 +5,8 @@ # --source include/have_debug.inc -set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums; -set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums; +set 
@save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums; +set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; # wiping mysql log for repeatable tests @@ -26,7 +26,7 @@ check table t1; drop table t1; -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t2 values (1,1,1),(2,2,2),(3,3,3); check table t2; @@ -35,13 +35,13 @@ check table t2; --echo # Now, make a table that has both rows with checksums and without create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t3 values (1,1,1),(2,2,2),(3,3,3); -set session rocksdb_store_checksums=off; +set session rocksdb_store_row_debug_checksums=off; update t3 set b=3 where a=2; -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; check table t3; --exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 -set session rocksdb_store_checksums=on; +set session rocksdb_store_row_debug_checksums=on; set session rocksdb_checksums_pct=5; create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; --disable_query_log @@ -71,9 +71,9 @@ insert into mtr.test_suppressions values --echo # 1. Start with mismatch in key checksum of the PK. set session debug= "+d,myrocks_simulate_bad_pk_checksum1"; -set session rocksdb_verify_checksums=off; +set session rocksdb_verify_row_debug_checksums=off; select * from t3; -set session rocksdb_verify_checksums=on; +set session rocksdb_verify_row_debug_checksums=on; --error ER_INTERNAL_ERROR select * from t3; --error ER_INTERNAL_ERROR @@ -82,9 +82,9 @@ set session debug= "-d,myrocks_simulate_bad_pk_checksum1"; --echo # 2. Continue with mismatch in pk value checksum. 
set session debug= "+d,myrocks_simulate_bad_pk_checksum2"; -set session rocksdb_verify_checksums=off; +set session rocksdb_verify_row_debug_checksums=off; select * from t3; -set session rocksdb_verify_checksums=on; +set session rocksdb_verify_row_debug_checksums=on; --error ER_INTERNAL_ERROR select * from t3; --error ER_INTERNAL_ERROR @@ -105,6 +105,9 @@ select * from t4 force index(a) where a<1000000; set session debug= "-d,myrocks_simulate_bad_key_checksum1"; --echo # 4. The same for index-only reads? +--disable_query_log +set global rocksdb_force_flush_memtable_now=1; +--enable_query_log --replace_column 9 # explain select a from t3 force index(a) where a<4; @@ -117,8 +120,8 @@ select a from t3 force index(a) where a<4; select a from t4 force index(a) where a<1000000; set session debug= "-d,myrocks_simulate_bad_key_checksum1"; -set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums; -set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums; +set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums; +set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums; set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc new file mode 100644 index 00000000000..01180ea29a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc @@ -0,0 +1,90 @@ +# +# Some basic sanity tests for deadlock detection. 
+# +--source include/have_rocksdb.inc + +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; + +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; + +# deadlock on scanned locking reads +connect (con1,localhost,root,,); +let $con1= `SELECT CONNECTION_ID()`; +begin; +update r2 set value=100 where id=9; + +connect (con2,localhost,root,,); +let $con2= `SELECT CONNECTION_ID()`; +begin; +update r1 set value=100 where id=8; +--send select * from r2 for update; + +connection con1; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc +--error ER_LOCK_DEADLOCK +select * from r1 for update; +rollback; + +connection con2; +--reap; +rollback; + +connection con1; +begin; +insert into t values (1); + +connection con2; +begin; +insert into t values (2); + +connect (con3,localhost,root,,); +begin; +insert into t values (3); + +connection con1; +--send select * from t where i = 2 for update + +connection con2; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con1', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc + +--send select * from t where i = 3 for update + +connection con3; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc + +select * from t; +--error ER_LOCK_DEADLOCK +insert into t values (4), (1); +--echo # Statement should be rolled back +select * from t; +rollback; + 
+connection con2; +--reap +rollback; + +connection con1; +--reap +rollback; + +connection default; +disconnect con1; +disconnect con2; +disconnect con3; + +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt new file mode 100644 index 00000000000..25b80282211 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt @@ -0,0 +1 @@ +--transaction-isolation=read-committed diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test new file mode 100644 index 00000000000..c7d69c61d14 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test @@ -0,0 +1 @@ +--source suite/rocksdb/t/rocksdb_deadlock_detect.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test new file mode 100644 index 00000000000..c7d69c61d14 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test @@ -0,0 +1 @@ +--source suite/rocksdb/t/rocksdb_deadlock_detect.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc new file mode 100644 index 00000000000..e164591ddec --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc @@ -0,0 +1,18 @@ +# +# Stress tests deadlock detection +# + +--source include/have_rocksdb.inc + +create table t1 (a int primary key, b int) engine=rocksdb; + +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global 
rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; + +exec python suite/rocksdb/t/rocksdb_deadlock_stress.py root 127.0.0.1 $MASTER_MYPORT test t1 10000 10; + +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py new file mode 100644 index 00000000000..3bc8a3be010 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py @@ -0,0 +1,94 @@ +""" +This script stress tests deadlock detection. + +Usage: rocksdb_deadlock_stress.py user host port db_name table_name + num_iters num_threads +""" +import cStringIO +import hashlib +import MySQLdb +from MySQLdb.constants import ER +import os +import random +import signal +import sys +import threading +import time +import string +import traceback + +def is_deadlock_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK) + +def get_query(table_name, idx): + # Let's assume that even indexes will always be acquireable, to make + # deadlock detection more interesting. 
+ if idx % 2 == 0: + return """SELECT * from %s WHERE a = %d LOCK IN SHARE MODE""" % (table_name, idx) + else: + r = random.randint(1, 3); + if r == 1: + return """SELECT * from %s WHERE a = %d FOR UPDATE""" % (table_name, idx) + elif r == 2: + return """INSERT INTO %s VALUES (%d, 1) + ON DUPLICATE KEY UPDATE b=b+1""" % (table_name, idx) + else: + return """DELETE from %s WHERE a = %d""" % (table_name, idx) + +class Worker(threading.Thread): + def __init__(self, con, table_name, num_iters): + threading.Thread.__init__(self) + self.con = con + self.table_name = table_name + self.num_iters = num_iters + self.exception = None + self.start() + def run(self): + try: + self.runme() + except Exception, e: + self.exception = traceback.format_exc() + def runme(self): + cur = self.con.cursor() + for x in xrange(self.num_iters): + try: + for i in random.sample(xrange(100), 10): + cur.execute(get_query(self.table_name, i)) + self.con.commit() + except MySQLdb.OperationalError, e: + self.con.rollback() + cur = self.con.cursor() + if not is_deadlock_error(e): + raise e + +if __name__ == '__main__': + if len(sys.argv) != 8: + print "Usage: rocksdb_deadlock_stress.py user host port db_name " \ + "table_name num_iters num_threads" + sys.exit(1) + + user = sys.argv[1] + host = sys.argv[2] + port = int(sys.argv[3]) + db = sys.argv[4] + table_name = sys.argv[5] + num_iters = int(sys.argv[6]) + num_workers = int(sys.argv[7]) + + worker_failed = False + workers = [] + for i in xrange(num_workers): + w = Worker( + MySQLdb.connect(user=user, host=host, port=port, db=db), table_name, + num_iters) + workers.append(w) + + for w in workers: + w.join() + if w.exception: + print "Worker hit an exception:\n%s\n" % w.exception + worker_failed = True + + if worker_failed: + sys.exit(1) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt new file mode 100644 index 
00000000000..25b80282211 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt @@ -0,0 +1 @@ +--transaction-isolation=read-committed diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test new file mode 100644 index 00000000000..d80b4686d80 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test @@ -0,0 +1 @@ +--source suite/rocksdb/t/rocksdb_deadlock_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test new file mode 100644 index 00000000000..d80b4686d80 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test @@ -0,0 +1 @@ +--source suite/rocksdb/t/rocksdb_deadlock_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt new file mode 100644 index 00000000000..c9d9edb8565 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt @@ -0,0 +1 @@ +--rocksdb_print_snapshot_conflict_queries=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test index 3b28df0d63b..9a25f39a8e3 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test @@ -16,6 +16,7 @@ select * from t1 where pk=1 for update; --connect (con1,localhost,root,,) --connection con1 +call mtr.add_suppression("Got snapshot conflict errors"); --echo ### Connection con1 let $ID= `select connection_id()`; set @@rocksdb_lock_wait_timeout=500; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test new file mode 100644 index 00000000000..8543ce81de4 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test @@ -0,0 +1,31 @@ +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 SELECT * FROM t1; + +--connect (con1,localhost,root,,) +--connect (con2,localhost,root,,) + +--connection con1 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SAVEPOINT a; +SELECT * FROM t1 ORDER BY pk; +ROLLBACK TO SAVEPOINT a; +SAVEPOINT a; +SELECT * FROM t2 ORDER BY pk; +ROLLBACK TO SAVEPOINT a; + +# should not be blocked +--connection con2 +ALTER TABLE t1 RENAME TO t3; + +--connection default +DROP TABLE t2, t3; + +--disconnect con1 +--disconnect con2 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test index d1d289bcb74..23ce6d45234 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test @@ -31,10 +31,6 @@ SET lock_wait_timeout = 1; --sorted_result SELECT a,b FROM t1 WHERE b='a'; --sorted_result ---echo # ---echo # Currently, SELECT ... 
LOCK IN SHARE MODE works like ---echo # SELECT FOR UPDATE ---error ER_LOCK_WAIT_TIMEOUT SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; --error ER_LOCK_WAIT_TIMEOUT diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test index 0cb32d95d8a..834887ddffb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -68,7 +68,8 @@ SHOW ENGINE rocksdb TRANSACTION STATUS; START TRANSACTION WITH CONSISTENT SNAPSHOT; ---replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ +#select sleep(10); +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ SHOW ENGINE rocksdb TRANSACTION STATUS; ROLLBACK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test b/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test new file mode 100644 index 00000000000..28615264d35 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +--echo # If rocksdb_tmpdir is NULL or "", temporary file will be created in +--echo # server configuration variable location(--tmpdir) + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +show session variables like 'rocksdb_tmpdir'; +--echo # Connection con1 +connect (con1,localhost,root); +show session variables like 'rocksdb_tmpdir'; + +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +connection default; +disconnect con1; +DROP TABLE t1; + +--echo # rocksdb_tmpdir with valid location. 
+ +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +set @tmpdir = @@global.tmpdir; +set global rocksdb_tmpdir = @tmpdir; +show session variables like 'rocksdb_tmpdir'; + +--echo # Connection con3 +connect (con2,localhost,root); +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +show session variables like 'rocksdb_tmpdir'; + +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +disconnect con2; +connection default; +set global rocksdb_tmpdir=NULL; +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test new file mode 100644 index 00000000000..975bed6132c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1 (a int) engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); + +set autocommit=0; +select * from t1 for update; + +--replace_column 1 _TRX_ID_ 3 _NAME_ 7 _KEY_ 14 _THREAD_ID_ +select * from information_schema.rocksdb_trx; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf new file mode 100644 index 00000000000..f5b725932e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf @@ -0,0 +1,8 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row +slave_parallel_workers=1 +rocksdb_rpl_skip_tx_api=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test new file mode 100644 index 00000000000..19499765140 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test @@ -0,0 +1,42 @@ +--source include/master-slave.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +connection slave; +--source include/stop_slave.inc +--enable_warnings + +connection master; +create table t1 (a int) 
engine=rocksdb; +--disable_query_log +--let $aa= 0 +while ($aa < 1000) { + eval insert into t1 values ($aa); + --inc $aa +} +--enable_query_log + +connection slave; +show variables like 'rocksdb_rpl_skip_tx_api'; +--source include/start_slave.inc + +--let $it=0 +--let $stop=0 +while ($stop != 1) { +let $count= query_get_value(select count(*) as Value from information_schema.rocksdb_trx, Value, 1); + if ($count) { + --echo found + --let $stop=1 + } + + if ($it > 1000) { + --echo not found + --let $stop=1 + } + + --inc $it +} + +connection master; +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test index d51b55f07e2..a0ae824f829 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test @@ -1,21 +1,6 @@ --source include/have_rocksdb.inc --source include/have_debug.inc -# Test to see if index-only scan fails gracefully if unpack info is not -# available. -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t (id int not null auto_increment primary key, - c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, - key sk (c)); -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -insert into t (c) values ('A'), ('b'), ('C'); ---replace_column 9 # -explain select c from t; -select c from t; -select c from t where c = 'a'; - -drop table t; - # Test if unknown collation works. 
set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; create table t (id int not null auto_increment primary key, @@ -127,7 +112,7 @@ select c1 from t; drop table t; # Test varchar keyparts with key prefix -set session rocksdb_verify_checksums = on; +set session rocksdb_verify_row_debug_checksums = on; create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; insert into t values (1, ' a'); --replace_column 9 # diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt new file mode 100644 index 00000000000..33e72265db2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=10 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test index d5ee75686df..ee325b34eff 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test @@ -5,81 +5,6 @@ drop table if exists t1, t2; --enable_warnings ---echo # ---echo # Check that DECIMAL PK ---echo # -create table t0(a int); -insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); - ---echo # First, make the server to create a dataset in the old format: -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t1 ( - pk1 decimal(32,16), - pk2 decimal(32,16), - pk3 decimal(32,16), - a smallint not null, - primary key(pk1, pk2, pk3) -); -insert into t1 -select - A.a, B.a, C.a, 1234 -from t0 A, t0 B, t0 C; - ---echo # ---echo # Looking at the table size, one can tell that the data is stored using ---echo # old format: ---echo # -set global rocksdb_force_flush_memtable_now=1; - ---let $data_length_old = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", DATA_LENGTH, 1) - ---echo # 
Check the format version: -select table_name,index_name,kv_format_version -from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name='t1'; - -flush tables; - -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; ---source include/restart_mysqld.inc - ---echo # Check that the new server reads the data in the old format: -select * from t1 order by pk1,pk2,pk3 limit 5; - ---echo # ---echo # Ok, now, enable the new data format: ---echo # -create table t2 ( - pk1 decimal(32,16), - pk2 decimal(32,16), - pk3 decimal(32,16), - a smallint not null, - primary key(pk1, pk2, pk3) -); -insert into t2 -select - A.a, B.a, C.a, 1234 -from t0 A, t0 B, t0 C; -set global rocksdb_force_flush_memtable_now=1; - ---let $data_length_new = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t2'", DATA_LENGTH, 1) ---disable_query_log ---eval select $data_length_old > $data_length_new as "larger" ---enable_query_log - ---echo # This should show the new PK data fromat -select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name='t2'; - ---echo # ---echo # Check that the server is able to read BOTH the old and the new formats: ---echo # -select * from t2 limit 3; -select * from t1 limit 3; - -drop table t1,t2; -drop table t0; - --echo # --echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. 
--echo # (Decoding happens from the mem-comparable image in the index, regardless diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test index 6416af7308f..e45b6836f67 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test @@ -62,9 +62,9 @@ select 'email_i' as index_name, count(*) AS count from t force index(email_i); drop table t; set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; -set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums; +set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums; set global rocksdb_checksums_pct = 100; -set session rocksdb_verify_checksums = on; +set session rocksdb_verify_row_debug_checksums = on; create table t (id int primary key, email varchar(100), KEY email_i (email(30))); insert into t values (1, 'a'); --replace_column 9 # @@ -72,4 +72,4 @@ explain select 'email_i' as index_name, count(*) AS count from t force index(ema select 'email_i' as index_name, count(*) AS count from t force index(email_i); drop table t; set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; -set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums; +set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test deleted file mode 100644 index d61e85ed204..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test +++ /dev/null @@ -1,137 +0,0 @@ -# -# VARCHAR encoding tests that require debug support -# ---source include/have_rocksdb.inc ---source include/have_debug.inc - ---disable_warnings -drop table if exists t1,t2; ---enable_warnings - - -set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; - ---let 
$character_set_collate=CHARACTER SET utf8 COLLATE utf8_general_ci ---source type_varchar_endspace.inc - -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; - ---echo # ---echo # Check backwards compatibility: ---echo # - -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; - ---echo # Create the tables in the old format - -create table t1 ( - pk varchar(64) collate latin1_bin, - col1 varchar(64), - primary key (pk) -); -insert into t1 values ('a','a'); ---echo # The following will not produce an error: -insert into t1 values ('a ', 'a-space'); -select pk, hex(pk), col1 from t1; - -create table t2 ( - pk int not null primary key, - col1 varchar(64) collate latin1_bin, - col2 varchar(64), - unique key (col1) -); - -insert into t2 values (0, 'ab', 'a-b'); ---echo # The following will not produce an error: -insert into t2 values (1, 'a ', 'a-space'); -insert into t2 values (2, 'a', 'a'); -select pk, col1, hex(col1), col2 from t2; - ---echo # Check the format version: -select table_name,index_name,kv_format_version -from information_schema.ROCKSDB_DDL -where TABLE_SCHEMA=database() AND table_name in ('t1','t2'); - -flush tables; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; -select pk, hex(pk), col1 from t1; -select pk, col1, hex(col1), col2 from t2; - -## Check that we can still read the data when starting on the old datadir: ---source include/restart_mysqld.inc - -select pk, hex(pk), col1 from t1; -select pk, col1, hex(col1), col2 from t2; - -drop table t1,t2; - - ---echo # ---echo # General upgrade tests to see that they work. 
---echo # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( - id int primary key, - col1 varchar(64) collate latin1_swedish_ci, - unique key (col1) -) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; - -insert into t2 values (1, 'a'); -insert into t2 values (2, 'b'); -insert into t2 values (3, 'c'); -# Check if this is indeed the old format -insert into t2 values (4, 'c '); -select col1 from t2; -delete from t2 where id = 4; -alter table t2 engine=rocksdb; -select col1 from t2; -# Check if this is indeed the new format ---error ER_DUP_ENTRY -insert into t2 values (4, 'c '); -drop table t2; - -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( - id int primary key, - col1 varchar(64) collate latin1_bin, - unique key (col1) -) engine=rocksdb; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; - -insert into t2 values (1, 'a'); -insert into t2 values (2, 'b'); -insert into t2 values (3, 'c'); -# Check if this is indeed the old format -insert into t2 values (4, 'c '); -select col1 from t2; -delete from t2 where id = 4; -alter table t2 engine=rocksdb; -select col1 from t2; -# Check if this is indeed the new format ---error ER_DUP_ENTRY -insert into t2 values (4, 'c '); -drop table t2; - ---echo # ---echo # Check what happens when one tries to 'upgrade' to the new data format ---echo # and causes a unique key violation: ---echo # -set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL'; -create table t2 ( - pk int not null primary key, - col1 varchar(64) collate latin1_bin, - col2 varchar(64), - unique key (col1) -); - -insert into t2 values (1, 'a ', 'a-space'); -insert into t2 values (2, 'a', 'a'); - -select * from t2; -set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL'; - ---error ER_DUP_ENTRY -alter table t2 engine=rocksdb; -drop table t2; - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test 
new file mode 100644 index 00000000000..349748e91a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test @@ -0,0 +1,47 @@ +--source include/have_rocksdb.inc + +# Issue221 +# Turning on both --rocksdb-allow-mmap-reads and --rocksdb-use-direct-reads +# caused an assertion in RocksDB. Now it should not be allowed and the +# server will not start with that configuration + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart +# the server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Clear the log +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +# Attempt to restart the server with invalid options +--exec echo "restart:--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1" >$_expect_file_name +--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart +--exec echo "restart:" >$_expect_file_name + +# Cleanup +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message +--exec grep "enable both use_direct_reads" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +# Repeat with --rocksdb-use-direct-writes +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +--exec echo "restart:--rocksdb_use_direct_writes=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name +--sleep 0.1 +--exec echo "restart:" >$_expect_file_name + +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +--exec grep "enable both use_direct_writes" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result deleted file mode 100644 index 6099c3af344..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET @start_global_value = @@global.ROCKSDB_ALLOW_OS_BUFFER; -SELECT @start_global_value; -@start_global_value -1 -"Trying to set variable @@global.ROCKSDB_ALLOW_OS_BUFFER to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_ALLOW_OS_BUFFER = 444; -ERROR HY000: Variable 'rocksdb_allow_os_buffer' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result index fbd9d97e994..1cfe5385d5c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result @@ -1,7 +1,7 @@ SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE; SELECT @start_global_value; @start_global_value -8388608 +536870912 "Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly." 
SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444; ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result new file mode 100644 index 00000000000..f200105b542 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result @@ -0,0 +1,121 @@ +CREATE TABLE valid_values (value varchar(255)); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)); +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_DEADLOCK_DETECT; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_DEADLOCK_DETECT; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 1" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 1; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 0" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 0; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to on" +SET @@global.ROCKSDB_DEADLOCK_DETECT = on; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; 
+@@global.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to off" +SET @@global.ROCKSDB_DEADLOCK_DETECT = off; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 1" +SET @@session.ROCKSDB_DEADLOCK_DETECT = 1; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 0" +SET @@session.ROCKSDB_DEADLOCK_DETECT = 0; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to on" +SET @@session.ROCKSDB_DEADLOCK_DETECT = on; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to off" +SET @@session.ROCKSDB_DEADLOCK_DETECT = off; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the session scope variable back 
to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'aaa'" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'bbb'" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +SET @@global.ROCKSDB_DEADLOCK_DETECT = @start_global_value; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +SET @@session.ROCKSDB_DEADLOCK_DETECT = @start_session_value; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result new file mode 100644 index 00000000000..02a4b4040d7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 1" +SET 
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 1; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 0" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 0; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to on" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = on; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 444; +ERROR HY000: Variable 'rocksdb_print_snapshot_conflict_queries' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'aaa'" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'bbb'" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = @start_global_value; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result similarity index 100% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result deleted file mode 100644 index 904a0bc536e..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result +++ /dev/null @@ -1,100 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -CREATE TABLE 
invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); -SET @start_global_value = @@global.ROCKSDB_STORE_CHECKSUMS; -SELECT @start_global_value; -@start_global_value -0 -SET @start_session_value = @@session.ROCKSDB_STORE_CHECKSUMS; -SELECT @start_session_value; -@start_session_value -0 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 1" -SET @@global.ROCKSDB_STORE_CHECKSUMS = 1; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 0" -SET @@global.ROCKSDB_STORE_CHECKSUMS = 0; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to on" -SET @@global.ROCKSDB_STORE_CHECKSUMS = on; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -'# Setting to valid values in session scope#' -"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 1" -SET @@session.ROCKSDB_STORE_CHECKSUMS = 1; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -0 -"Trying to set variable 
@@session.ROCKSDB_STORE_CHECKSUMS to 0" -SET @@session.ROCKSDB_STORE_CHECKSUMS = 0; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -0 -"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to on" -SET @@session.ROCKSDB_STORE_CHECKSUMS = on; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -0 -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'aaa'" -SET @@global.ROCKSDB_STORE_CHECKSUMS = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'bbb'" -SET @@global.ROCKSDB_STORE_CHECKSUMS = 'bbb'; -Got one of the listed errors -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -SET @@global.ROCKSDB_STORE_CHECKSUMS = @start_global_value; -SELECT @@global.ROCKSDB_STORE_CHECKSUMS; -@@global.ROCKSDB_STORE_CHECKSUMS -0 -SET @@session.ROCKSDB_STORE_CHECKSUMS = @start_session_value; -SELECT @@session.ROCKSDB_STORE_CHECKSUMS; -@@session.ROCKSDB_STORE_CHECKSUMS -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result new file mode 100644 index 00000000000..a838d660a91 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) 
ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable 
@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'aaa'" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_global_value; +SELECT 
@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result new file mode 100644 index 00000000000..25b19ee56a4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result @@ -0,0 +1,29 @@ +SET @start_global_value = @@global.rocksdb_tmpdir; +SELECT @start_global_value; +@start_global_value + +select @@session.rocksdb_tmpdir; +@@session.rocksdb_tmpdir + +show global variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +select * from information_schema.global_variables where variable_name='rocksdb_tmpdir'; +VARIABLE_NAME VARIABLE_VALUE +ROCKSDB_TMPDIR +select * from information_schema.session_variables where variable_name='rocksdb_tmpdir'; +VARIABLE_NAME VARIABLE_VALUE +ROCKSDB_TMPDIR +set global rocksdb_tmpdir='value'; +set session rocksdb_tmpdir='value'; +set global rocksdb_tmpdir=1.1; +ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir' +set global rocksdb_tmpdir=1e1; +ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir' +SET @@global.rocksdb_tmpdir = @start_global_value; +SELECT @@global.rocksdb_tmpdir; +@@global.rocksdb_tmpdir + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result new file mode 100644 index 00000000000..d4ffde80001 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE 
valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_TRACE_SST_API; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_TRACE_SST_API; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 1" +SET @@global.ROCKSDB_TRACE_SST_API = 1; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 0" +SET @@global.ROCKSDB_TRACE_SST_API = 0; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to on" +SET @@global.ROCKSDB_TRACE_SST_API = on; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 1" +SET @@session.ROCKSDB_TRACE_SST_API = 1; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT 
@@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 0" +SET @@session.ROCKSDB_TRACE_SST_API = 0; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to on" +SET @@session.ROCKSDB_TRACE_SST_API = on; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'aaa'" +SET @@global.ROCKSDB_TRACE_SST_API = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'bbb'" +SET @@global.ROCKSDB_TRACE_SST_API = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +SET @@global.ROCKSDB_TRACE_SST_API = @start_global_value; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +SET @@session.ROCKSDB_TRACE_SST_API = @start_session_value; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result new file mode 100644 index 00000000000..ec36c309dca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = 
@@global.ROCKSDB_USE_DIRECT_READS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_DIRECT_READS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_DIRECT_READS = 444; +ERROR HY000: Variable 'rocksdb_use_direct_reads' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result new file mode 100644 index 00000000000..4cc787e4586 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_WRITES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_DIRECT_WRITES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_DIRECT_WRITES = 444; +ERROR HY000: Variable 'rocksdb_use_direct_writes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result deleted file mode 100644 index da4cae7a151..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result +++ /dev/null @@ -1,100 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); -SET @start_global_value = @@global.ROCKSDB_VERIFY_CHECKSUMS; -SELECT @start_global_value; -@start_global_value -0 -SET @start_session_value = @@session.ROCKSDB_VERIFY_CHECKSUMS; -SELECT @start_session_value; -@start_session_value -0 -'# Setting to valid values in global scope#' 
-"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 1" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 1; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 0" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 0; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to on" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = on; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -'# Setting to valid values in session scope#' -"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 1" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 1; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -0 -"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 0" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 0; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -0 -"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 
on" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = on; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -0 -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'aaa'" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'bbb'" -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'bbb'; -Got one of the listed errors -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -SET @@global.ROCKSDB_VERIFY_CHECKSUMS = @start_global_value; -SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS; -@@global.ROCKSDB_VERIFY_CHECKSUMS -0 -SET @@session.ROCKSDB_VERIFY_CHECKSUMS = @start_session_value; -SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS; -@@session.ROCKSDB_VERIFY_CHECKSUMS -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result new file mode 100644 index 00000000000..ad71c8909a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +SELECT @start_global_value; 
+@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 1" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 0" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to on" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = on; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 1" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; 
+@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 0" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to on" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = on; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 'aaa'" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = @start_global_value; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test new file mode 100644 index 00000000000..aa532fdc1be --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)); +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_DEADLOCK_DETECT +--let $read_only=0 +--let $session=1 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test new file mode 100644 index 00000000000..92a419a8636 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test similarity index 91% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test index d8c9c559703..e9c04bcc45e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); INSERT INTO invalid_values VALUES('\'bbb\''); ---let $sys_var=ROCKSDB_VERIFY_CHECKSUMS +--let $sys_var=ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS --let $read_only=0 --let $session=1 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test new file mode 100644 index 00000000000..8865914dd18 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test @@ -0,0 +1,38 @@ +--source include/have_rocksdb.inc + +SET @start_global_value = @@global.rocksdb_tmpdir; +SELECT @start_global_value; + +# +# exists as global and session +# +select @@session.rocksdb_tmpdir; + +show global variables like 'rocksdb_tmpdir'; +show session variables like 'rocksdb_tmpdir'; + +select * from information_schema.global_variables where variable_name='rocksdb_tmpdir'; +select * from information_schema.session_variables where variable_name='rocksdb_tmpdir'; + +# +# Show that it is writable +# + +set global rocksdb_tmpdir='value'; +set session rocksdb_tmpdir='value'; + +# +# incorrect types +# +--error ER_WRONG_TYPE_FOR_VAR +set global rocksdb_tmpdir=1.1; +--error ER_WRONG_TYPE_FOR_VAR +set global rocksdb_tmpdir=1e1; + +# +# Cleanup +# + +SET @@global.rocksdb_tmpdir = 
@start_global_value; +SELECT @@global.rocksdb_tmpdir; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test similarity index 92% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test index 023b6420b96..83a0faaffe6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); INSERT INTO invalid_values VALUES('\'bbb\''); ---let $sys_var=ROCKSDB_STORE_CHECKSUMS +--let $sys_var=ROCKSDB_TRACE_SST_API --let $read_only=0 --let $session=1 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test similarity index 74% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test index c38d0c7b210..b730eca6f3b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test @@ -1,6 +1,6 @@ --source include/have_rocksdb.inc ---let $sys_var=ROCKSDB_ALLOW_OS_BUFFER +--let $sys_var=ROCKSDB_USE_DIRECT_READS --let $read_only=1 --let $session=0 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test new file mode 100644 index 
00000000000..2abb2478d82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_DIRECT_WRITES +--let $read_only=1 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test new file mode 100644 index 00000000000..cc35fdb0345 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc index fc8cd97fc72..e5b5b25792d 100644 --- a/storage/rocksdb/properties_collector.cc +++ b/storage/rocksdb/properties_collector.cc @@ -42,10 +42,10 @@ std::atomic rocksdb_num_sst_entry_other(0); my_bool rocksdb_compaction_sequential_deletes_count_sd = false; Rdb_tbl_prop_coll::Rdb_tbl_prop_coll( - Rdb_ddl_manager* ddl_manager, - Rdb_compact_params params, - uint32_t cf_id, - const uint8_t table_stats_sampling_pct + Rdb_ddl_manager* const ddl_manager, + const Rdb_compact_params ¶ms, + const uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct ) : m_cf_id(cf_id), m_ddl_manager(ddl_manager), @@ -56,6 +56,8 @@ 
Rdb_tbl_prop_coll::Rdb_tbl_prop_coll( m_seed(time(nullptr)), m_card_adj_extra(1.) { + DBUG_ASSERT(ddl_manager != nullptr); + // We need to adjust the index cardinality numbers based on the sampling // rate so that the output of "SHOW INDEX" command will reflect reality // more closely. It will still be an approximation, just a better one. @@ -97,9 +99,9 @@ void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) // m_rows % m_deleted_rows_window.size() // m_deleted_rows is the current number of 1's in the vector // --update the counter for the element which will be overridden - bool is_delete= (type == rocksdb::kEntryDelete || - (type == rocksdb::kEntrySingleDelete && - rocksdb_compaction_sequential_deletes_count_sd)); + const bool is_delete= (type == rocksdb::kEntryDelete || + (type == rocksdb::kEntrySingleDelete && + rocksdb_compaction_sequential_deletes_count_sd)); // Only make changes if the value at the current position needs to change if (is_delete != m_deleted_rows_window[m_window_pos]) @@ -167,9 +169,9 @@ Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( void Rdb_tbl_prop_coll::CollectStatsForRow( const rocksdb::Slice& key, const rocksdb::Slice& value, - rocksdb::EntryType type, uint64_t file_size) + const rocksdb::EntryType &type, const uint64_t &file_size) { - auto stats = AccessStats(key); + const auto stats = AccessStats(key); stats->m_data_size += key.size()+value.size(); @@ -241,7 +243,7 @@ const char* Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__"; */ rocksdb::Status Rdb_tbl_prop_coll::Finish( - rocksdb::UserCollectedProperties* properties + rocksdb::UserCollectedProperties* const properties ) { uint64_t num_sst_entry_put = 0; uint64_t num_sst_entry_delete = 0; @@ -249,6 +251,8 @@ Rdb_tbl_prop_coll::Finish( uint64_t num_sst_entry_merge = 0; uint64_t num_sst_entry_other = 0; + DBUG_ASSERT(properties != nullptr); + for (auto it = m_stats.begin(); it != m_stats.end(); it++) { num_sst_entry_put += it->m_rows; @@ -303,7 +307,7 @@ bool 
Rdb_tbl_prop_coll::ShouldCollectStats() { return true; } - int val = rand_r(&m_seed) % + const int val = rand_r(&m_seed) % (RDB_TBL_STATS_SAMPLE_PCT_MAX - RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + RDB_TBL_STATS_SAMPLE_PCT_MIN; @@ -377,11 +381,11 @@ Rdb_tbl_prop_coll::GetReadableStats( void Rdb_tbl_prop_coll::read_stats_from_tbl_props( const std::shared_ptr& table_props, - std::vector* out_stats_vector) + std::vector* const out_stats_vector) { DBUG_ASSERT(out_stats_vector != nullptr); const auto& user_properties = table_props->user_collected_properties; - auto it2 = user_properties.find(std::string(INDEXSTATS_KEY)); + const auto it2 = user_properties.find(std::string(INDEXSTATS_KEY)); if (it2 != user_properties.end()) { auto result __attribute__((__unused__)) = @@ -400,7 +404,7 @@ std::string Rdb_index_stats::materialize( { String ret; rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES); - for (auto i : stats) { + for (const auto &i : stats) { rdb_netstr_append_uint32(&ret, i.m_gl_index_id.cf_id); rdb_netstr_append_uint32(&ret, i.m_gl_index_id.index_id); DBUG_ASSERT(sizeof i.m_data_size <= 8); @@ -412,8 +416,8 @@ std::string Rdb_index_stats::materialize( rdb_netstr_append_uint64(&ret, i.m_entry_single_deletes); rdb_netstr_append_uint64(&ret, i.m_entry_merges); rdb_netstr_append_uint64(&ret, i.m_entry_others); - for (auto num_keys : i.m_distinct_keys_per_prefix) { - float upd_num_keys = num_keys * card_adj_extra; + for (const auto &num_keys : i.m_distinct_keys_per_prefix) { + const float upd_num_keys = num_keys * card_adj_extra; rdb_netstr_append_uint64(&ret, static_cast(upd_num_keys)); } } @@ -428,10 +432,10 @@ std::string Rdb_index_stats::materialize( @return 0 if completes successfully */ int Rdb_index_stats::unmaterialize( - const std::string& s, std::vector* ret) + const std::string& s, std::vector* const ret) { const uchar* p= rdb_std_str_to_uchar_ptr(s); - const uchar* p2= p + s.size(); + const uchar* const p2= p + s.size(); DBUG_ASSERT(ret != nullptr); 
@@ -440,7 +444,7 @@ int Rdb_index_stats::unmaterialize( return 1; } - int version= rdb_netbuf_read_uint16(&p); + const int version= rdb_netbuf_read_uint16(&p); Rdb_index_stats stats; // Make sure version is within supported range. if (version < INDEX_STATS_VERSION_INITIAL || @@ -503,10 +507,13 @@ int Rdb_index_stats::unmaterialize( for the index based on stats for each sst */ void Rdb_index_stats::merge( - const Rdb_index_stats& s, bool increment, int64_t estimated_data_len) + const Rdb_index_stats& s, const bool &increment, + const int64_t &estimated_data_len) { std::size_t i; + DBUG_ASSERT(estimated_data_len >= 0); + m_gl_index_id = s.m_gl_index_id; if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size()) { diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h index 1781d8a26c6..b0f8e5deb59 100644 --- a/storage/rocksdb/properties_collector.h +++ b/storage/rocksdb/properties_collector.h @@ -63,7 +63,7 @@ struct Rdb_index_stats static std::string materialize(const std::vector& stats, const float card_adj_extra); static int unmaterialize(const std::string& s, - std::vector* ret); + std::vector* const ret); Rdb_index_stats() : Rdb_index_stats({0, 0}) {} explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) : @@ -76,8 +76,8 @@ struct Rdb_index_stats m_entry_merges(0), m_entry_others(0) {} - void merge(const Rdb_index_stats& s, bool increment = true, - int64_t estimated_data_len = 0); + void merge(const Rdb_index_stats& s, const bool &increment = true, + const int64_t &estimated_data_len = 0); }; @@ -85,10 +85,10 @@ class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector { public: Rdb_tbl_prop_coll( - Rdb_ddl_manager* ddl_manager, - Rdb_compact_params params, - uint32_t cf_id, - const uint8_t table_stats_sampling_pct + Rdb_ddl_manager* const ddl_manager, + const Rdb_compact_params ¶ms, + const uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct ); /* @@ -124,13 +124,14 @@ class Rdb_tbl_prop_coll : 
public rocksdb::TablePropertiesCollector bool ShouldCollectStats(); void CollectStatsForRow(const rocksdb::Slice& key, - const rocksdb::Slice& value, rocksdb::EntryType type, uint64_t file_size); + const rocksdb::Slice& value, const rocksdb::EntryType &type, + const uint64_t &file_size); Rdb_index_stats* AccessStats(const rocksdb::Slice& key); void AdjustDeletedRows(rocksdb::EntryType type); private: uint32_t m_cf_id; - std::shared_ptr m_keydef; + std::shared_ptr m_keydef; Rdb_ddl_manager* m_ddl_manager; std::vector m_stats; Rdb_index_stats* m_last_stats; @@ -153,6 +154,9 @@ class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector class Rdb_tbl_prop_coll_factory : public rocksdb::TablePropertiesCollectorFactory { public: + Rdb_tbl_prop_coll_factory(const Rdb_tbl_prop_coll_factory&) = delete; + Rdb_tbl_prop_coll_factory& operator=(const Rdb_tbl_prop_coll_factory&) = delete; + explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager* ddl_manager) : m_ddl_manager(ddl_manager) { } @@ -177,12 +181,12 @@ class Rdb_tbl_prop_coll_factory m_params = params; } - void SetTableStatsSamplingPct(const uint8_t table_stats_sampling_pct) { + void SetTableStatsSamplingPct(const uint8_t &table_stats_sampling_pct) { m_table_stats_sampling_pct = table_stats_sampling_pct; } private: - Rdb_ddl_manager* m_ddl_manager; + Rdb_ddl_manager* const m_ddl_manager; Rdb_compact_params m_params; uint8_t m_table_stats_sampling_pct; }; diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h index d29f365ba31..deb718e88c9 100644 --- a/storage/rocksdb/rdb_buff.h +++ b/storage/rocksdb/rdb_buff.h @@ -27,7 +27,8 @@ namespace myrocks { ("netstr") which stores data in Network Byte Order (Big Endian). 
*/ -inline void rdb_netstr_append_uint64(my_core::String *out_netstr, uint64 val) +inline void rdb_netstr_append_uint64(my_core::String* const out_netstr, + const uint64 &val) { DBUG_ASSERT(out_netstr != nullptr); @@ -37,7 +38,8 @@ inline void rdb_netstr_append_uint64(my_core::String *out_netstr, uint64 val) out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); } -inline void rdb_netstr_append_uint32(my_core::String *out_netstr, uint32 val) +inline void rdb_netstr_append_uint32(my_core::String* const out_netstr, + const uint32 &val) { DBUG_ASSERT(out_netstr != nullptr); @@ -47,7 +49,8 @@ inline void rdb_netstr_append_uint32(my_core::String *out_netstr, uint32 val) out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); } -inline void rdb_netstr_append_uint16(my_core::String *out_netstr, uint16 val) +inline void rdb_netstr_append_uint16(my_core::String* const out_netstr, + const uint16 &val) { DBUG_ASSERT(out_netstr != nullptr); @@ -62,15 +65,17 @@ inline void rdb_netstr_append_uint16(my_core::String *out_netstr, uint16 val) Basic network buffer ("netbuf") write helper functions. */ -inline void rdb_netbuf_store_uint64(uchar *dst_netbuf, uint64 n) +inline void rdb_netbuf_store_uint64(uchar* const dst_netbuf, const uint64 &n) { + DBUG_ASSERT(dst_netbuf != nullptr); + // Convert from host byte order (usually Little Endian) to network byte order // (Big Endian). 
uint64 net_val= htobe64(n); memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_uint32(uchar *dst_netbuf, uint32 n) +inline void rdb_netbuf_store_uint32(uchar* const dst_netbuf, const uint32 &n) { DBUG_ASSERT(dst_netbuf != nullptr); @@ -80,7 +85,7 @@ inline void rdb_netbuf_store_uint32(uchar *dst_netbuf, uint32 n) memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_uint16(uchar *dst_netbuf, uint16 n) +inline void rdb_netbuf_store_uint16(uchar* const dst_netbuf, const uint16 &n) { DBUG_ASSERT(dst_netbuf != nullptr); @@ -90,14 +95,15 @@ inline void rdb_netbuf_store_uint16(uchar *dst_netbuf, uint16 n) memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_byte(uchar *dst_netbuf, uchar c) +inline void rdb_netbuf_store_byte(uchar* const dst_netbuf, const uchar &c) { DBUG_ASSERT(dst_netbuf != nullptr); *dst_netbuf= c; } -inline void rdb_netbuf_store_index(uchar *dst_netbuf, uint32 number) +inline void rdb_netbuf_store_index(uchar* const dst_netbuf, + const uint32 &number) { DBUG_ASSERT(dst_netbuf != nullptr); @@ -110,7 +116,7 @@ inline void rdb_netbuf_store_index(uchar *dst_netbuf, uint32 number) machine byte order (usually Little Endian). 
*/ -inline uint64 rdb_netbuf_to_uint64(const uchar *netbuf) +inline uint64 rdb_netbuf_to_uint64(const uchar* const netbuf) { DBUG_ASSERT(netbuf != nullptr); @@ -122,7 +128,7 @@ inline uint64 rdb_netbuf_to_uint64(const uchar *netbuf) return be64toh(net_val); } -inline uint32 rdb_netbuf_to_uint32(const uchar *netbuf) +inline uint32 rdb_netbuf_to_uint32(const uchar* const netbuf) { DBUG_ASSERT(netbuf != nullptr); @@ -134,7 +140,7 @@ inline uint32 rdb_netbuf_to_uint32(const uchar *netbuf) return be32toh(net_val); } -inline uint16 rdb_netbuf_to_uint16(const uchar *netbuf) +inline uint16 rdb_netbuf_to_uint16(const uchar* const netbuf) { DBUG_ASSERT(netbuf != nullptr); @@ -146,7 +152,7 @@ inline uint16 rdb_netbuf_to_uint16(const uchar *netbuf) return be16toh(net_val); } -inline uchar rdb_netbuf_to_byte(const uchar* netbuf) +inline uchar rdb_netbuf_to_byte(const uchar* const netbuf) { DBUG_ASSERT(netbuf != nullptr); @@ -167,7 +173,7 @@ inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - uint64 host_val= rdb_netbuf_to_uint64(*netbuf_ptr); + const uint64 host_val= rdb_netbuf_to_uint64(*netbuf_ptr); // Advance pointer. *netbuf_ptr += sizeof(host_val); @@ -181,7 +187,7 @@ inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - uint32 host_val= rdb_netbuf_to_uint32(*netbuf_ptr); + const uint32 host_val= rdb_netbuf_to_uint32(*netbuf_ptr); // Advance pointer. *netbuf_ptr += sizeof(host_val); @@ -195,7 +201,7 @@ inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - uint16 host_val= rdb_netbuf_to_uint16(*netbuf_ptr); + const uint16 host_val= rdb_netbuf_to_uint16(*netbuf_ptr); // Advance pointer. 
*netbuf_ptr += sizeof(host_val); @@ -204,7 +210,7 @@ inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) } inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr, - GL_INDEX_ID *gl_index_id) + GL_INDEX_ID* const gl_index_id) { DBUG_ASSERT(gl_index_id != nullptr); DBUG_ASSERT(netbuf_ptr != nullptr); @@ -223,7 +229,20 @@ class Rdb_string_reader { const char* m_ptr; uint m_len; + private: + Rdb_string_reader& operator=(const Rdb_string_reader&) = default; public: + Rdb_string_reader(const Rdb_string_reader&) = default; + /* named constructor */ + static Rdb_string_reader read_or_empty(const rocksdb::Slice* const slice) + { + if (!slice) { + return Rdb_string_reader(""); + } else { + return Rdb_string_reader(slice); + } + } + explicit Rdb_string_reader(const std::string &str) { m_len= str.length(); @@ -243,7 +262,7 @@ class Rdb_string_reader } } - explicit Rdb_string_reader(const rocksdb::Slice *slice) + explicit Rdb_string_reader(const rocksdb::Slice* const slice) { m_ptr= slice->data(); m_len= slice->size(); @@ -253,7 +272,7 @@ class Rdb_string_reader Read the next @param size bytes. Returns pointer to the bytes read, or nullptr if the remaining string doesn't have that many bytes. 
*/ - const char *read(uint size) + const char *read(const uint &size) { const char *res; if (m_len < size) @@ -269,7 +288,7 @@ class Rdb_string_reader return res; } - bool read_uint8(uint* res) + bool read_uint8(uint* const res) { const uchar *p; if (!(p= reinterpret_cast(read(1)))) @@ -281,7 +300,7 @@ class Rdb_string_reader } } - bool read_uint16(uint* res) + bool read_uint16(uint* const res) { const uchar *p; if (!(p= reinterpret_cast(read(2)))) @@ -323,42 +342,47 @@ class Rdb_string_writer { std::vector m_data; public: + Rdb_string_writer(const Rdb_string_writer&) = delete; + Rdb_string_writer& operator=(const Rdb_string_writer&) = delete; + Rdb_string_writer() = default; + void clear() { m_data.clear(); } - void write_uint8(uint val) + void write_uint8(const uint &val) { m_data.push_back(static_cast(val)); } - void write_uint16(uint val) + void write_uint16(const uint &val) { - auto size= m_data.size(); + const auto size= m_data.size(); m_data.resize(size + 2); rdb_netbuf_store_uint16(m_data.data() + size, val); } - void write_uint32(uint val) + void write_uint32(const uint &val) { - auto size= m_data.size(); + const auto size= m_data.size(); m_data.resize(size + 4); rdb_netbuf_store_uint32(m_data.data() + size, val); } - void write(uchar *new_data, size_t len) + void write(const uchar* const new_data, const size_t &len) { + DBUG_ASSERT(new_data != nullptr); m_data.insert(m_data.end(), new_data, new_data + len); } uchar* ptr() { return m_data.data(); } size_t get_current_pos() const { return m_data.size(); } - void write_uint8_at(size_t pos, uint new_val) + void write_uint8_at(const size_t &pos, const uint &new_val) { // This function will only overwrite what was written DBUG_ASSERT(pos < get_current_pos()); m_data.data()[pos]= new_val; } - void write_uint16_at(size_t pos, uint new_val) + void write_uint16_at(const size_t &pos, const uint &new_val) { // This function will only overwrite what was written DBUG_ASSERT(pos < get_current_pos() && (pos + 1) < 
get_current_pos()); @@ -378,13 +402,16 @@ class Rdb_bit_writer Rdb_string_writer *m_writer; uchar m_offset; public: + Rdb_bit_writer(const Rdb_bit_writer&) = delete; + Rdb_bit_writer& operator=(const Rdb_bit_writer&) = delete; + explicit Rdb_bit_writer(Rdb_string_writer* writer_arg) : m_writer(writer_arg), m_offset(0) { } - void write(uint size, uint value) + void write(uint size, const uint &value) { DBUG_ASSERT((value & ((1 << size) - 1)) == value); @@ -395,8 +422,8 @@ class Rdb_bit_writer m_writer->write_uint8(0); } // number of bits to put in this byte - uint bits = std::min(size, (uint)(8 - m_offset)); - uchar *last_byte= m_writer->ptr() + m_writer->get_current_pos() - 1; + const uint bits = std::min(size, (uint)(8 - m_offset)); + uchar* const last_byte= m_writer->ptr() + m_writer->get_current_pos() - 1; *last_byte |= (uchar) ((value >> (size - bits)) & ((1 << bits) - 1)) << m_offset; size -= bits; @@ -410,9 +437,12 @@ class Rdb_bit_reader const uchar *m_cur; uchar m_offset; uint m_ret; - Rdb_string_reader *m_reader; + Rdb_string_reader* const m_reader; public: - explicit Rdb_bit_reader(Rdb_string_reader *reader) + Rdb_bit_reader(const Rdb_bit_reader&) = delete; + Rdb_bit_reader& operator=(const Rdb_bit_reader&) = delete; + + explicit Rdb_bit_reader(Rdb_string_reader* const reader) : m_cur(nullptr), m_offset(0), m_reader(reader) @@ -438,7 +468,7 @@ class Rdb_bit_reader } } // how many bits from the current byte? 
- uint bits = std::min((uint)(8 - m_offset), size); + const uint bits = std::min((uint)(8 - m_offset), size); m_ret <<= bits; m_ret |= (*m_cur >> m_offset) & ((1 << bits) - 1); size -= bits; diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 3ee28b49cb6..7ae7d362159 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -28,7 +28,7 @@ namespace myrocks { /* Check if ColumnFamily name says it's a reverse-ordered CF */ -bool Rdb_cf_manager::is_cf_name_reverse(const char *name) +bool Rdb_cf_manager::is_cf_name_reverse(const char* const name) { /* nullptr means the default CF is used.. (TODO: can the default CF be * reverse?) */ @@ -43,8 +43,8 @@ static PSI_mutex_key ex_key_cfm; #endif void Rdb_cf_manager::init( - Rdb_cf_options* cf_options, - std::vector *handles) + Rdb_cf_options* const cf_options, + std::vector* const handles) { mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST); @@ -78,8 +78,8 @@ void Rdb_cf_manager::cleanup() */ void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name, - const char *index_name, - std::string *res) + const char* const index_name, + std::string* const res) { DBUG_ASSERT(index_name != nullptr); DBUG_ASSERT(res != nullptr); @@ -96,11 +96,11 @@ void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name, See Rdb_cf_manager::get_cf */ rocksdb::ColumnFamilyHandle* -Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb, +Rdb_cf_manager::get_or_create_cf(rocksdb::DB* const rdb, const char *cf_name, const std::string& db_table_name, - const char *index_name, - bool *is_automatic) + const char* const index_name, + bool* const is_automatic) { DBUG_ASSERT(rdb != nullptr); DBUG_ASSERT(is_automatic != nullptr); @@ -120,13 +120,13 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb, *is_automatic= true; } - auto it = m_cf_name_map.find(cf_name); + const auto it = m_cf_name_map.find(cf_name); if (it != m_cf_name_map.end()) cf_handle= 
it->second; else { /* Create a Column Family. */ - std::string cf_name_str(cf_name); + const std::string cf_name_str(cf_name); rocksdb::ColumnFamilyOptions opts; m_cf_options->get_cf_options(cf_name_str, &opts); @@ -135,7 +135,8 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb, sql_print_information(" target_file_size_base=%" PRIu64, opts.target_file_size_base); - rocksdb::Status s= rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + const rocksdb::Status s= + rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); if (s.ok()) { m_cf_name_map[cf_handle->GetName()] = cf_handle; m_cf_id_map[cf_handle->GetID()] = cf_handle; @@ -164,10 +165,9 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb, rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const char *cf_name, const std::string& db_table_name, - const char *index_name, - bool *is_automatic) const + const char* const index_name, + bool* const is_automatic) const { - DBUG_ASSERT(cf_name != nullptr); DBUG_ASSERT(is_automatic != nullptr); rocksdb::ColumnFamilyHandle* cf_handle; @@ -185,7 +185,7 @@ Rdb_cf_manager::get_cf(const char *cf_name, *is_automatic= true; } - auto it = m_cf_name_map.find(cf_name); + const auto it = m_cf_name_map.find(cf_name); cf_handle = (it != m_cf_name_map.end()) ? 
it->second : nullptr; mysql_mutex_unlock(&m_mutex); @@ -193,12 +193,12 @@ Rdb_cf_manager::get_cf(const char *cf_name, return cf_handle; } -rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t id) const +rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t &id) const { rocksdb::ColumnFamilyHandle* cf_handle = nullptr; mysql_mutex_lock(&m_mutex); - auto it = m_cf_id_map.find(id); + const auto it = m_cf_id_map.find(id); if (it != m_cf_id_map.end()) cf_handle = it->second; mysql_mutex_unlock(&m_mutex); diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h index 5a43b533c6d..4fb5f7437e8 100644 --- a/storage/rocksdb/rdb_cf_manager.h +++ b/storage/rocksdb/rdb_cf_manager.h @@ -55,19 +55,24 @@ class Rdb_cf_manager static void get_per_index_cf_name(const std::string& db_table_name, - const char *index_name, std::string *res); + const char* const index_name, + std::string* const res); Rdb_cf_options* m_cf_options= nullptr; public: - static bool is_cf_name_reverse(const char *name); + Rdb_cf_manager(const Rdb_cf_manager&) = delete; + Rdb_cf_manager& operator=(const Rdb_cf_manager&) = delete; + Rdb_cf_manager() = default; + + static bool is_cf_name_reverse(const char* const name); /* This is called right after the DB::Open() call. The parameters describe column families that are present in the database. The first CF is the default CF. 
*/ void init(Rdb_cf_options* cf_options, - std::vector *handles); + std::vector* const handles); void cleanup(); /* @@ -76,17 +81,18 @@ public: - cf_name=_auto_ means use 'dbname.tablename.indexname' */ rocksdb::ColumnFamilyHandle* get_or_create_cf( - rocksdb::DB *rdb, const char *cf_name, const std::string& db_table_name, - const char *index_name, bool *is_automatic); + rocksdb::DB* const rdb, const char *cf_name, + const std::string& db_table_name, const char* const index_name, + bool* const is_automatic); /* Used by table open */ rocksdb::ColumnFamilyHandle* get_cf(const char *cf_name, const std::string& db_table_name, - const char *index_name, - bool *is_automatic) const; + const char* const index_name, + bool* const is_automatic) const; /* Look up cf by id; used by datadic */ - rocksdb::ColumnFamilyHandle* get_cf(const uint32_t id) const; + rocksdb::ColumnFamilyHandle* get_cf(const uint32_t &id) const; /* Used to iterate over column families for show status */ std::vector get_cf_names(void) const; @@ -98,7 +104,7 @@ public: void get_cf_options( const std::string &cf_name, - rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__)) { + rocksdb::ColumnFamilyOptions* const opts) __attribute__((__nonnull__)) { m_cf_options->get_cf_options(cf_name, opts); } }; diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index ccdb46a654d..bd4d78d0796 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -41,16 +41,17 @@ Rdb_pk_comparator Rdb_cf_options::s_pk_comparator; Rdb_rev_comparator Rdb_cf_options::s_rev_pk_comparator; bool Rdb_cf_options::init( - size_t default_write_buffer_size, const rocksdb::BlockBasedTableOptions& table_options, std::shared_ptr prop_coll_factory, - const char * default_cf_options, - const char * override_cf_options) + const char* const default_cf_options, + const char* const override_cf_options) { + DBUG_ASSERT(default_cf_options != nullptr); + 
DBUG_ASSERT(override_cf_options != nullptr); + m_default_cf_opts.comparator = &s_pk_comparator; m_default_cf_opts.compaction_filter_factory.reset( new Rdb_compact_filter_factory); - m_default_cf_opts.write_buffer_size = default_write_buffer_size; m_default_cf_opts.table_factory.reset( rocksdb::NewBlockBasedTableFactory(table_options)); @@ -69,7 +70,7 @@ bool Rdb_cf_options::init( } void Rdb_cf_options::get(const std::string &cf_name, - rocksdb::ColumnFamilyOptions *opts) + rocksdb::ColumnFamilyOptions* const opts) { DBUG_ASSERT(opts != nullptr); @@ -106,7 +107,7 @@ bool Rdb_cf_options::set_default(const std::string &default_config) } // Skip over any spaces in the input string. -void Rdb_cf_options::skip_spaces(const std::string& input, size_t* pos) +void Rdb_cf_options::skip_spaces(const std::string& input, size_t* const pos) { DBUG_ASSERT(pos != nullptr); @@ -117,13 +118,14 @@ void Rdb_cf_options::skip_spaces(const std::string& input, size_t* pos) // Find a valid column family name. Note that all characters except a // semicolon are valid (should this change?) and all spaces are trimmed from // the beginning and end but are not removed between other characters. -bool Rdb_cf_options::find_column_family(const std::string& input, size_t* pos, - std::string* key) +bool Rdb_cf_options::find_column_family(const std::string& input, + size_t* const pos, + std::string* const key) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(key != nullptr); - size_t beg_pos = *pos; + const size_t beg_pos = *pos; size_t end_pos = *pos - 1; // Loop through the characters in the string until we see a '='. @@ -148,8 +150,8 @@ bool Rdb_cf_options::find_column_family(const std::string& input, size_t* pos, // Find a valid options portion. Everything is deemed valid within the options // portion until we hit as many close curly braces as we have seen open curly // braces. 
-bool Rdb_cf_options::find_options(const std::string& input, size_t* pos, - std::string* options) +bool Rdb_cf_options::find_options(const std::string& input, size_t* const pos, + std::string* const options) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(options != nullptr); @@ -169,7 +171,7 @@ bool Rdb_cf_options::find_options(const std::string& input, size_t* pos, // Set up our brace_count, the begin position and current end position. size_t brace_count = 1; - size_t beg_pos = *pos; + const size_t beg_pos = *pos; // Loop through the characters in the string until we find the appropriate // number of closing curly braces. @@ -211,9 +213,9 @@ bool Rdb_cf_options::find_options(const std::string& input, size_t* pos, } bool Rdb_cf_options::find_cf_options_pair(const std::string& input, - size_t* pos, - std::string* cf, - std::string* opt_str) + size_t* const pos, + std::string* const cf, + std::string* const opt_str) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(cf != nullptr); @@ -326,7 +328,7 @@ const rocksdb::Comparator* Rdb_cf_options::get_cf_comparator( } void Rdb_cf_options::get_cf_options(const std::string &cf_name, - rocksdb::ColumnFamilyOptions *opts) + rocksdb::ColumnFamilyOptions* const opts) { DBUG_ASSERT(opts != nullptr); diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h index e709e42e8b5..8151d907eb7 100644 --- a/storage/rocksdb/rdb_cf_options.h +++ b/storage/rocksdb/rdb_cf_options.h @@ -41,14 +41,17 @@ namespace myrocks { class Rdb_cf_options { public: - void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions *opts); + Rdb_cf_options(const Rdb_cf_options&) = delete; + Rdb_cf_options& operator=(const Rdb_cf_options&) = delete; + Rdb_cf_options() = default; + + void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions* const opts); bool init( - size_t default_write_buffer_size, const rocksdb::BlockBasedTableOptions& table_options, std::shared_ptr prop_coll_factory, - const char * default_cf_options, - const char * 
override_cf_options); + const char* const default_cf_options, + const char* const override_cf_options); const rocksdb::ColumnFamilyOptions& get_defaults() const { return m_default_cf_opts; @@ -59,20 +62,21 @@ class Rdb_cf_options void get_cf_options( const std::string &cf_name, - rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__)); + rocksdb::ColumnFamilyOptions* const opts) __attribute__((__nonnull__)); private: bool set_default(const std::string &default_config); bool set_override(const std::string &overide_config); /* Helper string manipulation functions */ - static void skip_spaces(const std::string& input, size_t* pos); - static bool find_column_family(const std::string& input, size_t* pos, - std::string* key); - static bool find_options(const std::string& input, size_t* pos, - std::string* options); - static bool find_cf_options_pair(const std::string& input, size_t* pos, - std::string* cf, std::string* opt_str); + static void skip_spaces(const std::string& input, size_t* const pos); + static bool find_column_family(const std::string& input, size_t* const pos, + std::string* const key); + static bool find_options(const std::string& input, size_t* const pos, + std::string* const options); + static bool find_cf_options_pair(const std::string& input, size_t* const pos, + std::string* const cf, + std::string* const opt_str); private: static Rdb_pk_comparator s_pk_comparator; diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h index db2011721b7..ca634f74d43 100644 --- a/storage/rocksdb/rdb_compact_filter.h +++ b/storage/rocksdb/rdb_compact_filter.h @@ -35,6 +35,9 @@ namespace myrocks { class Rdb_compact_filter : public rocksdb::CompactionFilter { public: + Rdb_compact_filter(const Rdb_compact_filter&) = delete; + Rdb_compact_filter& operator=(const Rdb_compact_filter&) = delete; + explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {} ~Rdb_compact_filter() {} @@ -95,6 +98,8 @@ class Rdb_compact_filter : 
public rocksdb::CompactionFilter class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory { public: + Rdb_compact_filter_factory(const Rdb_compact_filter_factory&) = delete; + Rdb_compact_filter_factory& operator=(const Rdb_compact_filter_factory&) = delete; Rdb_compact_filter_factory() {} ~Rdb_compact_filter_factory() {} diff --git a/storage/rocksdb/rdb_comparator.h b/storage/rocksdb/rdb_comparator.h index 7e56c5ab00a..0e47556a778 100644 --- a/storage/rocksdb/rdb_comparator.h +++ b/storage/rocksdb/rdb_comparator.h @@ -35,11 +35,15 @@ namespace myrocks { class Rdb_pk_comparator : public rocksdb::Comparator { public: + Rdb_pk_comparator(const Rdb_pk_comparator&) = delete; + Rdb_pk_comparator& operator=(const Rdb_pk_comparator&) = delete; + Rdb_pk_comparator() = default; + static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) { - size_t a_size= a.size(); - size_t b_size= b.size(); - size_t len= (a_size < b_size) ? a_size : b_size; + const size_t a_size= a.size(); + const size_t b_size= b.size(); + const size_t len= (a_size < b_size) ? 
a_size : b_size; int res; if ((res= memcmp(a.data(), b.data(), len))) @@ -75,6 +79,10 @@ class Rdb_pk_comparator : public rocksdb::Comparator class Rdb_rev_comparator : public rocksdb::Comparator { public: + Rdb_rev_comparator(const Rdb_rev_comparator&) = delete; + Rdb_rev_comparator& operator=(const Rdb_rev_comparator&) = delete; + Rdb_rev_comparator() = default; + static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) { return -Rdb_pk_comparator::bytewise_compare(a, b); diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index d80c81a3d9a..3fa5f07844b 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -99,14 +99,14 @@ Rdb_key_def::Rdb_key_def(const Rdb_key_def& k) : rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); if (k.m_pack_info) { - size_t size= sizeof(Rdb_field_packing) * k.m_key_parts; + const size_t size= sizeof(Rdb_field_packing) * k.m_key_parts; m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); memcpy(m_pack_info, k.m_pack_info, size); } if (k.m_pk_part_no) { - size_t size = sizeof(uint)*m_key_parts; + const size_t size = sizeof(uint)*m_key_parts; m_pk_part_no= reinterpret_cast(my_malloc(size, MYF(0))); memcpy(m_pk_part_no, k.m_pk_part_no, size); } @@ -123,7 +123,8 @@ Rdb_key_def::~Rdb_key_def() m_pack_info = nullptr; } -void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) +void Rdb_key_def::setup(const TABLE* const tbl, + const Rdb_tbl_def* const tbl_def) { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(tbl_def != nullptr); @@ -194,7 +195,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) else m_pk_part_no= nullptr; - size_t size= sizeof(Rdb_field_packing) * m_key_parts; + const size_t size= sizeof(Rdb_field_packing) * m_key_parts; m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); size_t max_len= INDEX_NUMBER_SIZE; @@ -222,7 +223,7 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) /* 
this loop also loops over the 'extended key' tail */ for (uint src_i= 0; src_i < m_key_parts; src_i++, keypart_to_set++) { - Field *field= key_part ? key_part->field : nullptr; + Field* const field= key_part ? key_part->field : nullptr; if (simulating_extkey && !hidden_pk_exists) { @@ -346,13 +347,12 @@ void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def) set of queries for which we would check the checksum twice. */ -uint Rdb_key_def::get_primary_key_tuple(TABLE *table, - const std::shared_ptr& pk_descr, - const rocksdb::Slice *key, - uchar *pk_buffer) const +uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, + const Rdb_key_def& pk_descr, + const rocksdb::Slice* const key, + uchar* const pk_buffer) const { DBUG_ASSERT(table != nullptr); - DBUG_ASSERT(pk_descr != nullptr); DBUG_ASSERT(key != nullptr); DBUG_ASSERT(pk_buffer); @@ -361,7 +361,7 @@ uint Rdb_key_def::get_primary_key_tuple(TABLE *table, DBUG_ASSERT(m_pk_key_parts); /* Put the PK number */ - rdb_netbuf_store_index(buf, pk_descr->m_index_number); + rdb_netbuf_store_index(buf, pk_descr.m_index_number); buf += INDEX_NUMBER_SIZE; size += INDEX_NUMBER_SIZE; @@ -404,10 +404,10 @@ uint Rdb_key_def::get_primary_key_tuple(TABLE *table, if (have_value) { - Rdb_field_packing *fpi= &m_pack_info[i]; + Rdb_field_packing* const fpi= &m_pack_info[i]; DBUG_ASSERT(table->s != nullptr); - bool is_hidden_pk_part= (i + 1 == m_key_parts) && + const bool is_hidden_pk_part= (i + 1 == m_key_parts) && (table->s->primary_key == MAX_INDEXES); Field *field= nullptr; if (!is_hidden_pk_part) @@ -424,7 +424,7 @@ uint Rdb_key_def::get_primary_key_tuple(TABLE *table, for (i= 0; i < m_pk_key_parts; i++) { - uint part_size= end_offs[i] - start_offs[i]; + const uint part_size= end_offs[i] - start_offs[i]; memcpy(buf, start_offs[i], end_offs[i] - start_offs[i]); buf += part_size; size += part_size; @@ -445,9 +445,10 @@ uint Rdb_key_def::get_primary_key_tuple(TABLE *table, size is at least 
max_storage_fmt_length() bytes. */ -uint Rdb_key_def::pack_index_tuple(TABLE *tbl, uchar *pack_buffer, - uchar *packed_tuple, const uchar *key_tuple, - key_part_map keypart_map) const +uint Rdb_key_def::pack_index_tuple(TABLE* const tbl, uchar* const pack_buffer, + uchar* const packed_tuple, + const uchar* const key_tuple, + const key_part_map &keypart_map) const { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(pack_buffer != nullptr); @@ -455,7 +456,7 @@ uint Rdb_key_def::pack_index_tuple(TABLE *tbl, uchar *pack_buffer, DBUG_ASSERT(key_tuple != nullptr); /* We were given a record in KeyTupleFormat. First, save it to record */ - uint key_len= calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); + const uint key_len= calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); key_restore(tbl->record[0], key_tuple, &tbl->key_info[m_keyno], key_len); uint n_used_parts= my_count_bits(keypart_map); @@ -486,7 +487,7 @@ bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) if (size >= RDB_UNPACK_HEADER_SIZE && ptr[0] == RDB_UNPACK_DATA_TAG) { - uint16 skip_len= rdb_netbuf_to_uint16(ptr + 1); + const uint16 skip_len= rdb_netbuf_to_uint16(ptr + 1); SHIP_ASSERT(size >= skip_len); size -= skip_len; @@ -499,7 +500,7 @@ bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) /* @return Number of bytes that were changed */ -int Rdb_key_def::successor(uchar *packed_tuple, uint len) +int Rdb_key_def::successor(uchar* const packed_tuple, const uint &len) { DBUG_ASSERT(packed_tuple != nullptr); @@ -541,12 +542,14 @@ int Rdb_key_def::successor(uchar *packed_tuple, uint len) Length of the packed tuple */ -uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, - const uchar *record, uchar *packed_tuple, - Rdb_string_writer *unpack_info, - bool should_store_checksums, - longlong hidden_pk_id, uint n_key_parts, - uint *n_null_fields) const +uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, + const uchar* 
const record, + uchar* const packed_tuple, + Rdb_string_writer* const unpack_info, + const bool &should_store_row_debug_checksums, + const longlong &hidden_pk_id, + uint n_key_parts, + uint* const n_null_fields) const { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(pack_buffer != nullptr); @@ -554,7 +557,7 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, DBUG_ASSERT(packed_tuple != nullptr); // Checksums for PKs are made when record is packed. // We should never attempt to make checksum just from PK values - DBUG_ASSERT_IMP(should_store_checksums, + DBUG_ASSERT_IMP(should_store_row_debug_checksums, (m_index_type == INDEX_TYPE_SECONDARY)); uchar *tuple= packed_tuple; @@ -568,7 +571,7 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, // The following includes the 'extended key' tail. // The 'extended key' includes primary key. This is done to 'uniqify' // non-unique indexes - bool use_all_columns = n_key_parts == 0 || n_key_parts == MAX_REF_PARTS; + const bool use_all_columns = n_key_parts == 0 || n_key_parts == MAX_REF_PARTS; // If hidden pk exists, but hidden pk wasnt passed in, we can't pack the // hidden key part. So we skip it (its always 1 part). @@ -599,13 +602,13 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, break; } - Field *field= m_pack_info[i].get_field_in_table(tbl); + Field* const field= m_pack_info[i].get_field_in_table(tbl); DBUG_ASSERT(field != nullptr); // Old Field methods expected the record pointer to be at tbl->record[0]. // The quick and easy way to fix this was to pass along the offset // for the pointer. 
- my_ptrdiff_t ptr_diff= record - tbl->record[0]; + const my_ptrdiff_t ptr_diff= record - tbl->record[0]; if (field->real_maybe_null()) { @@ -626,11 +629,9 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, } } - bool create_unpack_info= + const bool create_unpack_info= (unpack_info && // we were requested to generate unpack_info - m_pack_info[i].uses_unpack_info() && // and this keypart uses it - index_format_min_check(PRIMARY_FORMAT_VERSION_UPDATE1, - SECONDARY_FORMAT_VERSION_UPDATE1)); + m_pack_info[i].uses_unpack_info()); // and this keypart uses it Rdb_pack_field_context pack_ctx(unpack_info); // Set the offset for methods which do not take an offset as an argument @@ -652,7 +653,7 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, if (unpack_info) { - size_t len= unpack_info->get_current_pos(); + const size_t len= unpack_info->get_current_pos(); DBUG_ASSERT(len <= std::numeric_limits::max()); // Don't store the unpack_info if it has only the header (that is, there's @@ -676,10 +677,10 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, // so the checksums are computed and stored by // ha_rocksdb::convert_record_to_storage_format // - if (should_store_checksums) + if (should_store_row_debug_checksums) { - uint32_t key_crc32= crc32(0, packed_tuple, tuple - packed_tuple); - uint32_t val_crc32= crc32(0, unpack_info->ptr(), + const uint32_t key_crc32= crc32(0, packed_tuple, tuple - packed_tuple); + const uint32_t val_crc32= crc32(0, unpack_info->ptr(), unpack_info->get_current_pos()); unpack_info->write_uint8(RDB_CHECKSUM_DATA_TAG); @@ -705,8 +706,8 @@ uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer, Length of the packed tuple */ -uint Rdb_key_def::pack_hidden_pk(longlong hidden_pk_id, - uchar *packed_tuple) const +uint Rdb_key_def::pack_hidden_pk(const longlong &hidden_pk_id, + uchar* const packed_tuple) const { DBUG_ASSERT(packed_tuple != nullptr); @@ -728,10 +729,11 @@ uint 
Rdb_key_def::pack_hidden_pk(longlong hidden_pk_id, Function of type rdb_index_field_pack_t */ -void rdb_pack_with_make_sort_key(Rdb_field_packing *fpi, Field *field, - uchar *buf __attribute__((__unused__)), +void rdb_pack_with_make_sort_key(Rdb_field_packing* const fpi, + Field* const field, + uchar* const buf __attribute__((__unused__)), uchar **dst, - Rdb_pack_field_context *pack_ctx + Rdb_pack_field_context* const pack_ctx __attribute__((__unused__))) { DBUG_ASSERT(fpi != nullptr); @@ -756,7 +758,7 @@ void rdb_pack_with_make_sort_key(Rdb_field_packing *fpi, Field *field, int Rdb_key_def::compare_keys( const rocksdb::Slice *key1, const rocksdb::Slice *key2, - std::size_t* column_index + std::size_t* const column_index ) const { DBUG_ASSERT(key1 != nullptr); @@ -779,11 +781,11 @@ int Rdb_key_def::compare_keys( for (uint i= 0; i < m_key_parts ; i++) { - Rdb_field_packing *fpi= &m_pack_info[i]; + const Rdb_field_packing* const fpi= &m_pack_info[i]; if (fpi->m_maybe_null) { - auto nullp1= reader1.read(1); - auto nullp2= reader2.read(1); + const auto nullp1= reader1.read(1); + const auto nullp2= reader2.read(1); if (nullp1 == nullptr || nullp2 == nullptr) return 1; //error @@ -800,15 +802,15 @@ int Rdb_key_def::compare_keys( } } - auto before_skip1 = reader1.get_current_ptr(); - auto before_skip2 = reader2.get_current_ptr(); + const auto before_skip1 = reader1.get_current_ptr(); + const auto before_skip2 = reader2.get_current_ptr(); DBUG_ASSERT(fpi->m_skip_func); if (fpi->m_skip_func(fpi, nullptr, &reader1)) return 1; if (fpi->m_skip_func(fpi, nullptr, &reader2)) return 1; - auto size1 = reader1.get_current_ptr() - before_skip1; - auto size2 = reader2.get_current_ptr() - before_skip2; + const auto size1 = reader1.get_current_ptr() - before_skip1; + const auto size2 = reader2.get_current_ptr() - before_skip2; if (size1 != size2) { *column_index = i; @@ -835,7 +837,8 @@ int Rdb_key_def::compare_keys( Fixed-size skip functions just read. 
*/ -size_t Rdb_key_def::key_length(TABLE *table, const rocksdb::Slice &key) const +size_t Rdb_key_def::key_length(const TABLE* const table, + const rocksdb::Slice &key) const { DBUG_ASSERT(table != nullptr); @@ -846,8 +849,8 @@ size_t Rdb_key_def::key_length(TABLE *table, const rocksdb::Slice &key) const for (uint i= 0; i < m_key_parts ; i++) { - Rdb_field_packing *fpi= &m_pack_info[i]; - Field *field= nullptr; + const Rdb_field_packing *fpi= &m_pack_info[i]; + const Field *field= nullptr; if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY) field= fpi->get_field_in_table(table); if (fpi->m_skip_func(fpi, field, &reader)) @@ -870,30 +873,26 @@ size_t Rdb_key_def::key_length(TABLE *table, const rocksdb::Slice &key) const unpacking. */ -int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, - const rocksdb::Slice *packed_key, - const rocksdb::Slice *unpack_info, - bool verify_checksums) const +int Rdb_key_def::unpack_record(TABLE* const table, uchar* const buf, + const rocksdb::Slice* const packed_key, + const rocksdb::Slice* const unpack_info, + const bool &verify_row_debug_checksums) const { Rdb_string_reader reader(packed_key); - Rdb_string_reader unp_reader(""); + Rdb_string_reader unp_reader= Rdb_string_reader::read_or_empty(unpack_info); + const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); const bool hidden_pk_exists= table_has_hidden_pk(table); const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY); // There is no checksuming data after unpack_info for primary keys, because // the layout there is different. The checksum is verified in // ha_rocksdb::convert_record_from_storage_format instead. - DBUG_ASSERT_IMP(!secondary_key, !verify_checksums); - - if (unpack_info) - { - unp_reader= Rdb_string_reader(unpack_info); - } + DBUG_ASSERT_IMP(!secondary_key, !verify_row_debug_checksums); // Old Field methods expected the record pointer to be at tbl->record[0]. 
// The quick and easy way to fix this was to pass along the offset // for the pointer. - my_ptrdiff_t ptr_diff= buf - table->record[0]; + const my_ptrdiff_t ptr_diff= buf - table->record[0]; // Skip the index number if ((!reader.read(INDEX_NUMBER_SIZE))) @@ -904,7 +903,7 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, // For secondary keys, we expect the value field to contain unpack data and // checksum data in that order. One or both can be missing, but they cannot // be reordered. - bool has_unpack_info= unp_reader.remaining_bytes() && + const bool has_unpack_info= unp_reader.remaining_bytes() && *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG; if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE)) { @@ -913,7 +912,7 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, for (uint i= 0; i < m_key_parts ; i++) { - Rdb_field_packing *fpi= &m_pack_info[i]; + Rdb_field_packing* const fpi= &m_pack_info[i]; /* Hidden pk field is packed at the end of the secondary keys, but the SQL @@ -930,12 +929,9 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, continue; } - Field *field= fpi->get_field_in_table(table); + Field* const field= fpi->get_field_in_table(table); - bool do_unpack= secondary_key || - !fpi->uses_unpack_info() || - (m_kv_format_version >= Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1); - if (fpi->m_unpack_func && do_unpack) + if (fpi->m_unpack_func) { /* It is possible to unpack this column. Do it. */ @@ -964,9 +960,9 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, // If we need unpack info, but there is none, tell the unpack function // this by passing unp_reader as nullptr. If we never read unpack_info // during unpacking anyway, then there won't an error. 
- int res; - bool maybe_missing_unpack= !has_unpack_info && fpi->uses_unpack_info(); - res= fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, + const bool maybe_missing_unpack= + !has_unpack_info && fpi->uses_unpack_info(); + const int res= fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, &reader, maybe_missing_unpack ? nullptr : &unp_reader); @@ -1001,16 +997,16 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, const char* ptr; if ((ptr= unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG) { - if (verify_checksums) + if (verify_row_debug_checksums) { uint32_t stored_key_chksum= rdb_netbuf_to_uint32( (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE)); - uint32_t stored_val_chksum= rdb_netbuf_to_uint32( + const uint32_t stored_val_chksum= rdb_netbuf_to_uint32( (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE)); - uint32_t computed_key_chksum= + const uint32_t computed_key_chksum= crc32(0, (const uchar*)packed_key->data(), packed_key->size()); - uint32_t computed_val_chksum= + const uint32_t computed_val_chksum= crc32(0, (const uchar*) unpack_info->data(), unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); @@ -1044,20 +1040,20 @@ int Rdb_key_def::unpack_record(TABLE *table, uchar *buf, return 0; } -bool Rdb_key_def::table_has_hidden_pk(const TABLE* table) +bool Rdb_key_def::table_has_hidden_pk(const TABLE* const table) { return table->s->primary_key == MAX_INDEXES; } -void Rdb_key_def::report_checksum_mismatch(bool is_key, const char *data, - size_t data_size) const +void Rdb_key_def::report_checksum_mismatch(const bool &is_key, + const char* const data, + const size_t data_size) const { - std::string buf; // NO_LINT_DEBUG sql_print_error("Checksum mismatch in %s of key-value pair for index 0x%x", is_key? 
"key" : "value", get_index_number()); - buf = rdb_hexdump(data, data_size, 1000); + const std::string buf = rdb_hexdump(data, data_size, RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("Data with incorrect checksum (%" PRIu64 " bytes): %s", (uint64_t)data_size, buf.c_str()); @@ -1065,7 +1061,8 @@ void Rdb_key_def::report_checksum_mismatch(bool is_key, const char *data, my_error(ER_INTERNAL_ERROR, MYF(0), "Record checksum mismatch"); } -bool Rdb_key_def::index_format_min_check(int pk_min, int sk_min) const +bool Rdb_key_def::index_format_min_check(const int &pk_min, + const int &sk_min) const { switch (m_index_type) { @@ -1088,9 +1085,9 @@ bool Rdb_key_def::index_format_min_check(int pk_min, int sk_min) const Function of type rdb_index_field_skip_t */ -int rdb_skip_max_length(const Rdb_field_packing *fpi, - const Field *field __attribute__((__unused__)), - Rdb_string_reader *reader) +int rdb_skip_max_length(const Rdb_field_packing* const fpi, + const Field* const field __attribute__((__unused__)), + Rdb_string_reader* const reader) { if (!reader->read(fpi->m_max_image_len)) return 1; @@ -1112,8 +1109,8 @@ static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0, */ static int rdb_skip_variable_length( - const Rdb_field_packing *fpi __attribute__((__unused__)), - const Field *field, Rdb_string_reader *reader) + const Rdb_field_packing* const fpi __attribute__((__unused__)), + const Field* const field, Rdb_string_reader* const reader) { const uchar *ptr; bool finished= false; @@ -1121,7 +1118,7 @@ static int rdb_skip_variable_length( size_t dst_len; /* How much data can be there */ if (field) { - const Field_varstring* field_var= + const Field_varstring* const field_var= static_cast(field); dst_len= field_var->pack_length() - field_var->length_bytes; } @@ -1134,8 +1131,8 @@ static int rdb_skip_variable_length( while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) { /* See rdb_pack_with_varchar_encoding. 
*/ - uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes - uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; + const uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + const uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) { @@ -1167,8 +1164,8 @@ const int VARCHAR_CMP_GREATER_THAN_SPACES = 3; */ static int rdb_skip_variable_space_pad( - const Rdb_field_packing *fpi, - const Field *field, Rdb_string_reader *reader) + const Rdb_field_packing* const fpi, + const Field* const field, Rdb_string_reader* const reader) { const uchar *ptr; bool finished= false; @@ -1177,7 +1174,7 @@ static int rdb_skip_variable_space_pad( if (field) { - const Field_varstring* field_var= + const Field_varstring* const field_var= static_cast(field); dst_len= field_var->pack_length() - field_var->length_bytes; } @@ -1186,7 +1183,7 @@ static int rdb_skip_variable_space_pad( while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) { // See rdb_pack_with_varchar_space_pad - uchar c= ptr[fpi->m_segment_size-1]; + const uchar c= ptr[fpi->m_segment_size-1]; if (c == VARCHAR_CMP_EQUAL_TO_SPACES) { // This is the last segment @@ -1221,9 +1218,9 @@ static int rdb_skip_variable_space_pad( */ int rdb_unpack_integer( - Rdb_field_packing *fpi, Field *field, uchar *to, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const fpi, Field* const field, uchar* const to, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { const int length= fpi->m_max_image_len; @@ -1254,7 +1251,7 @@ int rdb_unpack_integer( } #if !defined(WORDS_BIGENDIAN) -static void rdb_swap_double_bytes(uchar *dst, const uchar *src) +static void rdb_swap_double_bytes(uchar* const dst, const uchar* const src) { #if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) // A few systems store the most-significant 
_word_ first on little-endian @@ -1266,7 +1263,7 @@ static void rdb_swap_double_bytes(uchar *dst, const uchar *src) #endif } -static void rdb_swap_float_bytes(uchar *dst, const uchar *src) +static void rdb_swap_float_bytes(uchar* const dst, const uchar* const src) { dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; } @@ -1276,15 +1273,13 @@ static void rdb_swap_float_bytes(uchar *dst, const uchar *src) #endif static int rdb_unpack_floating_point( - uchar *dst, Rdb_string_reader *reader, - size_t size, int exp_digit, - const uchar *zero_pattern, - const uchar *zero_val, + uchar* const dst, Rdb_string_reader* const reader, const size_t &size, + const int &exp_digit, + const uchar* const zero_pattern, + const uchar* const zero_val, void (*swap_func)(uchar *, const uchar *)) { - const uchar* from; - - from= (const uchar*) reader->read(size); + const uchar* const from = (const uchar*) reader->read(size); if (from == nullptr) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ @@ -1297,7 +1292,7 @@ static int rdb_unpack_floating_point( #if defined(WORDS_BIGENDIAN) // On big-endian, output can go directly into result - uchar *tmp = dst; + uchar* const tmp = dst; #else // Otherwise use a temporary buffer to make byte-swapping easier later uchar tmp[8]; @@ -1347,11 +1342,11 @@ static int rdb_unpack_floating_point( allowed in the database. 
*/ static int rdb_unpack_double( - Rdb_field_packing *fpi __attribute__((__unused__)), - Field *field __attribute__((__unused__)), - uchar *field_ptr, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const fpi __attribute__((__unused__)), + Field* const field __attribute__((__unused__)), + uchar* const field_ptr, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { static double zero_val = 0.0; static const uchar zero_pattern[8] = { 128, 0, 0, 0, 0, 0, 0, 0 }; @@ -1375,10 +1370,10 @@ static int rdb_unpack_double( allowed in the database. */ static int rdb_unpack_float( - Rdb_field_packing *, Field *field __attribute__((__unused__)), - uchar *field_ptr, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const, Field* const field __attribute__((__unused__)), + uchar* const field_ptr, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { static float zero_val = 0.0; static const uchar zero_pattern[4] = { 128, 0, 0, 0 }; @@ -1395,10 +1390,10 @@ static int rdb_unpack_float( */ int rdb_unpack_newdate( - Rdb_field_packing *fpi, Field *field, - uchar *field_ptr, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const fpi, Field* constfield, + uchar* const field_ptr, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { const char* from; DBUG_ASSERT(fpi->m_max_image_len == 3); @@ -1420,10 +1415,9 @@ int rdb_unpack_newdate( */ static int rdb_unpack_binary_str( - Rdb_field_packing *fpi, Field *field, - uchar *to, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const fpi, Field* const field, uchar* const to, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader 
__attribute__((__unused__))) { const char* from; if (!(from= reader->read(fpi->m_max_image_len))) @@ -1441,18 +1435,18 @@ static int rdb_unpack_binary_str( */ static int rdb_unpack_utf8_str( - Rdb_field_packing *fpi, Field *field, + Rdb_field_packing* const fpi, Field* const field, uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { - my_core::CHARSET_INFO *cset= (my_core::CHARSET_INFO*)field->charset(); + my_core::CHARSET_INFO* const cset= (my_core::CHARSET_INFO*)field->charset(); const uchar *src; if (!(src= (const uchar*)reader->read(fpi->m_max_image_len))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ - const uchar *src_end= src + fpi->m_max_image_len; - uchar *dst_end= dst + field->pack_length(); + const uchar* const src_end= src + fpi->m_max_image_len; + uchar* const dst_end= dst + field->pack_length(); while (src < src_end) { @@ -1476,8 +1470,8 @@ static int rdb_unpack_utf8_str( */ static void rdb_pack_with_varchar_encoding( - Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst, - Rdb_pack_field_context *pack_ctx __attribute__((__unused__))) + Rdb_field_packing* const fpi, Field* const field, uchar *buf, uchar **dst, + Rdb_pack_field_context* const pack_ctx __attribute__((__unused__))) { /* Use a flag byte every Nth byte. Set it to (255 - #pad) where #pad is 0 @@ -1489,14 +1483,14 @@ static void rdb_pack_with_varchar_encoding( * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252 And the 4 byte string compares as greater than the 3 byte string */ - const CHARSET_INFO *charset= field->charset(); - Field_varstring *field_var= (Field_varstring*)field; + const CHARSET_INFO* const charset= field->charset(); + Field_varstring* const field_var= (Field_varstring*)field; - size_t value_length= (field_var->length_bytes == 1) ? 
- (uint) *field->ptr : - uint2korr(field->ptr); - size_t xfrm_len; - xfrm_len= charset->coll->strnxfrm(charset, + const size_t value_length= (field_var->length_bytes == 1) ? + (uint) *field->ptr : + uint2korr(field->ptr); + size_t xfrm_len= charset->coll->strnxfrm( + charset, buf, fpi->m_max_image_len, field_var->char_length(), field_var->ptr + field_var->length_bytes, @@ -1506,11 +1500,11 @@ static void rdb_pack_with_varchar_encoding( /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */ size_t encoded_size= 0; - uchar *ptr= *dst; + uchar* ptr= *dst; while (1) { - size_t copy_len= std::min((size_t)RDB_ESCAPE_LENGTH-1, xfrm_len); - size_t padding_bytes= RDB_ESCAPE_LENGTH - 1 - copy_len; + const size_t copy_len= std::min((size_t)RDB_ESCAPE_LENGTH-1, xfrm_len); + const size_t padding_bytes= RDB_ESCAPE_LENGTH - 1 - copy_len; memcpy(ptr, buf, copy_len); ptr += copy_len; buf += copy_len; @@ -1534,8 +1528,8 @@ static void rdb_pack_with_varchar_encoding( */ static -int rdb_compare_string_with_spaces(const uchar *buf, const uchar *buf_end, - const std::vector *space_xfrm) +int rdb_compare_string_with_spaces(const uchar *buf, const uchar* const buf_end, + const std::vector* const space_xfrm) { int cmp= 0; while (buf < buf_end) @@ -1621,24 +1615,24 @@ static const int RDB_TRIMMED_CHARS_OFFSET= 8; */ static void rdb_pack_with_varchar_space_pad( - Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst, - Rdb_pack_field_context *pack_ctx) + Rdb_field_packing* const fpi, Field* const field, uchar* buf, + uchar **dst, Rdb_pack_field_context* const pack_ctx) { - Rdb_string_writer *unpack_info= pack_ctx->writer; - const CHARSET_INFO *charset= field->charset(); - auto field_var= static_cast(field); + Rdb_string_writer* const unpack_info= pack_ctx->writer; + const CHARSET_INFO* const charset= field->charset(); + const auto field_var= static_cast(field); - size_t value_length= (field_var->length_bytes == 1) ? 
- (uint) *field->ptr : - uint2korr(field->ptr); + const size_t value_length= (field_var->length_bytes == 1) ? + (uint) *field->ptr : + uint2korr(field->ptr); - size_t trimmed_len= + const size_t trimmed_len= charset->cset->lengthsp(charset, (const char*)field_var->ptr + field_var->length_bytes, value_length); - size_t xfrm_len; - xfrm_len= charset->coll->strnxfrm(charset, + const size_t xfrm_len = charset->coll->strnxfrm( + charset, buf, fpi->m_max_image_len, field_var->char_length(), field_var->ptr + field_var->length_bytes, @@ -1646,14 +1640,15 @@ static void rdb_pack_with_varchar_space_pad( 0); /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */ - uchar *buf_end= buf + xfrm_len; + uchar* const buf_end= buf + xfrm_len; size_t encoded_size= 0; uchar *ptr= *dst; size_t padding_bytes; while (true) { - size_t copy_len= std::min(fpi->m_segment_size-1, buf_end - buf); + const size_t copy_len= + std::min(fpi->m_segment_size-1, buf_end - buf); padding_bytes= fpi->m_segment_size - 1 - copy_len; memcpy(ptr, buf, copy_len); ptr += copy_len; @@ -1670,7 +1665,8 @@ static void rdb_pack_with_varchar_space_pad( // Compare the string suffix with a hypothetical infinite string of // spaces. It could be that the first difference is beyond the end of // current chunk. - int cmp= rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); + const int cmp= + rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); if (cmp < 0) *ptr= VARCHAR_CMP_LESS_THAN_SPACES; @@ -1698,7 +1694,8 @@ static void rdb_pack_with_varchar_space_pad( // then, we add 8, because we don't store negative values. 
DBUG_ASSERT(padding_bytes % fpi->space_xfrm_len == 0); DBUG_ASSERT((value_length - trimmed_len)% fpi->space_mb_len == 0); - size_t removed_chars= RDB_TRIMMED_CHARS_OFFSET + + const size_t removed_chars= + RDB_TRIMMED_CHARS_OFFSET + (value_length - trimmed_len) / fpi->space_mb_len - padding_bytes/fpi->space_xfrm_len; @@ -1721,20 +1718,20 @@ static void rdb_pack_with_varchar_space_pad( */ static int rdb_unpack_binary_or_utf8_varchar( - Rdb_field_packing *fpi, Field *field, - uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader __attribute__((__unused__))) + Rdb_field_packing* const fpi, Field* const field, + uchar* dst, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader __attribute__((__unused__))) { const uchar *ptr; size_t len= 0; bool finished= false; uchar *d0= dst; - Field_varstring* field_var= (Field_varstring*)field; + Field_varstring* const field_var= (Field_varstring*)field; dst += field_var->length_bytes; // How much we can unpack size_t dst_len= field_var->pack_length() - field_var->length_bytes; - uchar *dst_end= dst + dst_len; + uchar* const dst_end= dst + dst_len; /* Decode the length-emitted encoding here */ while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) @@ -1823,15 +1820,15 @@ static int rdb_unpack_binary_or_utf8_varchar( rdb_skip_variable_space_pad - skip function */ static int rdb_unpack_binary_or_utf8_varchar_space_pad( - Rdb_field_packing *fpi, Field *field, - uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader) + Rdb_field_packing* const fpi, Field* const field, + uchar* dst, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader) { const uchar *ptr; size_t len= 0; bool finished= false; - Field_varstring* field_var= static_cast(field); + Field_varstring* const field_var= static_cast(field); uchar *d0= dst; uchar *dst_end= dst + field_var->pack_length(); dst += field_var->length_bytes; @@ -1859,7 +1856,7 @@ static int 
rdb_unpack_binary_or_utf8_varchar_space_pad( /* Decode the length-emitted encoding here */ while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) { - char last_byte= ptr[fpi->m_segment_size - 1]; + const char last_byte= ptr[fpi->m_segment_size - 1]; size_t used_bytes; if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment { @@ -1891,7 +1888,7 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( } const uchar *src= ptr; - const uchar *src_end= ptr + used_bytes; + const uchar* const src_end= ptr + used_bytes; while (src < src_end) { my_wc_t wc= (src[0] <<8) | src[1]; @@ -1953,7 +1950,7 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( static void rdb_make_unpack_unknown( const Rdb_collation_codec *codec __attribute__((__unused__)), - const Field *field, Rdb_pack_field_context *pack_ctx) + const Field* const field, Rdb_pack_field_context* const pack_ctx) { pack_ctx->writer->write(field->ptr, field->pack_length()); } @@ -1978,13 +1975,13 @@ static void rdb_dummy_make_unpack_info( Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_unknown(Rdb_field_packing *fpi, Field *field, - uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader) +static int rdb_unpack_unknown(Rdb_field_packing* const fpi, Field* const field, + uchar* const dst, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader) { const uchar *ptr; - uint len = fpi->m_unpack_data_len; + const uint len = fpi->m_unpack_data_len; // We don't use anything from the key, so skip over it. 
if (rdb_skip_max_length(fpi, field, reader)) { @@ -2008,10 +2005,10 @@ static int rdb_unpack_unknown(Rdb_field_packing *fpi, Field *field, */ static void rdb_make_unpack_unknown_varchar( - const Rdb_collation_codec *codec __attribute__((__unused__)), - const Field *field, Rdb_pack_field_context *pack_ctx) + const Rdb_collation_codec* const codec __attribute__((__unused__)), + const Field* const field, Rdb_pack_field_context* const pack_ctx) { - auto f= static_cast(field); + const auto f= static_cast(field); uint len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); len+= f->length_bytes; pack_ctx->writer->write(field->ptr, len); @@ -2032,16 +2029,17 @@ static void rdb_make_unpack_unknown_varchar( rdb_make_unpack_unknown, rdb_unpack_unknown */ -static int rdb_unpack_unknown_varchar(Rdb_field_packing *fpi, Field *field, +static int rdb_unpack_unknown_varchar(Rdb_field_packing* const fpi, + Field* const field, uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader) + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader) { const uchar *ptr; - uchar *d0= dst; - auto f= static_cast(field); + uchar* const d0= dst; + const auto f= static_cast(field); dst += f->length_bytes; - uint len_bytes= f->length_bytes; + const uint len_bytes= f->length_bytes; // We don't use anything from the key, so skip over it. if (fpi->m_skip_func(fpi, field, reader)) { @@ -2056,7 +2054,7 @@ static int rdb_unpack_unknown_varchar(Rdb_field_packing *fpi, Field *field, if ((ptr= (const uchar*)unp_reader->read(len_bytes))) { memcpy(d0, ptr, len_bytes); - uint len= len_bytes == 1 ? (uint) *ptr : uint2korr(ptr); + const uint len= len_bytes == 1 ? 
(uint) *ptr : uint2korr(ptr); if ((ptr= (const uchar*)unp_reader->read(len))) { memcpy(dst, ptr, len); @@ -2070,9 +2068,10 @@ static int rdb_unpack_unknown_varchar(Rdb_field_packing *fpi, Field *field, /* Write unpack_data for a "simple" collation */ -static void rdb_write_unpack_simple(Rdb_bit_writer *writer, - const Rdb_collation_codec *codec, - const uchar *src, size_t src_len) +static void rdb_write_unpack_simple(Rdb_bit_writer* const writer, + const Rdb_collation_codec* const codec, + const uchar* const src, + const size_t src_len) { for (uint i= 0; i < src_len; i++) { @@ -2081,10 +2080,10 @@ static void rdb_write_unpack_simple(Rdb_bit_writer *writer, } -static uint rdb_read_unpack_simple(Rdb_bit_reader *reader, - const Rdb_collation_codec *codec, - const uchar *src, size_t src_len, - uchar *dst) +static uint rdb_read_unpack_simple(Rdb_bit_reader* const reader, + const Rdb_collation_codec* const codec, + const uchar* const src, + const size_t &src_len, uchar* const dst) { for (uint i= 0; i < src_len; i++) { @@ -2120,13 +2119,14 @@ static uint rdb_read_unpack_simple(Rdb_bit_reader *reader, */ static void -rdb_make_unpack_simple_varchar(const Rdb_collation_codec* codec, - const Field *field, - Rdb_pack_field_context *pack_ctx) +rdb_make_unpack_simple_varchar(const Rdb_collation_codec* const codec, + const Field* const field, + Rdb_pack_field_context* const pack_ctx) { - auto f= static_cast(field); - uchar *src= f->ptr + f->length_bytes; - size_t src_len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); + const auto f= static_cast(field); + uchar* const src= f->ptr + f->length_bytes; + const size_t src_len= + f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); Rdb_bit_writer bit_writer(pack_ctx->writer); // The std::min compares characters with bytes, but for simple collations, // mbmaxlen = 1. 
@@ -2143,16 +2143,17 @@ rdb_make_unpack_simple_varchar(const Rdb_collation_codec* codec, */ int -rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *fpi, Field *field, - uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader) +rdb_unpack_simple_varchar_space_pad(Rdb_field_packing* const fpi, + Field* const field, + uchar* dst, + Rdb_string_reader* const reader, + Rdb_string_reader * const unp_reader) { const uchar *ptr; size_t len= 0; bool finished= false; uchar *d0= dst; - Field_varstring* field_var= static_cast(field); + const Field_varstring* const field_var= static_cast(field); // For simple collations, char_length is also number of bytes. DBUG_ASSERT((size_t)fpi->m_max_image_len >= field_var->char_length()); uchar *dst_end= dst + field_var->pack_length(); @@ -2186,7 +2187,7 @@ rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *fpi, Field *field, /* Decode the length-emitted encoding here */ while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) { - char last_byte= ptr[fpi->m_segment_size - 1]; // number of padding bytes + const char last_byte= ptr[fpi->m_segment_size - 1]; // number of padding bytes size_t used_bytes; if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) { @@ -2266,11 +2267,11 @@ rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *fpi, Field *field, The VARCHAR variant is in rdb_make_unpack_simple_varchar */ -static void rdb_make_unpack_simple(const Rdb_collation_codec *codec, - const Field *field, - Rdb_pack_field_context *pack_ctx) +static void rdb_make_unpack_simple(const Rdb_collation_codec* const codec, + const Field* const field, + Rdb_pack_field_context* const pack_ctx) { - uchar *src= field->ptr; + const uchar* const src= field->ptr; Rdb_bit_writer bit_writer(pack_ctx->writer); rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length()); } @@ -2279,14 +2280,14 @@ static void rdb_make_unpack_simple(const Rdb_collation_codec *codec, Function of type rdb_index_field_unpack_t */ -static int 
rdb_unpack_simple(Rdb_field_packing *fpi, - Field *field __attribute__((__unused__)), - uchar *dst, - Rdb_string_reader *reader, - Rdb_string_reader *unp_reader) +static int rdb_unpack_simple(Rdb_field_packing* const fpi, + Field* const field __attribute__((__unused__)), + uchar* const dst, + Rdb_string_reader* const reader, + Rdb_string_reader* const unp_reader) { const uchar *ptr; - uint len = fpi->m_max_image_len; + const uint len = fpi->m_max_image_len; Rdb_bit_reader bit_reader(unp_reader); if (!(ptr= (const uchar*)reader->read(len))) @@ -2307,6 +2308,10 @@ const int RDB_SPACE_XFRM_SIZE= 32; class Rdb_charset_space_info { public: + Rdb_charset_space_info(const Rdb_charset_space_info&) = delete; + Rdb_charset_space_info& operator=(const Rdb_charset_space_info&) = delete; + Rdb_charset_space_info() = default; + // A few strxfrm'ed space characters, at least RDB_SPACE_XFRM_SIZE bytes std::vector spaces_xfrm; @@ -2343,10 +2348,10 @@ rdb_mem_comparable_space; */ static -void rdb_get_mem_comparable_space(const CHARSET_INFO *cs, +void rdb_get_mem_comparable_space(const CHARSET_INFO* const cs, const std::vector **xfrm, - size_t *xfrm_len, - size_t *mb_len) + size_t* const xfrm_len, + size_t* const mb_len) { DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE); if (!rdb_mem_comparable_space[cs->number].get()) @@ -2362,19 +2367,15 @@ void rdb_get_mem_comparable_space(const CHARSET_INFO *cs, // multi-byte form of the ' ' (space) character uchar space_mb[MAX_MULTI_BYTE_CHAR_SIZE]; - size_t space_mb_len= cs->cset->wc_mb(cs, (my_wc_t) cs->pad_char, - space_mb, - space_mb + sizeof(space_mb)); + const size_t space_mb_len= cs->cset->wc_mb(cs, (my_wc_t) cs->pad_char, + space_mb, + space_mb + sizeof(space_mb)); uchar space[20]; // mem-comparable image of the space character - size_t space_len= cs->coll->strnxfrm(cs, - space, sizeof(space), - 1, - space_mb, - space_mb_len, - 0); - Rdb_charset_space_info *info= new Rdb_charset_space_info; + const size_t space_len= cs->coll->strnxfrm(cs, 
space, sizeof(space), 1, + space_mb, space_mb_len, 0); + Rdb_charset_space_info* const info= new Rdb_charset_space_info; info->space_xfrm_len= space_len; info->space_mb_len= space_mb_len; while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE) @@ -2398,16 +2399,16 @@ std::array rdb_collation_data; mysql_mutex_t rdb_collation_data_mutex; -static bool rdb_is_collation_supported(const my_core::CHARSET_INFO * cs) +static bool rdb_is_collation_supported(const my_core::CHARSET_INFO* const cs) { return (cs->coll == &my_collation_8bit_simple_ci_handler); } static const Rdb_collation_codec *rdb_init_collation_mapping( - const my_core::CHARSET_INFO *cs) + const my_core::CHARSET_INFO* const cs) { DBUG_ASSERT(cs && cs->state & MY_CS_AVAILABLE); - const Rdb_collation_codec *codec= rdb_collation_data[cs->number]; + const Rdb_collation_codec* codec= rdb_collation_data[cs->number]; if (codec == nullptr && rdb_is_collation_supported(cs)) { @@ -2469,7 +2470,7 @@ static const Rdb_collation_codec *rdb_init_collation_mapping( } -static int get_segment_size_from_collation(const CHARSET_INFO* cs) +static int get_segment_size_from_collation(const CHARSET_INFO* const cs) { int ret; if (cs == &my_charset_utf8mb4_bin || @@ -2526,9 +2527,10 @@ static int get_segment_size_from_collation(const CHARSET_INFO* cs) FALSE - Otherwise */ -bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, - uint keynr_arg, uint key_part_arg, - uint16 key_length) +bool Rdb_field_packing::setup(const Rdb_key_def* const key_descr, + const Field* const field, + const uint &keynr_arg, const uint &key_part_arg, + const uint16 &key_length) { int res= false; enum_field_types type= field ? 
field->real_type() : MYSQL_TYPE_LONGLONG; @@ -2595,10 +2597,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: { - if (key_descr && - key_descr->index_format_min_check( - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, - Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) + if (key_descr) { // The my_charset_bin collation is special in that it will consider // shorter strings sorting as less than longer strings. @@ -2634,7 +2633,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, const bool is_varchar= (type == MYSQL_TYPE_VARCHAR); const CHARSET_INFO *cs= field->charset(); // max_image_len before chunking is taken into account - int max_image_len_before_chunks= m_max_image_len; + const int max_image_len_before_chunks= m_max_image_len; if (is_varchar) { @@ -2646,7 +2645,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, m_max_image_len= (m_max_image_len/(RDB_ESCAPE_LENGTH-1) + 1) * RDB_ESCAPE_LENGTH; - auto field_var= static_cast(field); + const auto field_var= static_cast(field); m_unpack_info_uses_two_bytes= (field_var->field_length + 8 >= 0x100); } @@ -2675,33 +2674,19 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, if (is_varchar) { - if (!key_descr || - key_descr->index_format_min_check( - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, - Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) - { - // VARCHARs - // - are compared as if they were space-padded - // - but are not actually space-padded (reading the value back - // produces the original value, without the padding) - m_unpack_func= rdb_unpack_binary_or_utf8_varchar_space_pad; - m_skip_func= rdb_skip_variable_space_pad; - m_pack_func= rdb_pack_with_varchar_space_pad; - m_make_unpack_info_func= rdb_dummy_make_unpack_info; - m_segment_size= get_segment_size_from_collation(cs); - m_max_image_len= - 
(max_image_len_before_chunks/(m_segment_size-1) + 1) * - m_segment_size; - rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, - &space_mb_len); - } - else - { - // Older variant where VARCHARs were not compared as space-padded: - m_unpack_func= rdb_unpack_binary_or_utf8_varchar; - m_skip_func= rdb_skip_variable_length; - m_pack_func= rdb_pack_with_varchar_encoding; - } + // VARCHARs - are compared as if they were space-padded - but are + // not actually space-padded (reading the value back produces the + // original value, without the padding) + m_unpack_func= rdb_unpack_binary_or_utf8_varchar_space_pad; + m_skip_func= rdb_skip_variable_space_pad; + m_pack_func= rdb_pack_with_varchar_space_pad; + m_make_unpack_info_func= rdb_dummy_make_unpack_info; + m_segment_size= get_segment_size_from_collation(cs); + m_max_image_len= + (max_image_len_before_chunks/(m_segment_size-1) + 1) * + m_segment_size; + rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, + &space_mb_len); } else { @@ -2718,20 +2703,11 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, res= true; // index-only scans are possible m_unpack_data_len= is_varchar ? 0 : field->field_length; - uint idx= is_varchar ? 0 : 1; + const uint idx= is_varchar ? 0 : 1; const Rdb_collation_codec *codec= nullptr; if (is_varchar) { - if (cs->levels_for_order != 1) - { - // NO_LINT_DEBUG - sql_print_warning("RocksDB: you're trying to create an index " - "with a multi-level collation %s", cs->name); - // NO_LINT_DEBUG - sql_print_warning("MyRocks will handle this collation internally " - " as if it had a NO_PAD attribute."); - } // VARCHAR requires space-padding for doing comparisons // // The check for cs->levels_for_order is to catch @@ -2741,11 +2717,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, // either. // Currently we handle these collations as NO_PAD, even if they have // PAD_SPACE attribute. 
- if ((!key_descr || - key_descr->index_format_min_check( - Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1, - Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) && - cs->levels_for_order == 1) + if (cs->levels_for_order == 1) { m_pack_func= rdb_pack_with_varchar_space_pad; m_skip_func= rdb_skip_variable_space_pad; @@ -2758,6 +2730,12 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, } else { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: you're trying to create an index " + "with a multi-level collation %s", cs->name); + // NO_LINT_DEBUG + sql_print_warning("MyRocks will handle this collation internally " + " as if it had a NO_PAD attribute."); m_pack_func= rdb_pack_with_varchar_encoding; m_skip_func= rdb_skip_variable_length; } @@ -2824,14 +2802,14 @@ bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field, } -Field *Rdb_field_packing::get_field_in_table(const TABLE *tbl) const +Field *Rdb_field_packing::get_field_in_table(const TABLE* const tbl) const { return tbl->key_info[m_keynr].key_part[m_key_part].field; } void Rdb_field_packing::fill_hidden_pk_val(uchar **dst, - longlong hidden_pk_id) const + const longlong &hidden_pk_id) const { DBUG_ASSERT(m_max_image_len == 8); @@ -2877,8 +2855,9 @@ Rdb_tbl_def::~Rdb_tbl_def() ( cf_id, index_nr ) */ -bool Rdb_tbl_def::put_dict(Rdb_dict_manager* dict, rocksdb::WriteBatch *batch, - uchar *key, size_t keylen) +bool Rdb_tbl_def::put_dict(Rdb_dict_manager* const dict, + rocksdb::WriteBatch* const batch, + uchar* const key, const size_t &keylen) { StringBuffer<8 * Rdb_key_def::PACKED_SIZE> indexes; indexes.alloc(Rdb_key_def::VERSION_SIZE + @@ -2887,13 +2866,13 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager* dict, rocksdb::WriteBatch *batch, for (uint i = 0; i < m_key_count; i++) { - const std::shared_ptr& kd= m_key_descr_arr[i]; + const Rdb_key_def& kd= *m_key_descr_arr[i]; - uchar flags = - (kd->m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | - (kd->m_is_auto_cf ? 
Rdb_key_def::AUTO_CF_FLAG : 0); + const uchar flags = + (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | + (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0); - uint cf_id= kd->get_cf()->GetID(); + const uint cf_id= kd.get_cf()->GetID(); /* If cf_id already exists, cf_flags must be the same. To prevent race condition, reading/modifying/committing CF flags @@ -2919,14 +2898,14 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager* dict, rocksdb::WriteBatch *batch, } rdb_netstr_append_uint32(&indexes, cf_id); - rdb_netstr_append_uint32(&indexes, kd->m_index_number); - dict->add_or_update_index_cf_mapping(batch, kd->m_index_type, - kd->m_kv_format_version, - kd->m_index_number, cf_id); + rdb_netstr_append_uint32(&indexes, kd.m_index_number); + dict->add_or_update_index_cf_mapping(batch, kd.m_index_type, + kd.m_kv_format_version, + kd.m_index_number, cf_id); } - rocksdb::Slice skey((char*)key, keylen); - rocksdb::Slice svalue(indexes.c_ptr(), indexes.length()); + const rocksdb::Slice skey((char*)key, keylen); + const rocksdb::Slice svalue(indexes.c_ptr(), indexes.length()); dict->put_key(batch, skey, svalue); return false; @@ -2968,7 +2947,7 @@ void Rdb_tbl_def::set_name(const std::string& name) (Rdb_tbl_def in our case). */ const uchar* Rdb_ddl_manager::get_hash_key( - Rdb_tbl_def *rec, size_t *length, + Rdb_tbl_def* const rec, size_t* const length, my_bool not_used __attribute__((__unused__))) { const std::string& dbname_tablename= rec->full_tablename(); @@ -2982,13 +2961,13 @@ const uchar* Rdb_ddl_manager::get_hash_key( invoked by the m_ddl_hash object of type my_core::HASH. It deletes a record (Rdb_tbl_def in our case). 
*/ -void Rdb_ddl_manager::free_hash_elem(void* data) +void Rdb_ddl_manager::free_hash_elem(void* const data) { Rdb_tbl_def* elem= reinterpret_cast(data); delete elem; } -void Rdb_ddl_manager::erase_index_num(GL_INDEX_ID gl_index_id) +void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) { m_index_num_to_keydef.erase(gl_index_id); } @@ -3209,7 +3188,7 @@ bool Rdb_validate_tbls::compare_to_actual_tables( bool Rdb_ddl_manager::validate_schemas(void) { bool has_errors= false; - std::string datadir= std::string(mysql_real_data_home); + const std::string datadir= std::string(mysql_real_data_home); Rdb_validate_tbls table_list; /* Get the list of tables from the database dictionary */ @@ -3243,15 +3222,16 @@ bool Rdb_ddl_manager::validate_schemas(void) return !has_errors; } -bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, - Rdb_cf_manager *cf_manager, - uint32_t validate_tables) +bool Rdb_ddl_manager::init(Rdb_dict_manager* const dict_arg, + Rdb_cf_manager* const cf_manager, + const uint32_t &validate_tables) { + const ulong TABLE_HASH_SIZE= 32; m_dict= dict_arg; mysql_rwlock_init(0, &m_rwlock); (void) my_hash_init(&m_ddl_hash, /*system_charset_info*/ &my_charset_bin, - 32, 0, 0, + TABLE_HASH_SIZE, 0, 0, (my_hash_get_key) Rdb_ddl_manager::get_hash_key, Rdb_ddl_manager::free_hash_elem, 0); @@ -3259,7 +3239,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, /* Read the data dictionary and populate the hash */ uchar ddl_entry[Rdb_key_def::INDEX_NUMBER_SIZE]; rdb_netbuf_store_index(ddl_entry, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); - rocksdb::Slice ddl_entry_slice((char*)ddl_entry, + const rocksdb::Slice ddl_entry_slice((char*)ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE); /* Reading data dictionary should always skip bloom filter */ @@ -3273,8 +3253,8 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, { const uchar *ptr; const uchar *ptr_end; - rocksdb::Slice key= it->key(); - rocksdb::Slice val= it->value(); + const rocksdb::Slice 
key= it->key(); + const rocksdb::Slice val= it->value(); if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE && memcmp(key.data(), ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE)) @@ -3287,10 +3267,11 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, return true; } - Rdb_tbl_def *tdef= new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); + Rdb_tbl_def* const tdef= + new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); // Now, read the DDLs. - int real_val_size= val.size() - Rdb_key_def::VERSION_SIZE; + const int real_val_size= val.size() - Rdb_key_def::VERSION_SIZE; if (real_val_size % Rdb_key_def::PACKED_SIZE*2) { sql_print_error("RocksDB: Table_store: invalid keylist for table %s", @@ -3301,7 +3282,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, tdef->m_key_descr_arr= new std::shared_ptr[tdef->m_key_count]; ptr= reinterpret_cast(val.data()); - int version= rdb_netbuf_read_uint16(&ptr); + const int version= rdb_netbuf_read_uint16(&ptr); if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION) { sql_print_error("RocksDB: DDL ENTRY Version was not expected." 
@@ -3344,7 +3325,8 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, return true; } - rocksdb::ColumnFamilyHandle* cfh = cf_manager->get_cf(gl_index_id.cf_id); + rocksdb::ColumnFamilyHandle* const cfh = + cf_manager->get_cf(gl_index_id.cf_id); DBUG_ASSERT(cfh != nullptr); /* @@ -3387,7 +3369,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, if (!it->status().ok()) { - std::string s= it->status().ToString(); + const std::string s= it->status().ToString(); sql_print_error("RocksDB: Table_store: load error: %s", s.c_str()); return true; } @@ -3397,14 +3379,15 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg, } -Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, bool lock) +Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, + const bool &lock) { if (lock) { mysql_rwlock_rdlock(&m_rwlock); } - Rdb_tbl_def* rec= reinterpret_cast( + Rdb_tbl_def* const rec= reinterpret_cast( my_hash_search(&m_ddl_hash, reinterpret_cast(table_name.c_str()), table_name.size())); @@ -3421,19 +3404,20 @@ Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, bool lock) // lock on m_rwlock to make sure the Rdb_key_def is not discarded while we // are finding it. Copying it into 'ret' increments the count making sure // that the object will not be discarded until we are finished with it. 
-std::shared_ptr Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) +std::shared_ptr Rdb_ddl_manager::safe_find( + GL_INDEX_ID gl_index_id) { - std::shared_ptr ret(nullptr); + std::shared_ptr ret(nullptr); mysql_rwlock_rdlock(&m_rwlock); auto it= m_index_num_to_keydef.find(gl_index_id); if (it != m_index_num_to_keydef.end()) { - auto table_def = find(it->second.first, false); + const auto table_def = find(it->second.first, false); if (table_def && it->second.second < table_def->m_key_count) { - auto& kd= table_def->m_key_descr_arr[it->second.second]; + const auto &kd= table_def->m_key_descr_arr[it->second.second]; if (kd->max_storage_fmt_length() != 0) { ret = kd; @@ -3470,9 +3454,10 @@ void Rdb_ddl_manager::set_stats( { mysql_rwlock_wrlock(&m_rwlock); for (auto src : stats) { - auto keydef = find(src.second.m_gl_index_id); + const auto& keydef = find(src.second.m_gl_index_id); if (keydef) { keydef->m_stats = src.second; + m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; } } mysql_rwlock_unlock(&m_rwlock); @@ -3488,7 +3473,7 @@ void Rdb_ddl_manager::adjust_stats( { for (const auto& src : data) { - auto keydef= find(src.m_gl_index_id); + const auto& keydef= find(src.m_gl_index_id); if (keydef) { keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length()); @@ -3497,7 +3482,7 @@ void Rdb_ddl_manager::adjust_stats( } i++; } - bool should_save_stats= !m_stats2store.empty(); + const bool should_save_stats= !m_stats2store.empty(); mysql_rwlock_unlock(&m_rwlock); if (should_save_stats) { @@ -3506,15 +3491,15 @@ void Rdb_ddl_manager::adjust_stats( } } -void Rdb_ddl_manager::persist_stats(bool sync) +void Rdb_ddl_manager::persist_stats(const bool &sync) { mysql_rwlock_wrlock(&m_rwlock); - auto local_stats2store = std::move(m_stats2store); + const auto local_stats2store = std::move(m_stats2store); m_stats2store.clear(); mysql_rwlock_unlock(&m_rwlock); // Persist stats - std::unique_ptr wb = m_dict->begin(); + const std::unique_ptr wb = 
m_dict->begin(); std::vector stats; std::transform( local_stats2store.begin(), local_stats2store.end(), @@ -3531,8 +3516,8 @@ void Rdb_ddl_manager::persist_stats(bool sync) on-disk data dictionary. */ -int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *tbl, - rocksdb::WriteBatch *batch) +int Rdb_ddl_manager::put_and_write(Rdb_tbl_def* const tbl, + rocksdb::WriteBatch* const batch) { uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; uint pos= 0; @@ -3564,7 +3549,7 @@ int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *tbl, See the discussion here: https://reviews.facebook.net/D35925#inline-259167 Tracked by https://github.com/facebook/mysql-5.6/issues/33 */ -int Rdb_ddl_manager::put(Rdb_tbl_def *tbl, bool lock) +int Rdb_ddl_manager::put(Rdb_tbl_def* const tbl, const bool &lock) { Rdb_tbl_def *rec; my_bool result; @@ -3575,7 +3560,7 @@ int Rdb_ddl_manager::put(Rdb_tbl_def *tbl, bool lock) // We have to do this find because 'tbl' is not yet in the list. We need // to find the one we are replacing ('rec') - rec= reinterpret_cast(find(dbname_tablename, false)); + rec= find(dbname_tablename, false); if (rec) { // this will free the old record. 
@@ -3594,8 +3579,9 @@ int Rdb_ddl_manager::put(Rdb_tbl_def *tbl, bool lock) } -void Rdb_ddl_manager::remove(Rdb_tbl_def *tbl, - rocksdb::WriteBatch *batch, bool lock) +void Rdb_ddl_manager::remove(Rdb_tbl_def* const tbl, + rocksdb::WriteBatch * const batch, + const bool &lock) { if (lock) mysql_rwlock_wrlock(&m_rwlock); @@ -3610,7 +3596,7 @@ void Rdb_ddl_manager::remove(Rdb_tbl_def *tbl, memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); pos += dbname_tablename.size(); - rocksdb::Slice tkey((char*)buf, pos); + const rocksdb::Slice tkey((char*)buf, pos); m_dict->delete_key(batch, tkey); /* The following will also delete the object: */ @@ -3622,7 +3608,7 @@ void Rdb_ddl_manager::remove(Rdb_tbl_def *tbl, bool Rdb_ddl_manager::rename(const std::string& from, const std::string& to, - rocksdb::WriteBatch *batch) + rocksdb::WriteBatch* const batch) { Rdb_tbl_def *rec; Rdb_tbl_def *new_rec; @@ -3675,7 +3661,7 @@ void Rdb_ddl_manager::cleanup() } -int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* tables_scanner) +int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* const tables_scanner) { int i, ret; Rdb_tbl_def *rec; @@ -3705,7 +3691,7 @@ int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* tables_scanner) Rdb_binlog_manager class implementation */ -bool Rdb_binlog_manager::init(Rdb_dict_manager *dict_arg) +bool Rdb_binlog_manager::init(Rdb_dict_manager* const dict_arg) { DBUG_ASSERT(dict_arg != nullptr); m_dict= dict_arg; @@ -3728,21 +3714,22 @@ void Rdb_binlog_manager::cleanup() write succeeded or not is not possible here. 
@param binlog_name Binlog name @param binlog_pos Binlog pos - @param binlog_gtid Binlog GTID + @param binlog_gtid Binlog max GTID @param batch WriteBatch */ -void Rdb_binlog_manager::update(const char* binlog_name, +void Rdb_binlog_manager::update(const char* const binlog_name, const my_off_t binlog_pos, - const char* binlog_gtid, - rocksdb::WriteBatchBase* batch) + const char* const binlog_max_gtid, + rocksdb::WriteBatchBase* const batch) { if (binlog_name && binlog_pos) { // max binlog length (512) + binlog pos (4) + binlog gtid (57) < 1024 - uchar value_buf[1024]; + const size_t RDB_MAX_BINLOG_INFO_LEN= 1024; + uchar value_buf[RDB_MAX_BINLOG_INFO_LEN]; m_dict->put_key(batch, m_key_slice, pack_value(value_buf, binlog_name, - binlog_pos, binlog_gtid)); + binlog_pos, binlog_max_gtid)); } } @@ -3755,8 +3742,9 @@ void Rdb_binlog_manager::update(const char* binlog_name, true is binlog info was found (valid behavior) false otherwise */ -bool Rdb_binlog_manager::read(char *binlog_name, my_off_t *binlog_pos, - char *binlog_gtid) +bool Rdb_binlog_manager::read(char* const binlog_name, + my_off_t* const binlog_pos, + char* const binlog_gtid) const { bool ret= false; if (binlog_name) @@ -3782,10 +3770,11 @@ bool Rdb_binlog_manager::read(char *binlog_name, my_off_t *binlog_pos, @param binlog_gtid Binlog GTID @return rocksdb::Slice converted from buf and its length */ -rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf, - const char* binlog_name, - const my_off_t binlog_pos, - const char* binlog_gtid) +rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, + const char* const binlog_name, + const my_off_t &binlog_pos, + const char* const binlog_gtid + ) const { uint pack_len= 0; @@ -3794,10 +3783,10 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf, pack_len += Rdb_key_def::VERSION_SIZE; // store binlog file name length - DBUG_ASSERT(strlen(binlog_name) <= 65535); - uint16_t binlog_name_len = strlen(binlog_name); + DBUG_ASSERT(strlen(binlog_name) <= 
FN_REFLEN); + const uint16_t binlog_name_len = strlen(binlog_name); rdb_netbuf_store_uint16(buf+pack_len, binlog_name_len); - pack_len += 2; + pack_len += sizeof(uint16); // store binlog file name memcpy(buf+pack_len, binlog_name, binlog_name_len); @@ -3805,13 +3794,13 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf, // store binlog pos rdb_netbuf_store_uint32(buf+pack_len, binlog_pos); - pack_len += 4; + pack_len += sizeof(uint32); // store binlog gtid length. // If gtid was not set, store 0 instead - uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0; + const uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0; rdb_netbuf_store_uint16(buf+pack_len, binlog_gtid_len); - pack_len += 2; + pack_len += sizeof(uint16); if (binlog_gtid_len > 0) { @@ -3831,23 +3820,24 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf, @param[OUT] binlog_gtid Binlog GTID @return true on error */ -bool Rdb_binlog_manager::unpack_value(const uchar *value, char *binlog_name, - my_off_t *binlog_pos, - char *binlog_gtid) +bool Rdb_binlog_manager::unpack_value(const uchar* const value, + char* const binlog_name, + my_off_t* const binlog_pos, + char* const binlog_gtid) const { uint pack_len= 0; DBUG_ASSERT(binlog_pos != nullptr); // read version - uint16_t version= rdb_netbuf_to_uint16(value); + const uint16_t version= rdb_netbuf_to_uint16(value); pack_len += Rdb_key_def::VERSION_SIZE; if (version != Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION) return true; // read binlog file name length - uint16_t binlog_name_len= rdb_netbuf_to_uint16(value+pack_len); - pack_len += 2; + const uint16_t binlog_name_len= rdb_netbuf_to_uint16(value+pack_len); + pack_len += sizeof(uint16); if (binlog_name_len) { // read and set binlog name @@ -3857,11 +3847,11 @@ bool Rdb_binlog_manager::unpack_value(const uchar *value, char *binlog_name, // read and set binlog pos *binlog_pos= rdb_netbuf_to_uint32(value+pack_len); - pack_len += 4; + pack_len += sizeof(uint32); // 
read gtid length - uint16_t binlog_gtid_len= rdb_netbuf_to_uint16(value+pack_len); - pack_len += 2; + const uint16_t binlog_gtid_len= rdb_netbuf_to_uint16(value+pack_len); + pack_len += sizeof(uint16); if (binlog_gtid && binlog_gtid_len > 0) { // read and set gtid @@ -3883,8 +3873,8 @@ bool Rdb_binlog_manager::unpack_value(const uchar *value, char *binlog_name, @param[IN] write_batch Handle to storage engine writer. */ void Rdb_binlog_manager::update_slave_gtid_info( - uint id, const char* db, const char* gtid, - rocksdb::WriteBatchBase* write_batch) + const uint &id, const char* const db, const char* const gtid, + rocksdb::WriteBatchBase* const write_batch) { if (id && db && gtid) { // Make sure that if the slave_gtid_info table exists we have a @@ -3910,14 +3900,14 @@ void Rdb_binlog_manager::update_slave_gtid_info( buf += Rdb_key_def::INDEX_NUMBER_SIZE; rdb_netbuf_store_uint32(buf, id); buf += 4; - rocksdb::Slice key_slice = + const rocksdb::Slice key_slice = rocksdb::Slice((const char*)key_buf, buf-key_buf); // Build value uchar value_buf[128]= {0}; DBUG_ASSERT(gtid); - uint db_len= strlen(db); - uint gtid_len= strlen(gtid); + const uint db_len= strlen(db); + const uint gtid_len= strlen(gtid); buf= value_buf; // 1 byte used for flags. Empty here. 
buf++; @@ -3935,14 +3925,15 @@ void Rdb_binlog_manager::update_slave_gtid_info( buf++; memcpy(buf, gtid, gtid_len); buf += gtid_len; - rocksdb::Slice value_slice = + const rocksdb::Slice value_slice = rocksdb::Slice((const char*)value_buf, buf-value_buf); write_batch->Put(kd->get_cf(), key_slice, value_slice); } } -bool Rdb_dict_manager::init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager) +bool Rdb_dict_manager::init(rocksdb::DB* const rdb_dict, + Rdb_cf_manager* const cf_manager) { mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); m_db= rdb_dict; @@ -3960,20 +3951,20 @@ bool Rdb_dict_manager::init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager) return (m_system_cfh == nullptr); } -std::unique_ptr Rdb_dict_manager::begin() +std::unique_ptr Rdb_dict_manager::begin() const { return std::unique_ptr(new rocksdb::WriteBatch); } -void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase *batch, +void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase* const batch, const rocksdb::Slice &key, - const rocksdb::Slice &value) + const rocksdb::Slice &value) const { batch->Put(m_system_cfh, key, value); } rocksdb::Status Rdb_dict_manager::get_value(const rocksdb::Slice &key, - std::string *value) const + std::string* const value) const { rocksdb::ReadOptions options; options.total_order_seek= true; @@ -3986,7 +3977,7 @@ void Rdb_dict_manager::delete_key(rocksdb::WriteBatchBase *batch, batch->Delete(m_system_cfh, key); } -rocksdb::Iterator* Rdb_dict_manager::new_iterator() +rocksdb::Iterator* Rdb_dict_manager::new_iterator() const { /* Reading data dictionary should always skip bloom filter */ rocksdb::ReadOptions read_options; @@ -3994,7 +3985,8 @@ rocksdb::Iterator* Rdb_dict_manager::new_iterator() return m_db->NewIterator(read_options, m_system_cfh); } -int Rdb_dict_manager::commit(rocksdb::WriteBatch *batch, bool sync) +int Rdb_dict_manager::commit(rocksdb::WriteBatch* const batch, const bool &sync) +const { if (!batch) return 1; @@ -4011,7 +4003,7 @@ int 
Rdb_dict_manager::commit(rocksdb::WriteBatch *batch, bool sync) return res; } -void Rdb_dict_manager::dump_index_id(uchar *netbuf, +void Rdb_dict_manager::dump_index_id(uchar* const netbuf, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id) { @@ -4022,7 +4014,7 @@ void Rdb_dict_manager::dump_index_id(uchar *netbuf, gl_index_id.index_id); } -void Rdb_dict_manager::delete_with_prefix(rocksdb::WriteBatch* batch, +void Rdb_dict_manager::delete_with_prefix(rocksdb::WriteBatch* const batch, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id) const { @@ -4038,13 +4030,13 @@ void Rdb_dict_manager::add_or_update_index_cf_mapping( const uchar m_index_type, const uint16_t kv_version, const uint32_t index_id, - const uint32_t cf_id) + const uint32_t cf_id) const { uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; uchar value_buf[256]= {0}; GL_INDEX_ID gl_index_id= {cf_id, index_id}; dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); uchar* ptr= value_buf; rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST); @@ -4054,24 +4046,25 @@ void Rdb_dict_manager::add_or_update_index_cf_mapping( rdb_netbuf_store_uint16(ptr, kv_version); ptr+= 2; - rocksdb::Slice value= rocksdb::Slice((char*)value_buf, ptr-value_buf); + const rocksdb::Slice value= rocksdb::Slice((char*)value_buf, ptr-value_buf); batch->Put(m_system_cfh, key, value); } -void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch* batch, - const uint32_t cf_id, - const uint32_t cf_flags) +void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch* const batch, + const uint32_t &cf_id, + const uint32_t &cf_flags) const { uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; uchar value_buf[Rdb_key_def::VERSION_SIZE+ Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); 
rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); rdb_netbuf_store_uint16(value_buf, Rdb_key_def::CF_DEFINITION_VERSION); rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, cf_flags); - rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice value= + rocksdb::Slice((char*)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, key, value); } @@ -4085,19 +4078,19 @@ void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch* batch, bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, uint16_t *m_index_dict_version, uchar *m_index_type, - uint16_t *kv_version) + uint16_t *kv_version) const { bool found= false; bool error= false; std::string value; uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice &key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - rocksdb::Status status= get_value(key, &value); + const rocksdb::Status &status= get_value(key, &value); if (status.ok()) { - const uchar* val= (const uchar*)value.c_str(); + const uchar* const val= (const uchar*)value.c_str(); const uchar* ptr= val; *m_index_dict_version= rdb_netbuf_to_uint16(val); *kv_version= 0; @@ -4148,16 +4141,17 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, return found; } -bool Rdb_dict_manager::get_cf_flags(const uint32_t cf_id, uint32_t *cf_flags) +bool Rdb_dict_manager::get_cf_flags(const uint32_t &cf_id, + uint32_t* const cf_flags) const { bool found= false; std::string value; uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); - 
rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - rocksdb::Status status= get_value(key, &value); + const rocksdb::Status status= get_value(key, &value); if (status.ok()) { const uchar* val= (const uchar*)value.c_str(); @@ -4177,22 +4171,22 @@ bool Rdb_dict_manager::get_cf_flags(const uint32_t cf_id, uint32_t *cf_flags) ongoing creation. */ void Rdb_dict_manager::get_ongoing_index_operation( - std::vector* gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) + std::vector* const gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); uchar index_buf[Rdb_key_def::INDEX_NUMBER_SIZE]; rdb_netbuf_store_uint32(index_buf, dd_type); - rocksdb::Slice index_slice(reinterpret_cast(index_buf), + const rocksdb::Slice index_slice(reinterpret_cast(index_buf), Rdb_key_def::INDEX_NUMBER_SIZE); rocksdb::Iterator* it= new_iterator(); for (it->Seek(index_slice); it->Valid(); it->Next()) { rocksdb::Slice key= it->key(); - const uchar* ptr= (const uchar*)key.data(); + const uchar* const ptr= (const uchar*)key.data(); /* Ongoing drop/create index operations require key to be of the form: @@ -4226,7 +4220,7 @@ void Rdb_dict_manager::get_ongoing_index_operation( */ bool Rdb_dict_manager::is_index_operation_ongoing( const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); @@ -4235,9 +4229,9 @@ bool Rdb_dict_manager::is_index_operation_ongoing( std::string value; uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; dump_index_id(key_buf, dd_type, gl_index_id); - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - 
rocksdb::Status status= get_value(key, &value); + const rocksdb::Status status= get_value(key, &value); if (status.ok()) { found= true; @@ -4250,9 +4244,9 @@ bool Rdb_dict_manager::is_index_operation_ongoing( by drop_index_thread, or to track online index creation. */ void Rdb_dict_manager::start_ongoing_index_operation( - rocksdb::WriteBatch* batch, + rocksdb::WriteBatch* const batch, const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); @@ -4273,8 +4267,9 @@ void Rdb_dict_manager::start_ongoing_index_operation( Rdb_key_def::DDL_CREATE_INDEX_ONGOING_VERSION); } - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice value= + rocksdb::Slice((char*)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, key, value); } @@ -4282,9 +4277,10 @@ void Rdb_dict_manager::start_ongoing_index_operation( Removing index_id from data dictionary to confirm drop_index_thread completed dropping entire key/values of the index_id */ -void Rdb_dict_manager::end_ongoing_index_operation(rocksdb::WriteBatch* batch, - const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) +void Rdb_dict_manager::end_ongoing_index_operation( + rocksdb::WriteBatch* const batch, + const GL_INDEX_ID& gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); @@ -4296,7 +4292,7 @@ void Rdb_dict_manager::end_ongoing_index_operation(rocksdb::WriteBatch* batch, Returning true if there is no target index ids to be removed by drop_index_thread */ -bool Rdb_dict_manager::is_drop_index_empty() +bool 
Rdb_dict_manager::is_drop_index_empty() const { std::vector gl_index_ids; get_ongoing_drop_indexes(&gl_index_ids); @@ -4308,9 +4304,9 @@ bool Rdb_dict_manager::is_drop_index_empty() that dropping indexes started, and adding data dictionary so that all associated indexes to be removed */ -void Rdb_dict_manager::add_drop_table(std::shared_ptr* key_descr, - uint32 n_keys, - rocksdb::WriteBatch *batch) +void Rdb_dict_manager::add_drop_table(std::shared_ptr* const key_descr, + const uint32 &n_keys, + rocksdb::WriteBatch* const batch) const { std::unordered_set dropped_index_ids; for (uint32 i = 0; i < n_keys; i++) @@ -4328,7 +4324,7 @@ void Rdb_dict_manager::add_drop_table(std::shared_ptr* key_descr, */ void Rdb_dict_manager::add_drop_index( const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch *batch) + rocksdb::WriteBatch* const batch) const { for (const auto& gl_index_id : gl_index_ids) { @@ -4344,7 +4340,7 @@ void Rdb_dict_manager::add_drop_index( */ void Rdb_dict_manager::add_create_index( const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch *batch) + rocksdb::WriteBatch* const batch) const { for (const auto& gl_index_id : gl_index_ids) { @@ -4361,13 +4357,13 @@ void Rdb_dict_manager::add_create_index( */ void Rdb_dict_manager::finish_indexes_operation( const std::unordered_set& gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); - std::unique_ptr wb= begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= begin(); + rocksdb::WriteBatch* const batch= wb.get(); for (const auto& gl_index_id : gl_index_ids) { @@ -4395,7 +4391,7 @@ void Rdb_dict_manager::finish_indexes_operation( Rdb_dict_manager (at startup). If there is any index ids that are drop ongoing, printing out messages for diagnostics purposes. 
*/ -void Rdb_dict_manager::resume_drop_indexes() +void Rdb_dict_manager::resume_drop_indexes() const { std::vector gl_index_ids; get_ongoing_drop_indexes(&gl_index_ids); @@ -4418,10 +4414,10 @@ void Rdb_dict_manager::resume_drop_indexes() } } -void Rdb_dict_manager::rollback_ongoing_index_creation() +void Rdb_dict_manager::rollback_ongoing_index_creation() const { - std::unique_ptr wb= begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= begin(); + rocksdb::WriteBatch* const batch= wb.get(); std::vector gl_index_ids; get_ongoing_create_indexes(&gl_index_ids); @@ -4441,9 +4437,9 @@ void Rdb_dict_manager::rollback_ongoing_index_creation() } void Rdb_dict_manager::log_start_drop_table( - const std::shared_ptr* key_descr, - uint32 n_keys, - const char* log_action) + const std::shared_ptr* const key_descr, + const uint32 &n_keys, + const char* const log_action) const { for (uint32 i = 0; i < n_keys; i++) { log_start_drop_index(key_descr[i]->get_gl_index_id(), log_action); @@ -4451,7 +4447,7 @@ void Rdb_dict_manager::log_start_drop_table( } void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, - const char* log_action) + const char* log_action) const { uint16 m_index_dict_version= 0; uchar m_index_type= 0; @@ -4468,16 +4464,16 @@ void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, log_action, gl_index_id.cf_id, gl_index_id.index_id); } -bool Rdb_dict_manager::get_max_index_id(uint32_t *index_id) +bool Rdb_dict_manager::get_max_index_id(uint32_t* const index_id) const { bool found= false; std::string value; - rocksdb::Status status= get_value(m_key_slice_max_index_id, &value); + const rocksdb::Status status= get_value(m_key_slice_max_index_id, &value); if (status.ok()) { - const uchar* val= (const uchar*)value.c_str(); - uint16_t version= rdb_netbuf_to_uint16(val); + const uchar* const val= (const uchar*)value.c_str(); + const uint16_t &version= rdb_netbuf_to_uint16(val); if (version == 
Rdb_key_def::MAX_INDEX_ID_VERSION) { *index_id= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE); @@ -4487,8 +4483,8 @@ bool Rdb_dict_manager::get_max_index_id(uint32_t *index_id) return found; } -bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* batch, - const uint32_t index_id) +bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* const batch, + const uint32_t &index_id) const { DBUG_ASSERT(batch != nullptr); @@ -4509,15 +4505,14 @@ bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* batch, {0}; rdb_netbuf_store_uint16(value_buf, Rdb_key_def::MAX_INDEX_ID_VERSION); rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, index_id); - rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice value= + rocksdb::Slice((char*)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, m_key_slice_max_index_id, value); return false; } -void Rdb_dict_manager::add_stats( - rocksdb::WriteBatch* batch, - const std::vector& stats -) +void Rdb_dict_manager::add_stats(rocksdb::WriteBatch* const batch, + const std::vector& stats) const { DBUG_ASSERT(batch != nullptr); @@ -4527,7 +4522,7 @@ void Rdb_dict_manager::add_stats( // IndexStats::materialize takes complete care of serialization including // storing the version - auto value = Rdb_index_stats::materialize( + const auto value = Rdb_index_stats::materialize( std::vector{it}, 1.); batch->Put( @@ -4538,13 +4533,13 @@ void Rdb_dict_manager::add_stats( } } -Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) +Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const { uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, gl_index_id); std::string value; - rocksdb::Status status= get_value( + const rocksdb::Status status= get_value( rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), &value); if (status.ok()) @@ -4560,7 +4555,8 @@ Rdb_index_stats 
Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) return Rdb_index_stats(); } -uint Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager *dict) +uint +Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager* const dict) { DBUG_ASSERT(dict != nullptr); @@ -4569,8 +4565,8 @@ uint Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager *dict) res= m_next_number++; - std::unique_ptr wb= dict->begin(); - rocksdb::WriteBatch *batch= wb.get(); + const std::unique_ptr wb= dict->begin(); + rocksdb::WriteBatch* const batch= wb.get(); DBUG_ASSERT(batch != nullptr); dict->update_max_index_id(batch, res); diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index 7c277ddad7d..91cb9b8d563 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -59,7 +59,10 @@ class Rdb_ddl_manager; class Rdb_pack_field_context { public: - explicit Rdb_pack_field_context(Rdb_string_writer *writer_arg) : + Rdb_pack_field_context(const Rdb_pack_field_context&) = delete; + Rdb_pack_field_context& operator=(const Rdb_pack_field_context&) = delete; + + explicit Rdb_pack_field_context(Rdb_string_writer* const writer_arg) : writer(writer_arg) {} @@ -164,44 +167,50 @@ class Rdb_key_def { public: /* Convert a key from KeyTupleFormat to mem-comparable form */ - uint pack_index_tuple(TABLE *tbl, uchar *pack_buffer, uchar *packed_tuple, - const uchar *key_tuple, key_part_map keypart_map) const; + uint pack_index_tuple(TABLE* const tbl, uchar* const pack_buffer, + uchar* const packed_tuple, + const uchar* const key_tuple, + const key_part_map &keypart_map) const; /* Convert a key from Table->record format to mem-comparable form */ - uint pack_record(const TABLE *tbl, uchar *pack_buffer, const uchar *record, - uchar *packed_tuple, Rdb_string_writer *unpack_info, - bool should_store_checksums, - longlong hidden_pk_id= 0, uint n_key_parts= 0, - uint *n_null_fields= nullptr) const; + uint pack_record(const TABLE* const tbl, uchar* const 
pack_buffer, + const uchar* const record, + uchar* const packed_tuple, + Rdb_string_writer* const unpack_info, + const bool &should_store_row_debug_checksums, + const longlong &hidden_pk_id= 0, uint n_key_parts= 0, + uint* const n_null_fields= nullptr) const; /* Pack the hidden primary key into mem-comparable form. */ - uint pack_hidden_pk(longlong hidden_pk_id, - uchar *packed_tuple) const; - int unpack_record(TABLE *table, uchar *buf, const rocksdb::Slice *packed_key, - const rocksdb::Slice *unpack_info, bool verify_checksums) + uint pack_hidden_pk(const longlong &hidden_pk_id, + uchar* const packed_tuple) const; + int unpack_record(TABLE* const table, uchar* const buf, + const rocksdb::Slice* const packed_key, + const rocksdb::Slice* const unpack_info, + const bool &verify_row_debug_checksums) const; static bool unpack_info_has_checksum(const rocksdb::Slice& unpack_info); int compare_keys(const rocksdb::Slice *key1, const rocksdb::Slice *key2, - std::size_t* column_index) const; + std::size_t* const column_index) const; - size_t key_length(TABLE *table, const rocksdb::Slice &key) const; + size_t key_length(const TABLE* const table, const rocksdb::Slice &key) const; /* Get the key that is the "infimum" for this index */ - inline void get_infimum_key(uchar *key, uint *size) const + inline void get_infimum_key(uchar* const key, uint* const size) const { rdb_netbuf_store_index(key, m_index_number); *size= INDEX_NUMBER_SIZE; } /* Get the key that is a "supremum" for this index */ - inline void get_supremum_key(uchar *key, uint *size) const + inline void get_supremum_key(uchar* const key, uint* const size) const { rdb_netbuf_store_index(key, m_index_number+1); *size= INDEX_NUMBER_SIZE; } /* Make a key that is right after the given key. */ - static int successor(uchar *packed_tuple, uint len); + static int successor(uchar* const packed_tuple, const uint &len); /* This can be used to compare prefixes. 
@@ -252,14 +261,15 @@ public: GL_INDEX_ID get_gl_index_id() const { - GL_INDEX_ID gl_index_id = { m_cf_handle->GetID(), m_index_number }; + const GL_INDEX_ID gl_index_id = { m_cf_handle->GetID(), m_index_number }; return gl_index_id; } /* Must only be called for secondary keys: */ - uint get_primary_key_tuple(TABLE *tbl, - const std::shared_ptr& pk_descr, - const rocksdb::Slice *key, uchar *pk_buffer) const; + uint get_primary_key_tuple(const TABLE* const tbl, + const Rdb_key_def& pk_descr, + const rocksdb::Slice* const key, + uchar* const pk_buffer) const; /* Return max length of mem-comparable form */ uint max_storage_fmt_length() const @@ -288,6 +298,7 @@ public: return m_name; } + Rdb_key_def& operator=(const Rdb_key_def&) = delete; Rdb_key_def(const Rdb_key_def& k); Rdb_key_def(uint indexnr_arg, uint keyno_arg, rocksdb::ColumnFamilyHandle* cf_handle_arg, @@ -379,31 +390,31 @@ public: SECONDARY_FORMAT_VERSION_LATEST= SECONDARY_FORMAT_VERSION_UPDATE1, }; - void setup(const TABLE *table, const Rdb_tbl_def *tbl_def); + void setup(const TABLE* const table, const Rdb_tbl_def* const tbl_def); rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; } /* Check if keypart #kp can be unpacked from index tuple */ - inline bool can_unpack(uint kp) const; + inline bool can_unpack(const uint &kp) const; /* Check if keypart #kp needs unpack info */ - inline bool has_unpack_info(uint kp) const; + inline bool has_unpack_info(const uint &kp) const; /* Check if given table has a primary key */ - static bool table_has_hidden_pk(const TABLE* table); + static bool table_has_hidden_pk(const TABLE* const table); - void report_checksum_mismatch(bool is_key, const char *data, - size_t data_size) const; + void report_checksum_mismatch(const bool &is_key, const char* const data, + const size_t data_size) const; /* Check if index is at least pk_min if it is a PK, or at least sk_min if SK.*/ - bool index_format_min_check(int pk_min, int sk_min) const; + bool 
index_format_min_check(const int &pk_min, const int &sk_min) const; private: #ifndef DBUG_OFF - inline bool is_storage_available(int offset, int needed) const + inline bool is_storage_available(const int &offset, const int &needed) const { - int storage_length= static_cast(max_storage_fmt_length()); + const int storage_length= static_cast(max_storage_fmt_length()); return (storage_length - offset) >= needed; } #endif // DBUG_OFF @@ -497,6 +508,10 @@ extern std::array class Rdb_field_packing { public: + Rdb_field_packing(const Rdb_field_packing&) = delete; + Rdb_field_packing& operator=(const Rdb_field_packing&) = delete; + Rdb_field_packing() = default; + /* Length of mem-comparable image of the field, in bytes */ int m_max_image_len; @@ -577,10 +592,11 @@ private: uint m_keynr; uint m_key_part; public: - bool setup(const Rdb_key_def *key_descr, const Field *field, - uint keynr_arg, uint key_part_arg, uint16 key_length); - Field *get_field_in_table(const TABLE *tbl) const; - void fill_hidden_pk_val(uchar **dst, longlong hidden_pk_id) const; + bool setup(const Rdb_key_def* const key_descr, const Field* const field, + const uint &keynr_arg, const uint &key_part_arg, + const uint16 &key_length); + Field *get_field_in_table(const TABLE* const tbl) const; + void fill_hidden_pk_val(uchar **dst, const longlong &hidden_pk_id) const; }; /* @@ -593,6 +609,8 @@ public: class Rdb_field_encoder { public: + Rdb_field_encoder(const Rdb_field_encoder&) = delete; + Rdb_field_encoder& operator=(const Rdb_field_encoder&) = delete; /* STORE_NONE is set when a column can be decoded solely from their mem-comparable form. 
@@ -633,13 +651,13 @@ inline Field* Rdb_key_def::get_table_field_for_part_no(TABLE *table, return m_pack_info[part_no].get_field_in_table(table); } -inline bool Rdb_key_def::can_unpack(uint kp) const +inline bool Rdb_key_def::can_unpack(const uint &kp) const { DBUG_ASSERT(kp < m_key_parts); return (m_pack_info[kp].m_unpack_func != nullptr); } -inline bool Rdb_key_def::has_unpack_info(uint kp) const +inline bool Rdb_key_def::has_unpack_info(const uint &kp) const { DBUG_ASSERT(kp < m_key_parts); return m_pack_info[kp].uses_unpack_info(); @@ -671,19 +689,22 @@ class Rdb_tbl_def void set_name(const std::string& name); public: + Rdb_tbl_def(const Rdb_tbl_def&) = delete; + Rdb_tbl_def& operator=(const Rdb_tbl_def&) = delete; + explicit Rdb_tbl_def(const std::string& name) : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(name); } - Rdb_tbl_def(const char* name, size_t len) : + Rdb_tbl_def(const char* const name, const size_t &len) : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(std::string(name, len)); } - explicit Rdb_tbl_def(const rocksdb::Slice& slice, size_t pos= 0) : + explicit Rdb_tbl_def(const rocksdb::Slice& slice, const size_t &pos= 0) : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(std::string(slice.data() + pos, slice.size() - pos)); @@ -703,8 +724,8 @@ class Rdb_tbl_def /* Is this a system table */ bool m_is_mysql_system_table; - bool put_dict(Rdb_dict_manager *dict, rocksdb::WriteBatch *batch, - uchar *key, size_t keylen); + bool put_dict(Rdb_dict_manager* const dict, rocksdb::WriteBatch* const batch, + uchar* const key, const size_t &keylen); const std::string& full_tablename() const { return m_dbname_tablename; } const std::string& base_dbname() const { return m_dbname; } @@ -724,13 +745,17 @@ class Rdb_seq_generator mysql_mutex_t m_mutex; public: - void init(uint initial_number) + Rdb_seq_generator(const Rdb_seq_generator&) = delete; + Rdb_seq_generator& 
operator=(const Rdb_seq_generator&) = delete; + Rdb_seq_generator() = default; + + void init(const uint &initial_number) { mysql_mutex_init(0 , &m_mutex, MY_MUTEX_INIT_FAST); m_next_number= initial_number; } - uint get_and_update_next_number(Rdb_dict_manager *dict); + uint get_and_update_next_number(Rdb_dict_manager* const dict); void cleanup() { @@ -766,46 +791,54 @@ class Rdb_ddl_manager // It is produced by event listener (ie compaction and flush threads) // and consumed by the rocksdb background thread std::map m_stats2store; + + const std::shared_ptr& find( + GL_INDEX_ID gl_index_id); public: + Rdb_ddl_manager(const Rdb_ddl_manager&) = delete; + Rdb_ddl_manager& operator=(const Rdb_ddl_manager&) = delete; + Rdb_ddl_manager() {} + /* Load the data dictionary from on-disk storage */ - bool init(Rdb_dict_manager *dict_arg, Rdb_cf_manager *cf_manager, - uint32_t validate_tables); + bool init(Rdb_dict_manager* const dict_arg, Rdb_cf_manager* const cf_manager, + const uint32_t &validate_tables); void cleanup(); - Rdb_tbl_def* find(const std::string& table_name, bool lock= true); - const std::shared_ptr& find(GL_INDEX_ID gl_index_id); - std::shared_ptr safe_find(GL_INDEX_ID gl_index_id); + Rdb_tbl_def* find(const std::string& table_name, const bool &lock= true); + std::shared_ptr safe_find(GL_INDEX_ID gl_index_id); void set_stats( const std::unordered_map& stats); void adjust_stats( const std::vector& new_data, const std::vector& deleted_data =std::vector()); - void persist_stats(bool sync = false); + void persist_stats(const bool &sync = false); /* Modify the mapping and write it to on-disk storage */ - int put_and_write(Rdb_tbl_def *key_descr, rocksdb::WriteBatch *batch); - void remove(Rdb_tbl_def *rec, rocksdb::WriteBatch *batch, bool lock= true); + int put_and_write(Rdb_tbl_def* const key_descr, + rocksdb::WriteBatch* const batch); + void remove(Rdb_tbl_def* const rec, rocksdb::WriteBatch* const batch, + const bool &lock= true); bool rename(const std::string& 
from, const std::string& to, - rocksdb::WriteBatch *batch); + rocksdb::WriteBatch* const batch); - uint get_and_update_next_number(Rdb_dict_manager *dict) + uint get_and_update_next_number(Rdb_dict_manager* const dict) { return m_sequence.get_and_update_next_number(dict); } /* Walk the data dictionary */ int scan_for_tables(Rdb_tables_scanner* tables_scanner); - void erase_index_num(GL_INDEX_ID gl_index_id); + void erase_index_num(const GL_INDEX_ID &gl_index_id); private: /* Put the data into in-memory table (only) */ - int put(Rdb_tbl_def *key_descr, bool lock= true); + int put(Rdb_tbl_def* const key_descr, const bool &lock= true); /* Helper functions to be passed to my_core::HASH object */ - static const uchar* get_hash_key(Rdb_tbl_def *rec, size_t *length, + static const uchar* get_hash_key(Rdb_tbl_def* const rec, size_t* const length, my_bool not_used __attribute__((unused))); - static void free_hash_elem(void* data); + static void free_hash_elem(void* const data); bool validate_schemas(); }; @@ -829,25 +862,32 @@ private: class Rdb_binlog_manager { public: - bool init(Rdb_dict_manager *dict); + Rdb_binlog_manager(const Rdb_binlog_manager&) = delete; + Rdb_binlog_manager& operator=(const Rdb_binlog_manager&) = delete; + Rdb_binlog_manager() = default; + + bool init(Rdb_dict_manager* const dict); void cleanup(); - void update(const char* binlog_name, const my_off_t binlog_pos, - const char* binlog_gtid, rocksdb::WriteBatchBase* batch); - bool read(char* binlog_name, my_off_t* binlog_pos, char* binlog_gtid); - void update_slave_gtid_info(uint id, const char* db, const char* gtid, - rocksdb::WriteBatchBase *write_batch); + void update(const char* const binlog_name, const my_off_t binlog_pos, + const char* const binlog_max_gtid, + rocksdb::WriteBatchBase* const batch); + bool read(char* const binlog_name, my_off_t* const binlog_pos, + char* const binlog_gtid) const; + void update_slave_gtid_info(const uint &id, const char* const db, + const char* const gtid, + 
rocksdb::WriteBatchBase* const write_batch); private: Rdb_dict_manager *m_dict= nullptr; uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; rocksdb::Slice m_key_slice; - rocksdb::Slice pack_value(uchar *buf, - const char *binlog_name, - const my_off_t binlog_pos, - const char *binlog_gtid); - bool unpack_value(const uchar *value, char *binlog_name, - my_off_t *binlog_pos, char *binlog_gtid); + rocksdb::Slice pack_value(uchar* const buf, + const char* const binlog_name, + const my_off_t &binlog_pos, + const char* const binlog_gtid) const; + bool unpack_value(const uchar* const value, char* const binlog_name, + my_off_t* const binlog_pos, char* const binlog_gtid) const; std::atomic m_slave_gtid_info_tbl; }; @@ -915,21 +955,25 @@ private: uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; rocksdb::Slice m_key_slice_max_index_id; - static void dump_index_id(uchar *netbuf, + static void dump_index_id(uchar* const netbuf, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id); - void delete_with_prefix(rocksdb::WriteBatch* batch, + void delete_with_prefix(rocksdb::WriteBatch* const batch, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id) const; /* Functions for fast DROP TABLE/INDEX */ - void resume_drop_indexes(); - void log_start_drop_table(const std::shared_ptr* key_descr, - uint32 n_keys, - const char* log_action); + void resume_drop_indexes() const; + void log_start_drop_table(const std::shared_ptr* const key_descr, + const uint32 &n_keys, + const char* const log_action) const; void log_start_drop_index(GL_INDEX_ID gl_index_id, - const char* log_action); + const char* log_action) const; public: - bool init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager); + Rdb_dict_manager(const Rdb_dict_manager&) = delete; + Rdb_dict_manager& operator=(const Rdb_dict_manager&) = delete; + Rdb_dict_manager() = default; + + bool init(rocksdb::DB* const rdb_dict, Rdb_cf_manager* const cf_manager); inline void cleanup() { @@ 
-947,108 +991,111 @@ public: } /* Raw RocksDB operations */ - std::unique_ptr begin(); - int commit(rocksdb::WriteBatch *batch, bool sync = true); + std::unique_ptr begin() const; + int commit(rocksdb::WriteBatch* const batch, const bool &sync = true) const; rocksdb::Status get_value(const rocksdb::Slice& key, - std::string *value) const; - void put_key(rocksdb::WriteBatchBase *batch, const rocksdb::Slice &key, - const rocksdb::Slice &value); + std::string* const value) const; + void put_key(rocksdb::WriteBatchBase* const batch, const rocksdb::Slice &key, + const rocksdb::Slice &value) const; void delete_key(rocksdb::WriteBatchBase *batch, const rocksdb::Slice &key) const; - rocksdb::Iterator *new_iterator(); + rocksdb::Iterator *new_iterator() const; /* Internal Index id => CF */ void add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch, const uchar index_type, const uint16_t kv_version, const uint index_id, - const uint cf_id); + const uint cf_id) const; void delete_index_info(rocksdb::WriteBatch* batch, const GL_INDEX_ID &index_id) const; bool get_index_info(const GL_INDEX_ID &gl_index_id, uint16_t *index_dict_version, - uchar *index_type, uint16_t *kv_version); + uchar *index_type, uint16_t *kv_version) const; /* CF id => CF flags */ - void add_cf_flags(rocksdb::WriteBatch *batch, - const uint cf_id, - const uint cf_flags); - bool get_cf_flags(const uint cf_id, uint *cf_flags); + void add_cf_flags(rocksdb::WriteBatch* const batch, + const uint &cf_id, + const uint &cf_flags) const; + bool get_cf_flags(const uint &cf_id, uint* const cf_flags) const; /* Functions for fast CREATE/DROP TABLE/INDEX */ void get_ongoing_index_operation(std::vector* gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type); + Rdb_key_def::DATA_DICT_TYPE dd_type) const; bool is_index_operation_ongoing(const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type); + Rdb_key_def::DATA_DICT_TYPE dd_type) const; void start_ongoing_index_operation(rocksdb::WriteBatch* batch, const 
GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type); - void end_ongoing_index_operation(rocksdb::WriteBatch* batch, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + void end_ongoing_index_operation(rocksdb::WriteBatch* const batch, const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type); - bool is_drop_index_empty(); - void add_drop_table(std::shared_ptr* key_descr, uint32 n_keys, - rocksdb::WriteBatch *batch); + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + bool is_drop_index_empty() const; + void add_drop_table(std::shared_ptr* const key_descr, + const uint32 &n_keys, + rocksdb::WriteBatch* const batch) const; void add_drop_index(const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch *batch); + rocksdb::WriteBatch* const batch) const; void add_create_index(const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch *batch); + rocksdb::WriteBatch* const batch) const; void finish_indexes_operation( const std::unordered_set& gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type); - void rollback_ongoing_index_creation(); + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + void rollback_ongoing_index_creation() const; - inline void get_ongoing_drop_indexes(std::vector* gl_index_ids) + inline void + get_ongoing_drop_indexes(std::vector* gl_index_ids) const { get_ongoing_index_operation(gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } - inline void get_ongoing_create_indexes(std::vector* gl_index_ids) + inline void + get_ongoing_create_indexes(std::vector* gl_index_ids) const { get_ongoing_index_operation(gl_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } inline void start_drop_index(rocksdb::WriteBatch *wb, - const GL_INDEX_ID& gl_index_id) + const GL_INDEX_ID& gl_index_id) const { start_ongoing_index_operation(wb, gl_index_id, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } inline void start_create_index(rocksdb::WriteBatch *wb, - const GL_INDEX_ID& gl_index_id) + const GL_INDEX_ID& gl_index_id) const { 
start_ongoing_index_operation(wb, gl_index_id, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } inline void finish_drop_indexes( - const std::unordered_set& gl_index_ids) + const std::unordered_set& gl_index_ids) const { finish_indexes_operation(gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } inline void finish_create_indexes( - const std::unordered_set& gl_index_ids) + const std::unordered_set& gl_index_ids) const { finish_indexes_operation(gl_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } - inline bool is_drop_index_ongoing(const GL_INDEX_ID& gl_index_id) + inline bool is_drop_index_ongoing(const GL_INDEX_ID& gl_index_id) const { return is_index_operation_ongoing(gl_index_id, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } - inline bool is_create_index_ongoing(const GL_INDEX_ID& gl_index_id) + inline bool is_create_index_ongoing(const GL_INDEX_ID& gl_index_id) const { return is_index_operation_ongoing(gl_index_id, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } - bool get_max_index_id(uint32_t *index_id); - bool update_max_index_id(rocksdb::WriteBatch* batch, - const uint32_t index_id); - void add_stats(rocksdb::WriteBatch* batch, - const std::vector& stats); - Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id); + bool get_max_index_id(uint32_t* const index_id) const; + bool update_max_index_id(rocksdb::WriteBatch* const batch, + const uint32_t &index_id) const; + void add_stats(rocksdb::WriteBatch* const batch, + const std::vector& stats) const; + Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id) const; }; } // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index 78af6eff3a5..c35116e10a9 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -28,12 +28,14 @@ #include "rocksdb/memtablerep.h" #include "rocksdb/merge_operator.h" #include "rocksdb/slice_transform.h" +#include "rocksdb/utilities/transaction_db.h" /* MyRocks header files */ #include "./ha_rocksdb.h" #include "./ha_rocksdb_proto.h" #include 
"./rdb_cf_manager.h" #include "./rdb_datadic.h" +#include "./rdb_utils.h" namespace myrocks { @@ -50,17 +52,35 @@ namespace myrocks { /* Support for INFORMATION_SCHEMA.ROCKSDB_CFSTATS dynamic table */ +namespace RDB_CFSTATS_FIELD +{ + enum + { + CF_NAME= 0, + STAT_TYPE, + VALUE + }; +} // namespace RDB_CFSTATS_FIELD + +static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[]= +{ + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + static int rdb_i_s_cfstats_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { bool ret; uint64_t val; DBUG_ENTER("rdb_i_s_cfstats_fill_table"); - std::vector> cf_properties = { + const std::vector> cf_properties = { {rocksdb::DB::Properties::kNumImmutableMemTable, "NUM_IMMUTABLE_MEM_TABLE"}, {rocksdb::DB::Properties::kMemTableFlushPending, "MEM_TABLE_FLUSH_PENDING"}, @@ -77,11 +97,11 @@ static int rdb_i_s_cfstats_fill_table( {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"} }; - rocksdb::DB *rdb= rdb_get_rocksdb_db(); - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + rocksdb::DB* const rdb= rdb_get_rocksdb_db(); + const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); DBUG_ASSERT(rdb != nullptr); - for (auto cf_name : cf_manager.get_cf_names()) + for (const auto &cf_name : cf_manager.get_cf_names()) { rocksdb::ColumnFamilyHandle* cfh; bool is_automatic; @@ -94,19 +114,22 @@ static int rdb_i_s_cfstats_fill_table( if (cfh == nullptr) continue; - for (auto property : cf_properties) + for (const auto &property : cf_properties) { if (!rdb->GetIntProperty(cfh, property.first, &val)) continue; DBUG_ASSERT(tables != nullptr); - 
tables->table->field[0]->store(cf_name.c_str(), cf_name.size(), - system_charset_info); - tables->table->field[1]->store(property.second.c_str(), - property.second.size(), - system_charset_info); - tables->table->field[2]->store(val, true); + tables->table->field[RDB_CFSTATS_FIELD::CF_NAME]->store( + cf_name.c_str(), + cf_name.size(), + system_charset_info); + tables->table->field[RDB_CFSTATS_FIELD::STAT_TYPE]->store( + property.second.c_str(), + property.second.size(), + system_charset_info); + tables->table->field[RDB_CFSTATS_FIELD::VALUE]->store(val, true); ret= my_core::schema_table_store_record(thd, tables->table); @@ -117,14 +140,6 @@ static int rdb_i_s_cfstats_fill_table( DBUG_RETURN(0); } -static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[]= -{ - ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; - static int rdb_i_s_cfstats_init(void *p) { my_core::ST_SCHEMA_TABLE *schema; @@ -143,37 +158,54 @@ static int rdb_i_s_cfstats_init(void *p) /* Support for INFORMATION_SCHEMA.ROCKSDB_DBSTATS dynamic table */ +namespace RDB_DBSTATS_FIELD +{ + enum + { + STAT_TYPE= 0, + VALUE + }; +} // namespace RDB_DBSTATS_FIELD + +static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[]= +{ + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + static int rdb_i_s_dbstats_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { bool ret; uint64_t val; DBUG_ENTER("rdb_i_s_dbstats_fill_table"); - std::vector> db_properties = { + const std::vector> db_properties = { {rocksdb::DB::Properties::kBackgroundErrors, 
"DB_BACKGROUND_ERRORS"}, {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"}, {rocksdb::DB::Properties::kOldestSnapshotTime, "DB_OLDEST_SNAPSHOT_TIME"} }; - rocksdb::DB *rdb= rdb_get_rocksdb_db(); + rocksdb::DB* const rdb= rdb_get_rocksdb_db(); const rocksdb::BlockBasedTableOptions& table_options= rdb_get_table_options(); - for (auto property : db_properties) + for (const auto &property : db_properties) { if (!rdb->GetIntProperty(property.first, &val)) continue; DBUG_ASSERT(tables != nullptr); - tables->table->field[0]->store(property.second.c_str(), - property.second.size(), - system_charset_info); - tables->table->field[1]->store(val, true); + tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( + property.second.c_str(), + property.second.size(), + system_charset_info); + tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); ret= my_core::schema_table_store_record(thd, tables->table); @@ -192,23 +224,16 @@ static int rdb_i_s_dbstats_fill_table( information from the column family. */ val= (table_options.block_cache ? 
table_options.block_cache->GetUsage() : 0); - tables->table->field[0]->store(STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), - system_charset_info); - tables->table->field[1]->store(val, true); + tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( + STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), system_charset_info); + tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); ret= my_core::schema_table_store_record(thd, tables->table); DBUG_RETURN(ret); } -static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[]= -{ - ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; - -static int rdb_i_s_dbstats_init(void *p) +static int rdb_i_s_dbstats_init(void* const p) { DBUG_ASSERT(p != nullptr); @@ -227,11 +252,34 @@ static int rdb_i_s_dbstats_init(void *p) /* Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT dynamic table */ +namespace RDB_PERF_CONTEXT_FIELD +{ + enum + { + TABLE_SCHEMA= 0, + TABLE_NAME, + PARTITION_NAME, + STAT_TYPE, + VALUE + }; +} // namespace RDB_PERF_CONTEXT_FIELD + +static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[]= +{ + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO_END +}; static int rdb_i_s_perf_context_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); @@ -241,7 +289,7 @@ static int rdb_i_s_perf_context_fill_table( 
DBUG_ENTER("rdb_i_s_perf_context_fill_table"); - std::vector tablenames= rdb_get_open_table_names(); + const std::vector tablenames= rdb_get_open_table_names(); for (const auto& it : tablenames) { std::string str, dbname, tablename, partname; @@ -263,23 +311,28 @@ static int rdb_i_s_perf_context_fill_table( DBUG_ASSERT(field != nullptr); - field[0]->store(dbname.c_str(), dbname.size(), system_charset_info); - field[1]->store(tablename.c_str(), tablename.size(), system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::TABLE_SCHEMA]->store( + dbname.c_str(), dbname.size(), system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::TABLE_NAME]->store( + tablename.c_str(), tablename.size(), system_charset_info); if (partname.size() == 0) { - field[2]->set_null(); + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_null(); } else { - field[2]->set_notnull(); - field[2]->store(partname.c_str(), partname.size(), system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_notnull(); + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->store( + partname.c_str(), partname.size(), system_charset_info); } for (int i= 0; i < PC_MAX_IDX; i++) { - field[3]->store(rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), - system_charset_info); - field[4]->store(counters.m_value[i], true); + field[RDB_PERF_CONTEXT_FIELD::STAT_TYPE]->store( + rdb_pc_stat_types[i].c_str(), + rdb_pc_stat_types[i].size(), + system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::VALUE]->store(counters.m_value[i], true); ret= my_core::schema_table_store_record(thd, tables->table); if (ret) @@ -290,19 +343,7 @@ static int rdb_i_s_perf_context_fill_table( DBUG_RETURN(0); } -static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[]= -{ - ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, - MY_I_S_MAYBE_NULL), - ROCKSDB_FIELD_INFO("STAT_TYPE", 
NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, - 0), - ROCKSDB_FIELD_INFO_END -}; - -static int rdb_i_s_perf_context_init(void *p) +static int rdb_i_s_perf_context_init(void* const p) { DBUG_ASSERT(p != nullptr); @@ -318,10 +359,29 @@ static int rdb_i_s_perf_context_init(void *p) DBUG_RETURN(0); } +/* + Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL dynamic table + */ +namespace RDB_PERF_CONTEXT_GLOBAL_FIELD +{ + enum + { + STAT_TYPE= 0, + VALUE + }; +} // namespace RDB_PERF_CONTEXT_GLOBAL_FIELD + +static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[]= +{ + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END +}; + static int rdb_i_s_perf_context_global_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); @@ -337,10 +397,12 @@ static int rdb_i_s_perf_context_global_fill_table( DBUG_ASSERT(tables->table != nullptr); DBUG_ASSERT(tables->table->field != nullptr); - tables->table->field[0]->store(rdb_pc_stat_types[i].c_str(), - rdb_pc_stat_types[i].size(), - system_charset_info); - tables->table->field[1]->store(global_counters.m_value[i], true); + tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::STAT_TYPE]->store( + rdb_pc_stat_types[i].c_str(), + rdb_pc_stat_types[i].size(), + system_charset_info); + tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::VALUE]->store( + global_counters.m_value[i], true); ret= my_core::schema_table_store_record(thd, tables->table); if (ret) @@ -350,14 +412,7 @@ static int rdb_i_s_perf_context_global_fill_table( DBUG_RETURN(0); } -static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[]= -{ - 
ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; - -static int rdb_i_s_perf_context_global_init(void *p) +static int rdb_i_s_perf_context_global_init(void* const p) { DBUG_ASSERT(p != nullptr); @@ -376,10 +431,28 @@ static int rdb_i_s_perf_context_global_init(void *p) /* Support for INFORMATION_SCHEMA.ROCKSDB_CFOPTIONS dynamic table */ +namespace RDB_CFOPTIONS_FIELD +{ + enum + { + CF_NAME= 0, + OPTION_TYPE, + VALUE + }; +} // namespace RDB_CFOPTIONS_FIELD + +static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = +{ + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + static int rdb_i_s_cfoptions_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); @@ -390,7 +463,7 @@ static int rdb_i_s_cfoptions_fill_table( Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); - for (auto cf_name : cf_manager.get_cf_names()) + for (const auto &cf_name : cf_manager.get_cf_names()) { std::string val; rocksdb::ColumnFamilyOptions opts; @@ -460,7 +533,7 @@ static int rdb_i_s_cfoptions_fill_table( // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value val = opts.max_bytes_for_level_multiplier_additional.empty() ? 
"NULL" : ""; - for (auto level : opts.max_bytes_for_level_multiplier_additional) + for (const auto &level : opts.max_bytes_for_level_multiplier_additional) { val.append(std::to_string(level) + ":"); } @@ -477,7 +550,7 @@ static int rdb_i_s_cfoptions_fill_table( // get COMPRESSION_PER_LEVEL option value val = opts.compression_per_level.empty() ? "NULL" : ""; - for (auto compression_type : opts.compression_per_level) + for (const auto &compression_type : opts.compression_per_level) { std::string res; GetStringFromCompressionType(&res, compression_type); @@ -523,7 +596,8 @@ static int rdb_i_s_cfoptions_fill_table( cf_option_types.push_back({"COMPACTION_STYLE", val}); // get COMPACTION_OPTIONS_UNIVERSAL related options - rocksdb::CompactionOptionsUniversal compac_opts = opts.compaction_options_universal; + const rocksdb::CompactionOptionsUniversal compac_opts = + opts.compaction_options_universal; val = "{SIZE_RATIO="; val.append(std::to_string(compac_opts.size_ratio)); val.append("; MIN_MERGE_WIDTH="); @@ -550,8 +624,7 @@ static int rdb_i_s_cfoptions_fill_table( std::to_string(opts.compaction_options_fifo.max_table_files_size)}); // get block-based table related options - const rocksdb::BlockBasedTableOptions& table_options= - rdb_get_table_options(); + const rocksdb::BlockBasedTableOptions& table_options= rdb_get_table_options(); // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option cf_option_types.push_back( @@ -620,19 +693,21 @@ static int rdb_i_s_cfoptions_fill_table( cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION", std::to_string(table_options.format_version)}); - for (auto cf_option_type : cf_option_types) + for (const auto &cf_option_type : cf_option_types) { DBUG_ASSERT(tables->table != nullptr); DBUG_ASSERT(tables->table->field != nullptr); - tables->table->field[0]->store(cf_name.c_str(), cf_name.size(), - system_charset_info); - tables->table->field[1]->store(cf_option_type.first.c_str(), - 
cf_option_type.first.size(), - system_charset_info); - tables->table->field[2]->store(cf_option_type.second.c_str(), - cf_option_type.second.size(), - system_charset_info); + tables->table->field[RDB_CFOPTIONS_FIELD::CF_NAME]->store( + cf_name.c_str(), cf_name.size(), system_charset_info); + tables->table->field[RDB_CFOPTIONS_FIELD::OPTION_TYPE]->store( + cf_option_type.first.c_str(), + cf_option_type.first.size(), + system_charset_info); + tables->table->field[RDB_CFOPTIONS_FIELD::VALUE]->store( + cf_option_type.second.c_str(), + cf_option_type.second.size(), + system_charset_info); ret = my_core::schema_table_store_record(thd, tables->table); @@ -643,11 +718,24 @@ static int rdb_i_s_cfoptions_fill_table( DBUG_RETURN(0); } -static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = +/* + Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table + */ +namespace RDB_GLOBAL_INFO_FIELD { - ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + enum + { + TYPE= 0, + NAME, + VALUE + }; +} + +static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = +{ + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), ROCKSDB_FIELD_INFO_END }; @@ -656,11 +744,11 @@ static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = * to insert (TYPE, KEY, VALUE) rows into * information_schema.rocksdb_global_info */ -static int rdb_global_info_fill_row(my_core::THD *thd, - my_core::TABLE_LIST *tables, - const char *type, - const char *name, - const char *value) +static int rdb_global_info_fill_row(my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + const char* const type, + const char* const name, + const char* const value) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); @@ -672,20 
+760,20 @@ static int rdb_global_info_fill_row(my_core::THD *thd, Field **field= tables->table->field; DBUG_ASSERT(field != nullptr); - field[0]->store(type, strlen(type), system_charset_info); - field[1]->store(name, strlen(name), system_charset_info); - field[2]->store(value, strlen(value), system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::TYPE]->store( + type, strlen(type), system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::NAME]->store( + name, strlen(name), system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::VALUE]->store( + value, strlen(value), system_charset_info); return my_core::schema_table_store_record(thd, tables->table); } -/* - Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table - */ static int rdb_i_s_global_info_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); @@ -698,7 +786,7 @@ static int rdb_i_s_global_info_fill_table( int ret= 0; /* binlog info */ - Rdb_binlog_manager *blm= rdb_get_binlog_manager(); + Rdb_binlog_manager* const blm= rdb_get_binlog_manager(); DBUG_ASSERT(blm != nullptr); char file_buf[FN_REFLEN+1]= {0}; @@ -714,7 +802,7 @@ static int rdb_i_s_global_info_fill_table( } /* max index info */ - Rdb_dict_manager *dict_manager= rdb_get_dict_manager(); + const Rdb_dict_manager* const dict_manager= rdb_get_dict_manager(); DBUG_ASSERT(dict_manager != nullptr); uint32_t max_index_id; @@ -729,8 +817,8 @@ static int rdb_i_s_global_info_fill_table( /* cf_id -> cf_flags */ char cf_id_buf[INT_BUF_LEN]= {0}; char cf_value_buf[FN_REFLEN+1] = {0}; - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); - for (auto cf_handle : cf_manager.get_all_cf()) { + const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { uint flags; 
dict_manager->get_cf_flags(cf_handle->GetID(), &flags); snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); @@ -761,14 +849,6 @@ static int rdb_i_s_global_info_fill_table( DBUG_RETURN(ret); } -static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = -{ - ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("NAME", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END -}; - namespace // anonymous namespace = not visible outside this source file { @@ -781,76 +861,24 @@ struct Rdb_ddl_scanner : public Rdb_tables_scanner }; } // anonymous namespace - -int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) +/* + Support for INFORMATION_SCHEMA.ROCKSDB_DDL dynamic table + */ +namespace RDB_DDL_FIELD { - DBUG_ASSERT(tdef != nullptr); - - int ret= 0; - - DBUG_ASSERT(m_table != nullptr); - Field** field= m_table->field; - DBUG_ASSERT(field != nullptr); - - const std::string& dbname= tdef->base_dbname(); - field[0]->store(dbname.c_str(), dbname.size(), system_charset_info); - - const std::string& tablename= tdef->base_tablename(); - field[1]->store(tablename.c_str(), tablename.size(), system_charset_info); - - const std::string& partname= tdef->base_partition(); - if (partname.length() == 0) + enum { - field[2]->set_null(); - } - else - { - field[2]->set_notnull(); - field[2]->store(partname.c_str(), partname.size(), system_charset_info); - } - - for (uint i= 0; i < tdef->m_key_count; i++) - { - const std::shared_ptr& kd= tdef->m_key_descr_arr[i]; - DBUG_ASSERT(kd != nullptr); - - field[3]->store(kd->m_name.c_str(), kd->m_name.size(), system_charset_info); - - GL_INDEX_ID gl_index_id = kd->get_gl_index_id(); - field[4]->store(gl_index_id.cf_id, true); - field[5]->store(gl_index_id.index_id, true); - field[6]->store(kd->m_index_type, true); - field[7]->store(kd->m_kv_format_version, true); - - std::string cf_name= kd->get_cf()->GetName(); - 
field[8]->store(cf_name.c_str(), cf_name.size(), system_charset_info); - - ret= my_core::schema_table_store_record(m_thd, m_table); - if (ret) - return ret; - } - return 0; -} - -static int rdb_i_s_ddl_fill_table(my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond) -{ - DBUG_ENTER("rdb_i_s_ddl_fill_table"); - - DBUG_ASSERT(thd != nullptr); - DBUG_ASSERT(tables != nullptr); - - Rdb_ddl_scanner ddl_arg; - ddl_arg.m_thd= thd; - ddl_arg.m_table= tables->table; - - Rdb_ddl_manager *ddl_manager= rdb_get_ddl_manager(); - DBUG_ASSERT(ddl_manager != nullptr); - int ret= ddl_manager->scan_for_tables(&ddl_arg); - - DBUG_RETURN(ret); -} + TABLE_SCHEMA= 0, + TABLE_NAME, + PARTITION_NAME, + INDEX_NAME, + COLUMN_FAMILY, + INDEX_NUMBER, + INDEX_TYPE, + KV_FORMAT_VERSION, + CF + }; +} // namespace RDB_DDL_FIELD static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = { @@ -868,7 +896,82 @@ static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = ROCKSDB_FIELD_INFO_END }; -static int rdb_i_s_ddl_init(void *p) +int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) +{ + DBUG_ASSERT(tdef != nullptr); + + int ret= 0; + + DBUG_ASSERT(m_table != nullptr); + Field** field= m_table->field; + DBUG_ASSERT(field != nullptr); + + const std::string& dbname= tdef->base_dbname(); + field[RDB_DDL_FIELD::TABLE_SCHEMA]->store( + dbname.c_str(), dbname.size(), system_charset_info); + + const std::string& tablename= tdef->base_tablename(); + field[RDB_DDL_FIELD::TABLE_NAME]->store( + tablename.c_str(), tablename.size(), system_charset_info); + + const std::string& partname= tdef->base_partition(); + if (partname.length() == 0) + { + field[RDB_DDL_FIELD::PARTITION_NAME]->set_null(); + } + else + { + field[RDB_DDL_FIELD::PARTITION_NAME]->set_notnull(); + field[RDB_DDL_FIELD::PARTITION_NAME]->store( + partname.c_str(), partname.size(), system_charset_info); + } + + for (uint i= 0; i < tdef->m_key_count; i++) + { + const Rdb_key_def& kd= *tdef->m_key_descr_arr[i]; + + 
field[RDB_DDL_FIELD::INDEX_NAME]->store( + kd.m_name.c_str(), kd.m_name.size(), system_charset_info); + + GL_INDEX_ID gl_index_id = kd.get_gl_index_id(); + field[RDB_DDL_FIELD::COLUMN_FAMILY]->store(gl_index_id.cf_id, true); + field[RDB_DDL_FIELD::INDEX_NUMBER]->store(gl_index_id.index_id, true); + field[RDB_DDL_FIELD::INDEX_TYPE]->store(kd.m_index_type, true); + field[RDB_DDL_FIELD::KV_FORMAT_VERSION]->store( + kd.m_kv_format_version, true); + + std::string cf_name= kd.get_cf()->GetName(); + field[RDB_DDL_FIELD::CF]->store( + cf_name.c_str(), cf_name.size(), system_charset_info); + + ret= my_core::schema_table_store_record(m_thd, m_table); + if (ret) + return ret; + } + return 0; +} + +static int rdb_i_s_ddl_fill_table(my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond) +{ + DBUG_ENTER("rdb_i_s_ddl_fill_table"); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + Rdb_ddl_scanner ddl_arg; + ddl_arg.m_thd= thd; + ddl_arg.m_table= tables->table; + + Rdb_ddl_manager *ddl_manager= rdb_get_ddl_manager(); + DBUG_ASSERT(ddl_manager != nullptr); + int ret= ddl_manager->scan_for_tables(&ddl_arg); + + DBUG_RETURN(ret); +} + +static int rdb_i_s_ddl_init(void* const p) { my_core::ST_SCHEMA_TABLE *schema; @@ -883,7 +986,7 @@ static int rdb_i_s_ddl_init(void *p) DBUG_RETURN(0); } -static int rdb_i_s_cfoptions_init(void *p) +static int rdb_i_s_cfoptions_init(void* const p) { my_core::ST_SCHEMA_TABLE *schema; @@ -898,7 +1001,7 @@ static int rdb_i_s_cfoptions_init(void *p) DBUG_RETURN(0); } -static int rdb_i_s_global_info_init(void *p) +static int rdb_i_s_global_info_init(void* const p) { my_core::ST_SCHEMA_TABLE *schema; @@ -918,7 +1021,7 @@ static std::string rdb_filename_without_path( const std::string& path) { /* Find last slash in path */ - size_t pos = path.rfind('/'); + const size_t pos = path.rfind('/'); /* None found? 
Just return the original string */ if (pos == std::string::npos) { @@ -929,84 +1032,29 @@ static std::string rdb_filename_without_path( return path.substr(pos + 1); } -/* Fill the information_schema.rocksdb_index_file_map virtual table */ -static int rdb_i_s_index_file_map_fill_table( - my_core::THD *thd, - my_core::TABLE_LIST *tables, - my_core::Item *cond __attribute__((__unused__))) +/* + Support for INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP dynamic table + */ +namespace RDB_INDEX_FILE_MAP_FIELD { - DBUG_ASSERT(thd != nullptr); - DBUG_ASSERT(tables != nullptr); - DBUG_ASSERT(tables->table != nullptr); - - int ret = 0; - Field **field = tables->table->field; - DBUG_ASSERT(field != nullptr); - - DBUG_ENTER("rdb_i_s_index_file_map_fill_table"); - - /* Iterate over all the column families */ - rocksdb::DB *rdb= rdb_get_rocksdb_db(); - DBUG_ASSERT(rdb != nullptr); - - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); - for (auto cf_handle : cf_manager.get_all_cf()) { - /* Grab the the properties of all the tables in the column family */ - rocksdb::TablePropertiesCollection table_props_collection; - rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle, - &table_props_collection); - if (!s.ok()) { - continue; - } - - /* Iterate over all the items in the collection, each of which contains a - * name and the actual properties */ - for (auto props : table_props_collection) { - /* Add the SST name into the output */ - std::string sst_name = rdb_filename_without_path(props.first); - field[2]->store(sst_name.data(), sst_name.size(), system_charset_info); - - /* Get the __indexstats__ data out of the table property */ - std::vector stats; - Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats); - if (stats.empty()) { - field[0]->store(-1, true); - field[1]->store(-1, true); - field[3]->store(-1, true); - field[4]->store(-1, true); - field[5]->store(-1, true); - field[6]->store(-1, true); - field[7]->store(-1, true); - field[8]->store(-1, true); - } - else 
{ - for (auto it : stats) { - /* Add the index number, the number of rows, and data size to the output */ - field[0]->store(it.m_gl_index_id.cf_id, true); - field[1]->store(it.m_gl_index_id.index_id, true); - field[3]->store(it.m_rows, true); - field[4]->store(it.m_data_size, true); - field[5]->store(it.m_entry_deletes, true); - field[6]->store(it.m_entry_single_deletes, true); - field[7]->store(it.m_entry_merges, true); - field[8]->store(it.m_entry_others, true); - - /* Tell MySQL about this row in the virtual table */ - ret= my_core::schema_table_store_record(thd, tables->table); - if (ret != 0) { - break; - } - } - } - } - } - - DBUG_RETURN(ret); -} + enum + { + COLUMN_FAMILY= 0, + INDEX_NUMBER, + SST_NAME, + NUM_ROWS, + DATA_SIZE, + ENTRY_DELETES, + ENTRY_SINGLEDELETES, + ENTRY_MERGES, + ENTRY_OTHERS + }; +} // namespace RDB_INDEX_FILE_MAP_FIELD static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = { - /* The information_schema.rocksdb_index_file_map virtual table has four fields: + /* The information_schema.rocksdb_index_file_map virtual table has four + * fields: * COLUMN_FAMILY => the index's column family contained in the SST file * INDEX_NUMBER => the index id contained in the SST file * SST_NAME => the name of the SST file containing some indexes @@ -1025,8 +1073,91 @@ static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = ROCKSDB_FIELD_INFO_END }; +/* Fill the information_schema.rocksdb_index_file_map virtual table */ +static int rdb_i_s_index_file_map_fill_table( + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + + DBUG_ENTER("rdb_i_s_index_file_map_fill_table"); + + /* Iterate over all the column families */ + rocksdb::DB* const rdb= rdb_get_rocksdb_db(); + DBUG_ASSERT(rdb != 
nullptr); + + const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + /* Grab the the properties of all the tables in the column family */ + rocksdb::TablePropertiesCollection table_props_collection; + const rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle, + &table_props_collection); + if (!s.ok()) { + continue; + } + + /* Iterate over all the items in the collection, each of which contains a + * name and the actual properties */ + for (const auto &props : table_props_collection) { + /* Add the SST name into the output */ + const std::string sst_name = rdb_filename_without_path(props.first); + field[RDB_INDEX_FILE_MAP_FIELD::SST_NAME]->store( + sst_name.data(), sst_name.size(), system_charset_info); + + /* Get the __indexstats__ data out of the table property */ + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats); + if (stats.empty()) { + field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::NUM_ROWS]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_DELETES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store(-1, true); + } + else { + for (auto it : stats) { + /* Add the index number, the number of rows, and data size to the output */ + field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store( + it.m_gl_index_id.cf_id, true); + field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store( + it.m_gl_index_id.index_id, true); + field[RDB_INDEX_FILE_MAP_FIELD::NUM_ROWS]->store(it.m_rows, true); + field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store( + it.m_data_size, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_DELETES]->store( + 
it.m_entry_deletes, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store( + it.m_entry_single_deletes, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store( + it.m_entry_merges, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store( + it.m_entry_others, true); + + /* Tell MySQL about this row in the virtual table */ + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + } + } + } + + DBUG_RETURN(ret); +} + /* Initialize the information_schema.rocksdb_index_file_map virtual table */ -static int rdb_i_s_index_file_map_init(void *p) +static int rdb_i_s_index_file_map_init(void* const p) { my_core::ST_SCHEMA_TABLE *schema; @@ -1041,6 +1172,221 @@ static int rdb_i_s_index_file_map_init(void *p) DBUG_RETURN(0); } +/* + Support for INFORMATION_SCHEMA.ROCKSDB_LOCKS dynamic table + */ +namespace RDB_LOCKS_FIELD +{ + enum + { + COLUMN_FAMILY_ID= 0, + TRANSACTION_ID, + KEY, + MODE + }; +} // namespace RDB_LOCKS_FIELD + +static ST_FIELD_INFO rdb_i_s_lock_info_fields_info[] = +{ + ROCKSDB_FIELD_INFO("COLUMN_FAMILY_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("KEY", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("MODE", 32, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + +/* Fill the information_schema.rocksdb_locks virtual table */ +static int rdb_i_s_lock_info_fill_table( + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + + DBUG_ENTER("rdb_i_s_lock_info_fill_table"); + + rocksdb::TransactionDB* const rdb= rdb_get_rocksdb_db(); + DBUG_ASSERT(rdb != nullptr); + + /* cf id -> rocksdb::KeyLockInfo */ + std::unordered_multimap lock_info = + rdb->GetLockStatusData(); + + for (const auto& lock : 
lock_info) { + const uint32_t cf_id = lock.first; + const auto& key_lock_info = lock.second; + const auto key_hexstr = rdb_hexdump(key_lock_info.key.c_str(), + key_lock_info.key.length(), FN_REFLEN); + + for (const auto &id : key_lock_info.ids) { + tables->table->field[RDB_LOCKS_FIELD::COLUMN_FAMILY_ID]->store( + cf_id, true); + tables->table->field[RDB_LOCKS_FIELD::TRANSACTION_ID]->store(id, true); + + tables->table->field[RDB_LOCKS_FIELD::KEY]->store( + key_hexstr.c_str(), key_hexstr.size(), + system_charset_info); + tables->table->field[RDB_LOCKS_FIELD::MODE]->store( + key_lock_info.exclusive ? "X" : "S", + 1, system_charset_info); + + /* Tell MySQL about this row in the virtual table */ + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + } + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_lock_info virtual table */ +static int rdb_i_s_lock_info_init(void* const p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_lock_info_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_lock_info_fields_info; + schema->fill_table= rdb_i_s_lock_info_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_TRX dynamic table + */ +namespace RDB_TRX_FIELD +{ + enum + { + TRANSACTION_ID= 0, + STATE, + NAME, + WRITE_COUNT, + LOCK_COUNT, + TIMEOUT_SEC, + WAITING_KEY, + WAITING_COLUMN_FAMILY_ID, + IS_REPLICATION, + SKIP_TRX_API, + READ_ONLY, + HAS_DEADLOCK_DETECTION, + NUM_ONGOING_BULKLOAD, + THREAD_ID, + QUERY + }; +} // namespace RDB_TRX_FIELD + +static ST_FIELD_INFO rdb_i_s_trx_info_fields_info[] = +{ + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(ulonglong), + MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("STATE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WRITE_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), + 
ROCKSDB_FIELD_INFO("LOCK_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("TIMEOUT_SEC", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("WAITING_KEY", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WAITING_COLUMN_FAMILY_ID", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("IS_REPLICATION", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SKIP_TRX_API", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("READ_ONLY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("HAS_DEADLOCK_DETECTION", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("NUM_ONGOING_BULKLOAD", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("THREAD_ID", sizeof(ulong), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("QUERY", NAME_LEN+1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END +}; + +/* Fill the information_schema.rocksdb_trx virtual table */ +static int rdb_i_s_trx_info_fill_table( + my_core::THD* const thd, + my_core::TABLE_LIST* const tables, + my_core::Item* const cond __attribute__((__unused__))) +{ + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + + DBUG_ENTER("rdb_i_s_trx_info_fill_table"); + + const std::vector &all_trx_info = rdb_get_all_trx_info(); + + for (const auto &info : all_trx_info) { + auto name_hexstr = rdb_hexdump(info.name.c_str(), info.name.length(), + NAME_LEN); + auto key_hexstr = rdb_hexdump(info.waiting_key.c_str(), + info.waiting_key.length(), FN_REFLEN); + tables->table->field[RDB_TRX_FIELD::TRANSACTION_ID]->store( + info.trx_id, true); + tables->table->field[RDB_TRX_FIELD::STATE]->store( + info.state.c_str(), info.state.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::NAME]->store( + name_hexstr.c_str(), name_hexstr.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::WRITE_COUNT]->store( + info.write_count, true); + 
tables->table->field[RDB_TRX_FIELD::LOCK_COUNT]->store( + info.lock_count, true); + tables->table->field[RDB_TRX_FIELD::TIMEOUT_SEC]->store( + info.timeout_sec, false); + tables->table->field[RDB_TRX_FIELD::WAITING_KEY]->store( + key_hexstr.c_str(), key_hexstr.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::WAITING_COLUMN_FAMILY_ID]->store( + info.waiting_cf_id, true); + tables->table->field[RDB_TRX_FIELD::IS_REPLICATION]->store( + info.is_replication, false); + tables->table->field[RDB_TRX_FIELD::SKIP_TRX_API]->store( + info.skip_trx_api, false); + tables->table->field[RDB_TRX_FIELD::READ_ONLY]->store( + info.read_only, false); + tables->table->field[RDB_TRX_FIELD::HAS_DEADLOCK_DETECTION]->store( + info.deadlock_detect, false); + tables->table->field[RDB_TRX_FIELD::NUM_ONGOING_BULKLOAD]->store( + info.num_ongoing_bulk_load, false); + tables->table->field[RDB_TRX_FIELD::THREAD_ID]->store( + info.thread_id, true); + tables->table->field[RDB_TRX_FIELD::QUERY]->store( + info.query_str.c_str(), info.query_str.length(), system_charset_info); + + /* Tell MySQL about this row in the virtual table */ + ret= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_trx_info virtual table */ +static int rdb_i_s_trx_info_init(void* const p) +{ + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_trx_info_init"); + DBUG_ASSERT(p != nullptr); + + schema= (my_core::ST_SCHEMA_TABLE*) p; + + schema->fields_info= rdb_i_s_trx_info_fields_info; + schema->fill_table= rdb_i_s_trx_info_fill_table; + + DBUG_RETURN(0); +} + static int rdb_i_s_deinit(void *p __attribute__((__unused__))) { DBUG_ENTER("rdb_i_s_deinit"); @@ -1186,4 +1532,37 @@ struct st_mysql_plugin rdb_i_s_index_file_map= 0, /* flags */ }; +struct st_mysql_plugin rdb_i_s_lock_info= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_LOCKS", + "Facebook", + "RocksDB lock 
information", + PLUGIN_LICENSE_GPL, + rdb_i_s_lock_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_mysql_plugin rdb_i_s_trx_info= +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_TRX", + "Facebook", + "RocksDB transaction information", + PLUGIN_LICENSE_GPL, + rdb_i_s_trx_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; } // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h index 846defab961..5384d237d3b 100644 --- a/storage/rocksdb/rdb_i_s.h +++ b/storage/rocksdb/rdb_i_s.h @@ -29,6 +29,7 @@ extern struct st_mysql_plugin rdb_i_s_cfoptions; extern struct st_mysql_plugin rdb_i_s_global_info; extern struct st_mysql_plugin rdb_i_s_ddl; extern struct st_mysql_plugin rdb_i_s_index_file_map; - +extern struct st_mysql_plugin rdb_i_s_lock_info; +extern struct st_mysql_plugin rdb_i_s_trx_info; } // namespace myrocks diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc index dc85db4d356..8544bdf52fd 100644 --- a/storage/rocksdb/rdb_index_merge.cc +++ b/storage/rocksdb/rdb_index_merge.cc @@ -23,9 +23,11 @@ namespace myrocks { -Rdb_index_merge::Rdb_index_merge(const ulonglong merge_buf_size, - const ulonglong merge_combine_read_size, - const rocksdb::Comparator* comparator) : +Rdb_index_merge::Rdb_index_merge(const char* const tmpfile_path, + const ulonglong &merge_buf_size, + const ulonglong &merge_combine_read_size, + const rocksdb::Comparator* const comparator) : + m_tmpfile_path(tmpfile_path), m_merge_buf_size(merge_buf_size), m_merge_combine_read_size(merge_combine_read_size), m_comparator(comparator), @@ -64,13 +66,15 @@ int Rdb_index_merge::init() to disk. They will be written to disk sorted. 
A sorted tree is used to keep track of the offset of each record within the unsorted buffer. */ - m_rec_buf_unsorted= std::make_shared(m_merge_buf_size); + m_rec_buf_unsorted= std::shared_ptr( + new merge_buf_info(m_merge_buf_size)); /* Allocate output buffer that will contain sorted block that is written to disk. */ - m_output_buf= std::make_shared(m_merge_buf_size); + m_output_buf= std::shared_ptr( + new merge_buf_info(m_merge_buf_size)); return 0; } @@ -82,7 +86,16 @@ int Rdb_index_merge::merge_file_create() { DBUG_ASSERT(m_merge_file.fd == -1); - int fd = mysql_tmpfile("myrocks"); + int fd; + /* If no path set for tmpfile, use mysql_tmpdir by default */ + if (m_tmpfile_path == nullptr) + { + fd = mysql_tmpfile("myrocks"); + } + else + { + fd = mysql_tmpfile_path(m_tmpfile_path, "myrocks"); + } if (fd < 0) { @@ -112,11 +125,24 @@ int Rdb_index_merge::add(const rocksdb::Slice& key, Check if sort buffer is going to be out of space, if so write it out to disk in sorted order using offset tree. */ - uint total_offset= RDB_MERGE_CHUNK_LEN + m_rec_buf_unsorted->curr_offset + + const uint total_offset= + RDB_MERGE_CHUNK_LEN + m_rec_buf_unsorted->curr_offset + RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER + key.size() + val.size(); if (total_offset >= m_rec_buf_unsorted->total_size) { + /* + If the offset tree is empty here, that means that the proposed key to + add is too large for the buffer. + */ + if (m_offset_tree.empty()) + { + // NO_LINT_DEBUG + sql_print_error("Sort buffer size is too small to process merge. 
" + "Please set merge buffer size to a higher value."); + return HA_ERR_INTERNAL_ERROR; + } + if (merge_buf_write()) { // NO_LINT_DEBUG @@ -125,7 +151,7 @@ int Rdb_index_merge::add(const rocksdb::Slice& key, } } - ulonglong rec_offset= m_rec_buf_unsorted->curr_offset; + const ulonglong rec_offset= m_rec_buf_unsorted->curr_offset; /* Store key and value in temporary unsorted in memory buffer pointed to by @@ -159,7 +185,7 @@ int Rdb_index_merge::merge_buf_write() Iterate through the offset tree. Should be ordered by the secondary key at this point. */ - for (auto& rec : m_offset_tree) + for (const auto& rec : m_offset_tree) { DBUG_ASSERT(m_output_buf->curr_offset <= m_merge_buf_size); @@ -188,8 +214,14 @@ int Rdb_index_merge::merge_buf_write() return HA_ERR_INTERNAL_ERROR; } + /* + Add a file sync call here to flush the data out. Otherwise, the filesystem + cache can flush out all of the files at the same time, causing a write + burst. + */ if (my_write(m_merge_file.fd, m_output_buf->block.get(), - m_output_buf->total_size, MYF(MY_WME | MY_NABP))) + m_output_buf->total_size, MYF(MY_WME | MY_NABP)) || + mysql_file_sync(m_merge_file.fd, MYF(MY_WME))) { // NO_LINT_DEBUG sql_print_error("Error writing sorted merge buffer to disk."); @@ -238,13 +270,13 @@ int Rdb_index_merge::merge_heap_prepare() /* Allocate buffers for each chunk */ for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++) { - auto entry= std::make_shared(m_comparator); + const auto entry= std::make_shared(m_comparator); /* Read chunk_size bytes from each chunk on disk, and place inside respective chunk buffer. */ - size_t total_size= + const size_t total_size= entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); if (total_size == (size_t) - 1) @@ -275,7 +307,7 @@ int Rdb_index_merge::merge_heap_prepare() /** Create and/or iterate through keys in the merge heap. 
*/ -int Rdb_index_merge::next(rocksdb::Slice* key, rocksdb::Slice* val) +int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) { /* If table fits in one sort buffer, we can optimize by writing @@ -292,7 +324,7 @@ int Rdb_index_merge::next(rocksdb::Slice* key, rocksdb::Slice* val) return -1; } - auto rec= m_offset_tree.begin(); + const auto rec= m_offset_tree.begin(); /* Read record from offset */ merge_read_rec(rec->block, key, val); @@ -332,8 +364,8 @@ int Rdb_index_merge::next(rocksdb::Slice* key, rocksdb::Slice* val) /** Get current top record from the heap. */ -void Rdb_index_merge::merge_heap_top(rocksdb::Slice* key, - rocksdb::Slice* val) +void Rdb_index_merge::merge_heap_top(rocksdb::Slice* const key, + rocksdb::Slice* const val) { DBUG_ASSERT(!m_merge_min_heap.empty()); @@ -348,8 +380,8 @@ void Rdb_index_merge::merge_heap_top(rocksdb::Slice* key, Returns -1 when there are no more records in the heap. */ -int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* key, - rocksdb::Slice* val) +int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* const key, + rocksdb::Slice* const val) { /* Make a new reference to shared ptr so it doesn't get destroyed @@ -430,7 +462,7 @@ int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) } /* Overwrite the old block */ - size_t bytes_read= my_read(fd, block.get(), block_len, MYF(MY_WME)); + const size_t bytes_read= my_read(fd, block.get(), block_len, MYF(MY_WME)); if (bytes_read == (size_t) -1) { // NO_LINT_DEBUG @@ -446,8 +478,8 @@ int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) Get records from offset within sort buffer and compare them. Sort by least to greatest. 
*/ -int Rdb_index_merge::merge_record_compare(const uchar* a_block, - const uchar* b_block, +int Rdb_index_merge::merge_record_compare(const uchar* const a_block, + const uchar* const b_block, const rocksdb::Comparator* const comparator) { return comparator->Compare(as_slice(a_block), as_slice(b_block)); @@ -457,9 +489,9 @@ int Rdb_index_merge::merge_record_compare(const uchar* a_block, Given an offset in a merge sort buffer, read out the keys + values. After this, block will point to the next record in the buffer. **/ -void Rdb_index_merge::merge_read_rec(const uchar* block, - rocksdb::Slice* key, - rocksdb::Slice* val) +void Rdb_index_merge::merge_read_rec(const uchar* const block, + rocksdb::Slice* const key, + rocksdb::Slice* const val) { /* Read key at block offset into key slice and the value into value slice*/ read_slice(key, block); @@ -474,13 +506,15 @@ void Rdb_index_merge::read_slice(rocksdb::Slice* slice, const uchar* block_ptr) *slice= rocksdb::Slice(reinterpret_cast(block_ptr), slice_len); } -int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *key, - rocksdb::Slice *val) +int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice* const key, + rocksdb::Slice* const val) { const uchar* block_ptr= block; + const auto orig_offset = chunk_info->curr_offset; + const auto orig_block = block; /* Read key at block offset into key slice and the value into value slice*/ - if (read_slice(key, &block_ptr) != 0 || read_slice(val, &block_ptr) != 0) + if (read_slice(key, &block_ptr) != 0) { return 1; } @@ -488,10 +522,20 @@ int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *key, chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block; block += (uintptr_t) block_ptr - (uintptr_t) block; + if (read_slice(val, &block_ptr) != 0) + { + chunk_info->curr_offset= orig_offset; + block= orig_block; + return 1; + } + + chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block; + block += (uintptr_t) block_ptr - (uintptr_t) 
block; + return 0; } -int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice* slice, +int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice* const slice, const uchar** block_ptr) { if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER)) @@ -515,7 +559,7 @@ size_t Rdb_index_merge::merge_heap_entry::prepare(File fd, ulonglong f_offset, ulonglong chunk_size) { chunk_info= std::make_shared(chunk_size); - size_t res = chunk_info->prepare(fd, f_offset); + const size_t res = chunk_info->prepare(fd, f_offset); if (res != (size_t) - 1) { block= chunk_info->block.get() + RDB_MERGE_CHUNK_LEN; @@ -540,7 +584,7 @@ size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) return (size_t) - 1; } - size_t bytes_read= my_read(fd, block.get(), total_size, MYF(MY_WME)); + const size_t bytes_read= my_read(fd, block.get(), total_size, MYF(MY_WME)); if (bytes_read == (size_t) - 1) { // NO_LINT_DEBUG @@ -601,4 +645,3 @@ void Rdb_index_merge::merge_reset() } } // namespace myrocks - diff --git a/storage/rocksdb/rdb_index_merge.h b/storage/rocksdb/rdb_index_merge.h index 24090c335ac..86893bf316c 100644 --- a/storage/rocksdb/rdb_index_merge.h +++ b/storage/rocksdb/rdb_index_merge.h @@ -60,7 +60,7 @@ class Rdb_index_merge { struct merge_buf_info { /* heap memory allocated for main memory sort/merge */ std::unique_ptr block; - ulonglong block_len; /* amount of data bytes allocated for block above */ + const ulonglong block_len; /* amount of data bytes allocated for block above */ ulonglong curr_offset; /* offset of the record pointer for the block */ ulonglong disk_start_offset; /* where the chunk starts on disk */ ulonglong disk_curr_offset; /* current offset on disk */ @@ -78,12 +78,12 @@ class Rdb_index_merge { int read_next_chunk_from_disk(File fd) __attribute__((__nonnull__, __warn_unused_result__)); - inline bool is_chunk_finished() + inline bool is_chunk_finished() const { return curr_offset + disk_curr_offset - disk_start_offset == total_size; } - 
inline bool has_space(uint64 needed) + inline bool has_space(uint64 needed) const { return curr_offset + needed <= block_len; } @@ -115,10 +115,10 @@ class Rdb_index_merge { int read_next_chunk_from_disk(File fd) __attribute__((__nonnull__, __warn_unused_result__)); - int read_rec(rocksdb::Slice *key, rocksdb::Slice *val) + int read_rec(rocksdb::Slice* const key, rocksdb::Slice* const val) __attribute__((__nonnull__, __warn_unused_result__)); - int read_slice(rocksdb::Slice* slice, const uchar** block_ptr) + int read_slice(rocksdb::Slice* const slice, const uchar** block_ptr) __attribute__((__nonnull__, __warn_unused_result__)); explicit merge_heap_entry(const rocksdb::Comparator* const comparator) : @@ -145,11 +145,13 @@ class Rdb_index_merge { return merge_record_compare(this->block, record.block, comparator) < 0; } - merge_record(uchar* block, const rocksdb::Comparator* const comparator) : + merge_record(uchar* const block, + const rocksdb::Comparator* const comparator) : block(block), comparator(comparator) {} }; private: + const char* m_tmpfile_path; const ulonglong m_merge_buf_size; const ulonglong m_merge_combine_read_size; const rocksdb::Comparator* m_comparator; @@ -161,12 +163,12 @@ class Rdb_index_merge { std::vector>, merge_heap_comparator> m_merge_min_heap; - static inline void merge_store_uint64(uchar *dst, uint64 n) + static inline void merge_store_uint64(uchar* const dst, uint64 n) { memcpy(dst, &n, sizeof(n)); } - static inline void merge_read_uint64(const uchar **buf_ptr, uint64 *dst) + static inline void merge_read_uint64(const uchar **buf_ptr, uint64* const dst) { DBUG_ASSERT(buf_ptr != nullptr); memcpy(dst, *buf_ptr, sizeof(uint64)); @@ -185,17 +187,18 @@ class Rdb_index_merge { const rocksdb::Comparator* const comparator) __attribute__((__nonnull__, __warn_unused_result__)); - void merge_read_rec(const uchar* block, rocksdb::Slice* key, - rocksdb::Slice* val) + void merge_read_rec(const uchar* const block, rocksdb::Slice* const key, + 
rocksdb::Slice* const val) __attribute__((__nonnull__)); void read_slice(rocksdb::Slice* slice, const uchar* block_ptr) __attribute__((__nonnull__)); public: - Rdb_index_merge(const ulonglong merge_buf_size, - const ulonglong merge_combine_read_size, - const rocksdb::Comparator* comparator); + Rdb_index_merge(const char* const tmpfile_path, + const ulonglong &merge_buf_size, + const ulonglong &merge_combine_read_size, + const rocksdb::Comparator* const comparator); ~Rdb_index_merge(); int init() @@ -210,7 +213,7 @@ class Rdb_index_merge { int merge_buf_write() __attribute__((__nonnull__, __warn_unused_result__)); - int next(rocksdb::Slice* key, rocksdb::Slice* val) + int next(rocksdb::Slice* const key, rocksdb::Slice* const val) __attribute__((__nonnull__, __warn_unused_result__)); int merge_heap_prepare() @@ -219,11 +222,11 @@ class Rdb_index_merge { void merge_heap_top(rocksdb::Slice* key, rocksdb::Slice* val) __attribute__((__nonnull__)); - int merge_heap_pop_and_get_next(rocksdb::Slice* key, rocksdb::Slice* val) + int merge_heap_pop_and_get_next(rocksdb::Slice* const key, + rocksdb::Slice* const val) __attribute__((__nonnull__, __warn_unused_result__)); void merge_reset(); }; } // namespace myrocks - diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index e8077e2fd89..5b1c9ba4c22 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -39,7 +39,7 @@ PSI_stage_info stage_waiting_on_row_lock2= { 0, "Waiting for row lock", 0}; static const int64_t MICROSECS= 1000*1000; // A timeout as long as one full non-leap year worth of microseconds is as // good as infinite timeout. 
-static const int64_t BIG_TIMEOUT= MICROSECS * 60 * 60 * 24 * 7 * 365; +static const int64_t BIG_TIMEOUT= MICROSECS * 60 * 60 * 24 * 365; Rdb_cond_var::Rdb_cond_var() { mysql_cond_init(0, &m_cond, nullptr); @@ -49,7 +49,7 @@ Rdb_cond_var::~Rdb_cond_var() { mysql_cond_destroy(&m_cond); } -Status Rdb_cond_var::Wait(std::shared_ptr mutex_arg) { +Status Rdb_cond_var::Wait(const std::shared_ptr mutex_arg) { return WaitFor(mutex_arg, BIG_TIMEOUT); } @@ -69,7 +69,7 @@ Status Rdb_cond_var::Wait(std::shared_ptr mutex_arg) { */ Status -Rdb_cond_var::WaitFor(std::shared_ptr mutex_arg, +Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, int64_t timeout_micros) { auto *mutex_obj= reinterpret_cast(mutex_arg.get()); @@ -204,7 +204,7 @@ Status Rdb_mutex::TryLockFor(int64_t timeout_time __attribute__((__unused__))) #ifndef STANDALONE_UNITTEST -void Rdb_mutex::set_unlock_action(PSI_stage_info *old_stage_arg) +void Rdb_mutex::set_unlock_action(const PSI_stage_info* const old_stage_arg) { DBUG_ASSERT(old_stage_arg != nullptr); @@ -221,7 +221,8 @@ void Rdb_mutex::UnLock() { #ifndef STANDALONE_UNITTEST if (m_old_stage_info.count(current_thd) > 0) { - std::shared_ptr old_stage = m_old_stage_info[current_thd]; + const std::shared_ptr old_stage = + m_old_stage_info[current_thd]; m_old_stage_info.erase(current_thd); /* The following will call mysql_mutex_unlock */ my_core::thd_exit_cond(current_thd, old_stage.get()); diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h index 7d0e4169ade..6edd78a1167 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.h +++ b/storage/rocksdb/rdb_mutex_wrapper.h @@ -64,13 +64,15 @@ class Rdb_mutex: public rocksdb::TransactionDBMutex { friend class Rdb_cond_var; #ifndef STANDALONE_UNITTEST - void set_unlock_action(PSI_stage_info *old_stage_arg); + void set_unlock_action(const PSI_stage_info* const old_stage_arg); std::unordered_map> m_old_stage_info; #endif }; class Rdb_cond_var: public rocksdb::TransactionDBCondVar { + 
Rdb_cond_var(const Rdb_cond_var&) = delete; + Rdb_cond_var& operator=(const Rdb_cond_var&) = delete; public: Rdb_cond_var(); virtual ~Rdb_cond_var(); @@ -85,7 +87,7 @@ class Rdb_cond_var: public rocksdb::TransactionDBCondVar { // Returns non-OK if TransactionDB should stop waiting and fail the operation. // May return OK spuriously even if not notified. virtual rocksdb::Status - Wait(std::shared_ptr mutex) override; + Wait(const std::shared_ptr mutex) override; // Block current thread until condition variable is notifiesd by a call to // Notify() or NotifyAll(), or if the timeout is reached. @@ -100,7 +102,7 @@ class Rdb_cond_var: public rocksdb::TransactionDBCondVar { // fail the operation. // May return OK spuriously even if not notified. virtual rocksdb::Status - WaitFor(std::shared_ptr mutex, + WaitFor(const std::shared_ptr mutex, int64_t timeout_time) override; // If any threads are waiting on *this, unblock at least one of the @@ -117,6 +119,9 @@ class Rdb_cond_var: public rocksdb::TransactionDBCondVar { class Rdb_mutex_factory : public rocksdb::TransactionDBMutexFactory { public: + Rdb_mutex_factory(const Rdb_mutex_factory&) = delete; + Rdb_mutex_factory& operator=(const Rdb_mutex_factory&) = delete; + Rdb_mutex_factory() {} /* Override parent class's virtual methods of interrest. 
*/ diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index cd0d9e57c2b..88d84061789 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -96,7 +96,7 @@ std::string rdb_pc_stat_types[]= idx++; \ } while (0) -static void harvest_diffs(Rdb_atomic_perf_counters *counters) +static void harvest_diffs(Rdb_atomic_perf_counters * const counters) { // (C) These should be in the same order as the PC enum size_t idx= 0; @@ -151,7 +151,7 @@ static void harvest_diffs(Rdb_atomic_perf_counters *counters) static Rdb_atomic_perf_counters rdb_global_perf_counters; -void rdb_get_global_perf_counters(Rdb_perf_counters *counters) +void rdb_get_global_perf_counters(Rdb_perf_counters* const counters) { DBUG_ASSERT(counters != nullptr); @@ -165,9 +165,9 @@ void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters) } } -bool Rdb_io_perf::start(uint32_t perf_context_level) +bool Rdb_io_perf::start(const uint32_t perf_context_level) { - rocksdb::PerfLevel perf_level= + const rocksdb::PerfLevel perf_level= static_cast(perf_context_level); if (rocksdb::GetPerfLevel() != perf_level) @@ -185,9 +185,9 @@ bool Rdb_io_perf::start(uint32_t perf_context_level) return true; } -void Rdb_io_perf::end_and_record(uint32_t perf_context_level) +void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) { - rocksdb::PerfLevel perf_level= + const rocksdb::PerfLevel perf_level= static_cast(perf_context_level); if (perf_level == rocksdb::kDisable) @@ -208,7 +208,7 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) { my_io_perf_t io_perf_read; - my_io_perf_init(&io_perf_read); + io_perf_read.init(); io_perf_read.bytes= rocksdb::perf_context.block_read_byte; io_perf_read.requests= rocksdb::perf_context.block_read_count; @@ -219,8 +219,8 @@ void Rdb_io_perf::end_and_record(uint32_t perf_context_level) io_perf_read.svc_time_max= io_perf_read.svc_time= rocksdb::perf_context.block_read_time; - 
my_io_perf_sum_atomic_helper(m_shared_io_perf_read, &io_perf_read); - my_io_perf_sum(&m_stats->table_io_perf_read, &io_perf_read); + m_shared_io_perf_read->sum(io_perf_read); + m_stats->table_io_perf_read.sum(io_perf_read); } if (m_stats) { diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h index 1e01e933895..e6439c2e613 100644 --- a/storage/rocksdb/rdb_perf_context.h +++ b/storage/rocksdb/rdb_perf_context.h @@ -91,7 +91,10 @@ struct Rdb_atomic_perf_counters */ class Rdb_perf_counters { + Rdb_perf_counters(const Rdb_perf_counters&) = delete; + Rdb_perf_counters& operator=(const Rdb_perf_counters&) = delete; public: + Rdb_perf_counters() = default; uint64_t m_value[PC_MAX_IDX]; void load(const Rdb_atomic_perf_counters &atomic_counters); @@ -110,9 +113,12 @@ class Rdb_io_perf ha_statistics *m_stats= nullptr; public: - void init(Rdb_atomic_perf_counters *atomic_counters, - my_io_perf_atomic_t *shared_io_perf_read, - ha_statistics *stats) + Rdb_io_perf(const Rdb_io_perf&) = delete; + Rdb_io_perf& operator=(const Rdb_io_perf&) = delete; + + void init(Rdb_atomic_perf_counters* const atomic_counters, + my_io_perf_atomic_t* const shared_io_perf_read, + ha_statistics* const stats) { DBUG_ASSERT(atomic_counters != nullptr); DBUG_ASSERT(shared_io_perf_read != nullptr); @@ -123,8 +129,8 @@ class Rdb_io_perf m_stats= stats; } - bool start(uint32_t perf_context_level); - void end_and_record(uint32_t perf_context_level); + bool start(const uint32_t perf_context_level); + void end_and_record(const uint32_t perf_context_level); explicit Rdb_io_perf() : m_atomic_counters(nullptr), m_shared_io_perf_read(nullptr), diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index d131545e476..ce457cc73a7 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -28,6 +28,7 @@ /* RocksDB header files */ #include "rocksdb/db.h" +#include "rocksdb/options.h" /* MyRocks header files */ #include 
"./ha_rocksdb.h" @@ -36,14 +37,16 @@ namespace myrocks { -Rdb_sst_file::Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf, +Rdb_sst_file::Rdb_sst_file(rocksdb::DB* const db, + rocksdb::ColumnFamilyHandle* const cf, const rocksdb::DBOptions& db_options, - const std::string& name) : + const std::string& name, const bool tracing) : m_db(db), m_cf(cf), m_db_options(db_options), m_sst_file_writer(nullptr), - m_name(name) + m_name(name), + m_tracing(tracing) { DBUG_ASSERT(db != nullptr); DBUG_ASSERT(cf != nullptr); @@ -76,13 +79,20 @@ rocksdb::Status Rdb_sst_file::open() // Create an sst file writer with the current options and comparator const rocksdb::Comparator* comparator= m_cf->GetComparator(); - rocksdb::EnvOptions env_options(m_db_options); - rocksdb::Options options(m_db_options, cf_descr.options); + const rocksdb::EnvOptions env_options(m_db_options); + const rocksdb::Options options(m_db_options, cf_descr.options); m_sst_file_writer= - new rocksdb::SstFileWriter(env_options, options, comparator); + new rocksdb::SstFileWriter(env_options, options, comparator, m_cf); s= m_sst_file_writer->Open(m_name); + if (m_tracing) + { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Open(%s) returned %s", m_name.c_str(), + s.ok() ? 
"ok" : "not ok"); + } + if (!s.ok()) { delete m_sst_file_writer; @@ -101,22 +111,73 @@ rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice& key, return m_sst_file_writer->Add(key, value); } +std::string Rdb_sst_file::generateKey(const std::string& key) +{ + static char const hexdigit[]= { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' + }; + + std::string res; + + res.reserve(key.size() * 2); + + for (auto ch : key) + { + res += hexdigit[((uint8_t) ch) >> 4]; + res += hexdigit[((uint8_t) ch) & 0x0F]; + } + + return res; +} + // This function is run by the background thread rocksdb::Status Rdb_sst_file::commit() { DBUG_ASSERT(m_sst_file_writer != nullptr); rocksdb::Status s; + rocksdb::ExternalSstFileInfo fileinfo; ///Finish may should be modified // Close out the sst file - s= m_sst_file_writer->Finish(); + s= m_sst_file_writer->Finish(&fileinfo); + if (m_tracing) + { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Finish returned %s", + s.ok() ? 
"ok" : "not ok"); + } + if (s.ok()) { - std::vector files = { m_name }; + if (m_tracing) + { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Adding file %s, smallest key: %s, " + "largest key: %s, file size: %" PRIu64 ", " + "num_entries: %" PRIu64, fileinfo.file_path.c_str(), + generateKey(fileinfo.smallest_key).c_str(), + generateKey(fileinfo.largest_key).c_str(), + fileinfo.file_size, fileinfo.num_entries); + } + // Add the file to the database - // Set the skip_snapshot_check parameter to true since no one + // Set the snapshot_consistency parameter to false since no one // should be accessing the table we are bulk loading - s= m_db->AddFile(m_cf, files, true, true); + rocksdb::IngestExternalFileOptions opts; + opts.move_files = true; + opts.snapshot_consistency = false; + opts.allow_global_seqno = false; + opts.allow_blocking_flush = false; + s= m_db->IngestExternalFile(m_cf, { m_name }, opts); + + if (m_tracing) + { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: AddFile(%s) returned %s", + fileinfo.file_path.c_str(), + s.ok() ? 
"ok" : "not ok"); + } } delete m_sst_file_writer; @@ -125,10 +186,11 @@ rocksdb::Status Rdb_sst_file::commit() return s; } -Rdb_sst_info::Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, +Rdb_sst_info::Rdb_sst_info(rocksdb::DB* const db, const std::string& tablename, const std::string& indexname, - rocksdb::ColumnFamilyHandle* cf, - const rocksdb::DBOptions& db_options) : + rocksdb::ColumnFamilyHandle* const cf, + const rocksdb::DBOptions& db_options, + const bool& tracing) : m_db(db), m_cf(cf), m_db_options(db_options), @@ -142,7 +204,8 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, m_thread(nullptr), m_finished(false), #endif - m_sst_file(nullptr) + m_sst_file(nullptr), + m_tracing(tracing) { m_prefix= db->GetName() + "/"; @@ -162,7 +225,7 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, } rocksdb::ColumnFamilyDescriptor cf_descr; - rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); + const rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); if (!s.ok()) { // Default size if we can't get the cf's target size @@ -188,13 +251,13 @@ int Rdb_sst_info::open_new_sst_file() DBUG_ASSERT(m_sst_file == nullptr); // Create the new sst file's name - std::string name= m_prefix + std::to_string(m_sst_count++) + m_suffix; + const std::string name= m_prefix + std::to_string(m_sst_count++) + m_suffix; // Create the new sst file object - m_sst_file= new Rdb_sst_file(m_db, m_cf, m_db_options, name); + m_sst_file= new Rdb_sst_file(m_db, m_cf, m_db_options, name, m_tracing); // Open the sst file - rocksdb::Status s= m_sst_file->open(); + const rocksdb::Status s= m_sst_file->open(); if (!s.ok()) { set_error_msg(s.ToString()); @@ -224,14 +287,14 @@ void Rdb_sst_info::close_curr_sst_file() { // Add this finished sst file to the queue (while holding mutex) - std::lock_guard guard(m_mutex); + const std::lock_guard guard(m_mutex); m_queue.push(m_sst_file); } // Notify the background thread that there is a new entry in the 
queue m_cond.notify_one(); #else - rocksdb::Status s= m_sst_file->commit(); + const rocksdb::Status s= m_sst_file->commit(); if (!s.ok()) { set_error_msg(s.ToString()); @@ -276,7 +339,7 @@ int Rdb_sst_info::put(const rocksdb::Slice& key, DBUG_ASSERT(m_sst_file != nullptr); // Add the key/value to the current sst file - rocksdb::Status s= m_sst_file->put(key, value); + const rocksdb::Status s= m_sst_file->put(key, value); if (!s.ok()) { set_error_msg(s.ToString()); @@ -325,7 +388,7 @@ void Rdb_sst_info::set_error_msg(const std::string& msg) // Both the foreground and background threads can set the error message // so lock the mutex to protect it. We only want the first error that // we encounter. - std::lock_guard guard(m_mutex); + const std::lock_guard guard(m_mutex); #endif my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str()); if (m_error_msg.empty()) @@ -343,7 +406,7 @@ void Rdb_sst_info::thread_fcn(void* object) void Rdb_sst_info::run_thread() { - std::unique_lock lk(m_mutex); + const std::unique_lock lk(m_mutex); do { @@ -353,14 +416,14 @@ void Rdb_sst_info::run_thread() // Inner loop pulls off all Rdb_sst_file entries and processes them while (!m_queue.empty()) { - Rdb_sst_file* sst_file= m_queue.front(); + const Rdb_sst_file* const sst_file= m_queue.front(); m_queue.pop(); // Release the lock - we don't want to hold it while committing the file lk.unlock(); // Close out the sst file and add it to the database - rocksdb::Status s= sst_file->commit(); + const rocksdb::Status s= sst_file->commit(); if (!s.ok()) { set_error_msg(s.ToString()); @@ -380,10 +443,10 @@ void Rdb_sst_info::run_thread() } #endif -void Rdb_sst_info::init(rocksdb::DB* db) +void Rdb_sst_info::init(const rocksdb::DB* const db) { - std::string path= db->GetName() + FN_DIRSEP; - struct st_my_dir* dir_info= my_dir(path.c_str(), MYF(MY_DONT_SORT)); + const std::string path= db->GetName() + FN_DIRSEP; + struct st_my_dir* const dir_info= my_dir(path.c_str(), 
MYF(MY_DONT_SORT)); // Access the directory if (dir_info == nullptr) @@ -395,16 +458,16 @@ void Rdb_sst_info::init(rocksdb::DB* db) } // Scan through the files in the directory - struct fileinfo* file_info= dir_info->dir_entry; + const struct fileinfo* file_info= dir_info->dir_entry; for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++) { // find any files ending with m_suffix ... - std::string name= file_info->name; - size_t pos= name.find(m_suffix); + const std::string name= file_info->name; + const size_t pos= name.find(m_suffix); if (pos != std::string::npos && name.size() - pos == m_suffix.size()) { // ... and remove them - std::string fullname= path + name; + const std::string fullname= path + name; my_delete(fullname.c_str(), MYF(0)); } } diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 8845ec98122..933357c8f08 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h @@ -33,18 +33,24 @@ namespace myrocks { class Rdb_sst_file { + private: Rdb_sst_file(const Rdb_sst_file& p)= delete; Rdb_sst_file& operator=(const Rdb_sst_file& p)= delete; - rocksdb::DB* m_db; - rocksdb::ColumnFamilyHandle* m_cf; - const rocksdb::DBOptions& m_db_options; - rocksdb::SstFileWriter* m_sst_file_writer; - std::string m_name; + rocksdb::DB* const m_db; + rocksdb::ColumnFamilyHandle* const m_cf; + const rocksdb::DBOptions& m_db_options; + rocksdb::SstFileWriter* m_sst_file_writer; + const std::string m_name; + const bool m_tracing; + + std::string generateKey(const std::string& key); public: - Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf, - const rocksdb::DBOptions& db_options, const std::string& name); + Rdb_sst_file(rocksdb::DB* const db, + rocksdb::ColumnFamilyHandle* const cf, + const rocksdb::DBOptions& db_options, const std::string& name, + const bool tracing); ~Rdb_sst_file(); rocksdb::Status open(); @@ -53,26 +59,28 @@ class Rdb_sst_file { }; class Rdb_sst_info { + private: 
Rdb_sst_info(const Rdb_sst_info& p)= delete; Rdb_sst_info& operator=(const Rdb_sst_info& p)= delete; - rocksdb::DB* m_db; - rocksdb::ColumnFamilyHandle* m_cf; - const rocksdb::DBOptions& m_db_options; - uint64_t m_curr_size; - uint64_t m_max_size; - uint m_sst_count; - std::string m_error_msg; - std::string m_prefix; - static std::string m_suffix; + rocksdb::DB* const m_db; + rocksdb::ColumnFamilyHandle* const m_cf; + const rocksdb::DBOptions& m_db_options; + uint64_t m_curr_size; + uint64_t m_max_size; + uint m_sst_count; + std::string m_error_msg; + std::string m_prefix; + static std::string m_suffix; #if defined(RDB_SST_INFO_USE_THREAD) - std::queue m_queue; - std::mutex m_mutex; - std::condition_variable m_cond; - std::thread* m_thread; - bool m_finished; + std::queue m_queue; + std::mutex m_mutex; + std::condition_variable m_cond; + std::thread* m_thread; + bool m_finished; #endif - Rdb_sst_file* m_sst_file; + Rdb_sst_file* m_sst_file; + const bool m_tracing; int open_new_sst_file(); void close_curr_sst_file(); @@ -85,9 +93,10 @@ class Rdb_sst_info { #endif public: - Rdb_sst_info(rocksdb::DB* db, const std::string& tablename, - const std::string& indexname, rocksdb::ColumnFamilyHandle* cf, - const rocksdb::DBOptions& db_options); + Rdb_sst_info(rocksdb::DB* const db, const std::string& tablename, + const std::string& indexname, + rocksdb::ColumnFamilyHandle* const cf, + const rocksdb::DBOptions& db_options, const bool &tracing); ~Rdb_sst_info(); int put(const rocksdb::Slice& key, const rocksdb::Slice& value); @@ -95,7 +104,7 @@ class Rdb_sst_info { const std::string& error_message() const { return m_error_msg; } - static void init(rocksdb::DB* db); + static void init(const rocksdb::DB* const db); }; } // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc index 1538404ea56..3f00bc13325 100644 --- a/storage/rocksdb/rdb_threads.cc +++ b/storage/rocksdb/rdb_threads.cc @@ -24,10 +24,10 @@ namespace myrocks { -void* 
Rdb_thread::thread_func(void* thread_ptr) +void* Rdb_thread::thread_func(void* const thread_ptr) { DBUG_ASSERT(thread_ptr != nullptr); - Rdb_thread* thread= static_cast(thread_ptr); + Rdb_thread* const thread= static_cast(thread_ptr); if (!thread->m_run_once.exchange(true)) { thread->run(); @@ -68,7 +68,7 @@ int Rdb_thread::create_thread( } -void Rdb_thread::signal(bool stop_thread) +void Rdb_thread::signal(const bool &stop_thread) { mysql_mutex_lock(&m_signal_mutex); if (stop_thread) { diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index cba03b4cdba..c06dba438c0 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -58,7 +58,7 @@ class Rdb_thread virtual void run(void) = 0; - void signal(bool stop_thread= false); + void signal(const bool &stop_thread= false); int join() { @@ -70,7 +70,7 @@ class Rdb_thread virtual ~Rdb_thread() {} private: - static void* thread_func(void* thread_ptr); + static void* thread_func(void* const thread_ptr); }; diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index 05f0104d483..599f11c5681 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -32,7 +32,8 @@ namespace myrocks { /* Skip past any spaces in the input */ -const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str) +const char* rdb_skip_spaces(const struct charset_info_st* const cs, + const char *str) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); @@ -50,7 +51,7 @@ const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str) Note that str1 can be longer but we only compare up to the number of characters in str2. 
*/ -bool rdb_compare_strings_ic(const char *str1, const char *str2) +bool rdb_compare_strings_ic(const char* const str1, const char* const str2) { DBUG_ASSERT(str1 != nullptr); DBUG_ASSERT(str2 != nullptr); @@ -74,7 +75,7 @@ bool rdb_compare_strings_ic(const char *str1, const char *str2) and skipping all data enclosed in quotes. */ const char* rdb_find_in_string(const char *str, const char *pattern, - bool *succeeded) + bool * const succeeded) { char quote = '\0'; bool escape = false; @@ -131,8 +132,9 @@ const char* rdb_find_in_string(const char *str, const char *pattern, /* See if the next valid token matches the specified string */ -const char* rdb_check_next_token(struct charset_info_st* cs, const char *str, - const char *pattern, bool *succeeded) +const char* rdb_check_next_token(const struct charset_info_st* const cs, + const char *str, const char* const pattern, + bool* const succeeded) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); @@ -156,8 +158,8 @@ const char* rdb_check_next_token(struct charset_info_st* cs, const char *str, /* Parse id */ -const char* rdb_parse_id(struct charset_info_st* cs, const char *str, - std::string *id) +const char* rdb_parse_id(const struct charset_info_st* const cs, + const char *str, std::string * const id) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); @@ -232,7 +234,7 @@ const char* rdb_parse_id(struct charset_info_st* cs, const char *str, /* Skip id */ -const char* rdb_skip_id(struct charset_info_st* cs, const char *str) +const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); @@ -251,8 +253,8 @@ static const std::array rdb_hexdigit = Convert data into a hex string with optional maximum length. If the data is larger than the maximum length trancate it and append "..". 
*/ -std::string rdb_hexdump(const char *data, std::size_t data_len, - std::size_t maxsize) +std::string rdb_hexdump(const char *data, const std::size_t data_len, + const std::size_t maxsize) { DBUG_ASSERT(data != nullptr); @@ -296,8 +298,9 @@ std::string rdb_hexdump(const char *data, std::size_t data_len, */ bool rdb_database_exists(const std::string& db_name) { - std::string dir = std::string(mysql_real_data_home) + FN_DIRSEP + db_name; - struct st_my_dir* dir_info = my_dir(dir.c_str(), + const std::string dir = std::string(mysql_real_data_home) + FN_DIRSEP + + db_name; + struct st_my_dir* const dir_info = my_dir(dir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT)); if (dir_info == nullptr) { diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 5630dc1c20f..7d63ff9c220 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -16,6 +16,7 @@ #pragma once /* C++ standard header files */ +#include #include /* MySQL header files */ @@ -100,6 +101,11 @@ namespace myrocks { DBUG_ASSERT(static_cast(a) == static_cast(b)) #endif +/* + Generic constant. +*/ +const size_t RDB_MAX_HEXDUMP_LEN= 1000; + /* Helper function to get an NULL terminated uchar* out of a given MySQL String. */ @@ -120,6 +126,15 @@ inline const uchar* rdb_std_str_to_uchar_ptr(const std::string &str) return reinterpret_cast(str.data()); } +/* + Helper function to convert seconds to milliseconds. +*/ + +constexpr int rdb_convert_sec_to_ms(int sec) +{ + return std::chrono::milliseconds(std::chrono::seconds(sec)).count(); +} + /* Helper function to get plain (not necessary NULL terminated) uchar* out of a given RocksDB item. @@ -169,33 +184,35 @@ inline int purge_all_jemalloc_arenas() Helper functions to parse strings. 
*/ -const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str) +const char* rdb_skip_spaces(const struct charset_info_st* const cs, + const char *str) __attribute__((__nonnull__, __warn_unused_result__)); -bool rdb_compare_strings_ic(const char *str1, const char *str2) +bool rdb_compare_strings_ic(const char* const str1, const char* const str2) __attribute__((__nonnull__, __warn_unused_result__)); const char* rdb_find_in_string(const char *str, const char *pattern, - bool *succeeded) + bool * const succeeded) __attribute__((__nonnull__, __warn_unused_result__)); -const char* rdb_check_next_token(struct charset_info_st* cs, const char *str, - const char *pattern, bool *succeeded) +const char* rdb_check_next_token(const struct charset_info_st* const cs, + const char *str, const char* const pattern, + bool * const succeeded) __attribute__((__nonnull__, __warn_unused_result__)); -const char* rdb_parse_id(struct charset_info_st* cs, const char *str, - std::string *id) +const char* rdb_parse_id(const struct charset_info_st* const cs, + const char *str, std::string * const id) __attribute__((__nonnull__(1, 2), __warn_unused_result__)); -const char* rdb_skip_id(struct charset_info_st* cs, const char *str) +const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) __attribute__((__nonnull__, __warn_unused_result__)); /* Helper functions to populate strings. 
*/ -std::string rdb_hexdump(const char *data, std::size_t data_len, - std::size_t maxsize = 0) +std::string rdb_hexdump(const char *data, const std::size_t data_len, + const std::size_t maxsize = 0) __attribute__((__nonnull__)); /* diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc index ce51481690b..52d23f20a32 100644 --- a/storage/rocksdb/tools/mysql_ldb.cc +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -8,7 +8,7 @@ int main(int argc, char** argv) { rocksdb::Options db_options; - myrocks::Rdb_pk_comparator pk_comparator; + const myrocks::Rdb_pk_comparator pk_comparator; db_options.comparator= &pk_comparator; rocksdb::LDBTool tool; From 3e7e55915007680a2ff5c6240237d589fcc723f7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 1 Jan 2017 01:50:17 +0300 Subject: [PATCH 113/233] Fix the "fatal error: mysqld_error.h: No such file or directory" compile error --- storage/rocksdb/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index d2c6c8b7659..37cc19551de 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -64,6 +64,11 @@ ADD_CONVENIENCE_LIBRARY( ${ROCKSDB_LIB_SOURCES} ) +# We include storage/innobase/include/ut0counter.h, which includes +# univ.i, which includes mysqld_error.h. 
+# Indicate that MyRocks is dependent on that file (just like innochecksum does) +ADD_DEPENDENCIES(ROCKSDB_AUX_LIB GenError) + SET(ROCKSDB_SOURCES rdb_mariadb_server_port.cc rdb_mariadb_server_port.h ha_rocksdb.cc ha_rocksdb.h From 7c4ebec82d1ad9e39c274e27ee7545368df55df7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 2 Jan 2017 00:06:26 +0000 Subject: [PATCH 114/233] MariaRocks: trivial post-merge test fixes --- mysql-test/include/have_rocksdb.opt | 1 + .../rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mysql-test/include/have_rocksdb.opt b/mysql-test/include/have_rocksdb.opt index 7415665b92d..36d7dda1609 100644 --- a/mysql-test/include/have_rocksdb.opt +++ b/mysql-test/include/have_rocksdb.opt @@ -9,3 +9,4 @@ --loose-enable-rocksdb_cfstats --loose-enable-rocksdb_lock_info --loose-enable-rocksdb_trx +--loose-enable-rocksdb_locks diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc index e164591ddec..c88c7ebd20a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc @@ -11,7 +11,7 @@ set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; set global rocksdb_lock_wait_timeout = 100000; set global rocksdb_deadlock_detect = ON; -exec python suite/rocksdb/t/rocksdb_deadlock_stress.py root 127.0.0.1 $MASTER_MYPORT test t1 10000 10; +exec python ../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py root 127.0.0.1 $MASTER_MYPORT test t1 10000 10; set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; From 3ecd9e0bfc8d694458da3a25a4ab2bf93f0a5ad3 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 2 Jan 2017 00:15:45 +0000 Subject: [PATCH 115/233] Post-merge fixes for 
rocksdb.add_index_inplace_crash --- .../rocksdb/r/add_index_inplace_crash.result | 18 +++++++++--------- .../rocksdb/t/add_index_inplace_crash.test | 13 +++++++------ 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result index 987b34948e8..bab4479fd83 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result @@ -5,10 +5,10 @@ INSERT INTO t1 (a, b) VALUES (2, 6); INSERT INTO t1 (a, b) VALUES (3, 7); # crash_during_online_index_creation flush logs; -SET SESSION debug="+d,crash_during_online_index_creation"; +SET SESSION debug_dbug="+d,crash_during_online_index_creation"; ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_online_index_creation"; +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -24,14 +24,14 @@ DROP TABLE t1; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; # crash_during_index_creation_partition flush logs; -SET SESSION debug="+d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="+d,crash_during_index_creation_partition"; ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -59,17 +59,17 @@ DROP TABLE t1; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = 
ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; # crash_during_index_creation_partition flush logs; -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; # expected assertion failure from sql layer here for alter rollback call mtr.add_suppression("Assertion `0' failed."); call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; ERROR HY000: Lost connection to MySQL server during query -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), @@ -81,7 +81,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) NOT NULL DEFAULT '0', + `i` int(11) NOT NULL, `j` int(11) DEFAULT NULL, `k` int(11) DEFAULT NULL, PRIMARY KEY (`i`), diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test index ca9122bccd7..6ff45973b70 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc --source include/have_debug.inc +--source include/have_partition.inc --disable_warnings drop table if exists t1; @@ -18,14 +19,14 @@ INSERT INTO t1 (a, b) VALUES (3, 7); flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_online_index_creation"; +SET SESSION debug_dbug="+d,crash_during_online_index_creation"; --error 2013 ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; --enable_reconnect --source 
include/wait_until_connected_again.inc -SET SESSION debug="-d,crash_during_online_index_creation"; +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; SHOW CREATE TABLE t1; CHECK TABLE t1; @@ -51,14 +52,14 @@ while ($i <= $max) { flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="+d,crash_during_index_creation_partition"; --error 2013 ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; --enable_reconnect --source include/wait_until_connected_again.inc -SET SESSION debug="-d,crash_during_index_creation_partition"; +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; SHOW CREATE TABLE t1; @@ -90,7 +91,7 @@ while ($i <= $max) { flush logs; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect -SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; --echo # expected assertion failure from sql layer here for alter rollback call mtr.add_suppression("Assertion `0' failed."); @@ -103,7 +104,7 @@ ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; --enable_reconnect --source include/wait_until_connected_again.inc -SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; SHOW CREATE TABLE t1; From a597e0e164f9e99c1422fdba5bf70d66e84371e5 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 2 Jan 2017 22:50:35 +0300 Subject: [PATCH 116/233] MariaRocks port: fix tests - rocksdb.tmpdir works (however @@rocksdb_tmpdir has no effect yet!) - trx_info_rpl is only run in RBR mode - type_char_indexes_collation now works = take into account that characters with the same weight can have any order after sorting (and they do in MariaDB) = MariaDB doesn't use index-only for extended keys that have partially- covered columns. 
--- storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result | 6 ++++++ .../mysql-test/rocksdb/r/trx_info_rpl.result | 7 ++++--- .../rocksdb/r/type_char_indexes_collation.result | 8 ++++---- .../rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf | 3 +++ .../rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test | 1 + .../rocksdb/t/type_char_indexes_collation.test | 13 ++++++------- 6 files changed, 24 insertions(+), 14 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result index e07d750c413..4c1e0221178 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result @@ -5,10 +5,13 @@ show session variables like 'rocksdb_tmpdir'; Variable_name Value rocksdb_tmpdir # Connection con1 +connect con1,localhost,root; show session variables like 'rocksdb_tmpdir'; Variable_name Value rocksdb_tmpdir ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +connection default; +disconnect con1; DROP TABLE t1; # rocksdb_tmpdir with valid location. CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; @@ -18,9 +21,12 @@ show session variables like 'rocksdb_tmpdir'; Variable_name Value rocksdb_tmpdir # Connection con3 +connect con2,localhost,root; show session variables like 'rocksdb_tmpdir'; Variable_name Value rocksdb_tmpdir MYSQL_TMP_DIR/mysqld.1 ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +disconnect con2; +connection default; set global rocksdb_tmpdir=NULL; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result index 1e0c7a5adbf..84a6b98b004 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result @@ -1,15 +1,16 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. 
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. [connection master] DROP TABLE IF EXISTS t1; +connection slave; include/stop_slave.inc +connection master; create table t1 (a int) engine=rocksdb; +connection slave; show variables like 'rocksdb_rpl_skip_tx_api'; Variable_name Value rocksdb_rpl_skip_tx_api ON include/start_slave.inc found +connection master; DROP TABLE t1; include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result index 4b720271a4e..cb56089595b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result @@ -1,4 +1,4 @@ -set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; create table t (id int not null auto_increment primary key, c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, key sk (c)); @@ -11,7 +11,7 @@ c ß ☀ drop table t; -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; create table t (id int not null auto_increment, c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci, @@ -67,7 +67,7 @@ insert into t (c1) values ('a '); ERROR 23000: Duplicate entry 'a' for key 'sk1' explain select c1 from t; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t ALL NULL NULL NULL NULL # NULL +1 SIMPLE t ALL NULL NULL NULL NULL # select c1 from t; c1 Asdf @@ -78,7 +78,7 @@ create table t (id int 
primary key, email varchar(100), KEY email_i (email(30))) insert into t values (1, ' a'); explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t index NULL email_i 33 NULL # Using index +1 SIMPLE t ALL NULL NULL NULL NULL # select 'email_i' as index_name, count(*) AS count from t force index(email_i); index_name count email_i 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf index f5b725932e4..c2a3e69ac75 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf @@ -2,7 +2,10 @@ [mysqld.1] binlog_format=row +loose-enable-rocksdb_trx + [mysqld.2] binlog_format=row slave_parallel_workers=1 rocksdb_rpl_skip_tx_api=ON +loose-enable-rocksdb_trx diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test index 19499765140..64b73123acf 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test @@ -1,4 +1,5 @@ --source include/master-slave.inc +--source include/have_binlog_format_row.inc --disable_warnings DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test index a0ae824f829..d231236bd92 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test @@ -2,7 +2,7 @@ --source include/have_debug.inc # Test if unknown collation works. 
-set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; create table t (id int not null auto_increment primary key, c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, key sk (c)); @@ -11,7 +11,7 @@ insert into t (c) values ('☀'), ('ß'); explain select c from t; select c from t; drop table t; -set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; # Testing if all characters in latin1 charset get restored correctly. This is # done by comparing results from a PK scan. @@ -44,10 +44,9 @@ explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; --let $file1=$MYSQLTEST_VARDIR/tmp/filesort_order --let $file2=$MYSQLTEST_VARDIR/tmp/sk_order - --disable_query_log ---eval select hex(c1) INTO OUTFILE '$file1' from t order by c1 ---eval select hex(c1) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c1 +--eval select hex(weight_string(c1)) INTO OUTFILE '$file1' from t order by c1 +--eval select hex(weight_string(c1)) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c1 --enable_query_log --diff_files $file1 $file2 @@ -55,8 +54,8 @@ explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; --remove_file $file2 --disable_query_log ---eval select hex(c2) INTO OUTFILE '$file1' from t order by c2 ---eval select hex(c2) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c2 +--eval select hex(weight_string(c2)) INTO OUTFILE '$file1' from t order by c2 +--eval select hex(weight_string(c2)) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c2 --enable_query_log --diff_files $file1 $file2 From e3df50c4b982e2e8d575bfed96b905972602a853 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 3 Jan 2017 00:21:04 +0300 Subject: [PATCH 117/233] MariaRocks port: fix rocksdb.table_stats - Need to manually enable userstat - MariaDB has a lot fewer 
columns in table_statistics table. --- storage/rocksdb/mysql-test/rocksdb/r/table_stats.result | 7 +++++-- .../rocksdb/mysql-test/rocksdb/t/table_stats-master.opt | 1 + storage/rocksdb/mysql-test/rocksdb/t/table_stats.test | 2 ++ 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result index e0520f5a31b..31cb1b6477b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result @@ -4,6 +4,9 @@ SELECT COUNT(*) FROM t1; COUNT(*) 1000 SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; -TABLE_SCHEMA TABLE_NAME TABLE_ENGINE ROWS_INSERTED ROWS_UPDATED ROWS_DELETED ROWS_READ ROWS_REQUESTED COMPRESSED_PAGE_SIZE COMPRESS_PADDING COMPRESS_OPS COMPRESS_OPS_OK COMPRESS_PRIMARY_OPS COMPRESS_PRIMARY_OPS_OK COMPRESS_USECS COMPRESS_OK_USECS COMPRESS_PRIMARY_USECS COMPRESS_PRIMARY_OK_USECS UNCOMPRESS_OPS UNCOMPRESS_USECS ROWS_INDEX_FIRST ROWS_INDEX_NEXT IO_READ_BYTES IO_READ_REQUESTS IO_READ_SVC_USECS IO_READ_SVC_USECS_MAX IO_READ_WAIT_USECS IO_READ_WAIT_USECS_MAX IO_READ_SLOW_IOS IO_WRITE_BYTES IO_WRITE_REQUESTS IO_WRITE_SVC_USECS IO_WRITE_SVC_USECS_MAX IO_WRITE_WAIT_USECS IO_WRITE_WAIT_USECS_MAX IO_WRITE_SLOW_IOS IO_READ_BYTES_BLOB IO_READ_REQUESTS_BLOB IO_READ_SVC_USECS_BLOB IO_READ_SVC_USECS_MAX_BLOB IO_READ_WAIT_USECS_BLOB IO_READ_WAIT_USECS_MAX_BLOB IO_READ_SLOW_IOS_BLOB IO_READ_BYTES_PRIMARY IO_READ_REQUESTS_PRIMARY IO_READ_SVC_USECS_PRIMARY IO_READ_SVC_USECS_MAX_PRIMARY IO_READ_WAIT_USECS_PRIMARY IO_READ_WAIT_USECS_MAX_PRIMARY IO_READ_SLOW_IOS_PRIMARY IO_READ_BYTES_SECONDARY IO_READ_REQUESTS_SECONDARY IO_READ_SVC_USECS_SECONDARY IO_READ_SVC_USECS_MAX_SECONDARY IO_READ_WAIT_USECS_SECONDARY IO_READ_WAIT_USECS_MAX_SECONDARY IO_READ_SLOW_IOS_SECONDARY IO_INDEX_INSERTS QUERIES_USED 
QUERIES_EMPTY COMMENT_BYTES INNODB_ROW_LOCK_WAITS INNODB_ROW_LOCK_WAIT_TIMEOUTS INNODB_PAGES_READ INNODB_PAGES_READ_INDEX INNODB_PAGES_READ_BLOB INNODB_PAGES_WRITTEN INNODB_PAGES_WRITTEN_INDEX INNODB_PAGES_WRITTEN_BLOB -test t1 ROCKSDB 1000 0 0 1000 1001 0 0 0 0 0 0 0 0 0 0 0 0 1 999 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1001 0 0 0 0 0 0 0 0 0 0 +TABLE_SCHEMA test +TABLE_NAME t1 +ROWS_READ 1000 +ROWS_CHANGED 1000 +ROWS_CHANGED_X_INDEXES 1000 DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt new file mode 100644 index 00000000000..be8a06eacae --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt @@ -0,0 +1 @@ +--userstat diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test index 734a5169608..3eb58098372 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test @@ -21,7 +21,9 @@ while ($i <= $max) { SELECT COUNT(*) FROM t1; # Verify the table stats are returned +--vertical_results SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; +--horizontal_results #cleanup DROP TABLE t1; From 69d5ee58c6c6919f3129214d7a2733829066df78 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 6 Jan 2017 23:36:23 +0300 Subject: [PATCH 118/233] MariaRocks port: Temporarily disable tests related to Gap Lock detector (MDEV-11735) --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 8e43da0e46d..27f4dabdf1f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -9,3 +9,8 @@ select_for_update_skip_locked_nowait: MDEV-11481 rpl_read_free: 
MDEV-10976 optimizer_loose_index_scans: MariaDB doesnt support Skip Scan + +# MDEV-11735: MyRocks: Gap Lock detector support +gap_lock_issue254: MDEV-11735: MyRocks: Gap Lock detector support +gap_lock_raise_error: MDEV-11735: MyRocks: Gap Lock detector support + From ebbe59fa332e1b7c5432b8cd63aa2b878ef289f8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 7 Jan 2017 16:00:35 +0000 Subject: [PATCH 119/233] MariaRocks port: fix rocksdb.bloomfilter3 test In MySQL 5.6, QUICK_SELECT_DESC calls handler->set_end_range() to inform the storage engine about the bounds of the range being scanned. MariaDB doesn't have that (handler::set_end_range call was back-ported but it is not called from QUICK_SELECT_DESC). Instead, it got prepare_*scan() methods from TokuDB. Implement these methods so that MyRocks has information about the range end. --- storage/rocksdb/ha_rocksdb.cc | 17 +++++++++++++++++ storage/rocksdb/ha_rocksdb.h | 11 +++++++++++ .../mysql-test/rocksdb/r/bloomfilter3.result | 10 +++++----- 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c4208fd1340..77e1d162e47 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6528,6 +6528,23 @@ int ha_rocksdb::read_range_first(const key_range* const start_key, } +int ha_rocksdb::prepare_index_scan() +{ + range_key_part= table->key_info[active_index].key_part; + set_end_range(NULL); + return 0; +} + + +int ha_rocksdb::prepare_range_scan(const key_range *start_key, + const key_range *end_key) +{ + range_key_part= table->key_info[active_index].key_part; + set_end_range(end_key); + return 0; +} + + int ha_rocksdb::index_read_map(uchar* const buf, const uchar* const key, key_part_map keypart_map, enum ha_rkey_function find_flag) diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 4eaf5dbdf50..b57a2639c55 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ 
-576,6 +576,17 @@ class ha_rocksdb: public my_core::handler void update_stats(void); public: + /* + The following two are currently only used for getting the range bounds + from QUICK_SELECT_DESC. + We don't need to implement prepare_index_key_scan[_map] because it is + only used with HA_READ_KEY_EXACT and HA_READ_PREFIX_LAST where one + can infer the bounds of the range being scanned, anyway. + */ + int prepare_index_scan() override; + int prepare_range_scan(const key_range *start_key, + const key_range *end_key) override; + /* Controls whether writes include checksums. This is updated from the session variable at the start of each query. diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result index c7b5c42f2b3..6ad9867049d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result @@ -101,11 +101,11 @@ insert into t1 values (21,2,2,0x12FFFFFFFFFF,1); explain select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index kp12 kp12 28 NULL # Using where; Using index +1 SIMPLE t1 ref kp12 kp12 20 const,const,const # Using where; Using index show status like '%rocksdb_bloom_filter_prefix%'; Variable_name Value -rocksdb_bloom_filter_prefix_checked 0 -rocksdb_bloom_filter_prefix_useful 0 +Rocksdb_bloom_filter_prefix_checked 0 +Rocksdb_bloom_filter_prefix_useful 0 select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; pk kp0 kp1 kp2 kp3 @@ -113,8 +113,8 @@ pk kp0 kp1 kp2 kp3 10 1 1 20890720927743 1 show status like '%rocksdb_bloom_filter_prefix%'; Variable_name Value -rocksdb_bloom_filter_prefix_checked 0 -rocksdb_bloom_filter_prefix_useful 0 
+Rocksdb_bloom_filter_prefix_checked 0 +Rocksdb_bloom_filter_prefix_useful 0 # The following MUST show TRUE: select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; case when variable_value-@c = 0 then 'true' else 'false' end From 93d3a39ba9442794106c9083519ba9c5695ded4d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 7 Jan 2017 19:43:27 +0000 Subject: [PATCH 120/233] MariaRocks port: fix rocksdb.rocksdb_row_stats test Make rocksdb_rows_XXX counters to work --- storage/rocksdb/ha_rocksdb.cc | 18 ++++++++---------- storage/rocksdb/ha_rocksdb.h | 4 ++-- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 77e1d162e47..905e83d95ce 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -95,7 +95,6 @@ namespace myrocks { static st_global_stats global_stats; static st_export_stats export_stats; -#ifdef MARIAROCKS_NOT_YET /** Updates row counters based on the table type and operation type. 
*/ @@ -107,7 +106,6 @@ void ha_rocksdb::update_row_stats(const operation_type &type) { else global_stats.rows[type].inc(); } -#endif void dbug_dump_database(rocksdb::DB *db); static handler *rocksdb_create_handler(my_core::handlerton *hton, @@ -6451,8 +6449,8 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar* const buf) #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_next++; - update_row_stats(ROWS_READ); #endif + update_row_stats(ROWS_READ); } return rc; } @@ -6600,8 +6598,8 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_first++; - update_row_stats(ROWS_READ); #endif + update_row_stats(ROWS_READ); } DBUG_RETURN(rc); } @@ -6729,8 +6727,8 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_first++; - update_row_stats(ROWS_READ); #endif + update_row_stats(ROWS_READ); } DBUG_RETURN(rc); } @@ -7632,8 +7630,8 @@ int ha_rocksdb::write_row(uchar* const buf) { #ifdef MARIAROCKS_NOT_YET stats.rows_inserted++; - update_row_stats(ROWS_INSERTED); #endif + update_row_stats(ROWS_INSERTED); } DBUG_RETURN(rv); } @@ -8545,8 +8543,8 @@ int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_next++; - update_row_stats(ROWS_READ); #endif + update_row_stats(ROWS_READ); } DBUG_RETURN(rc); @@ -8662,8 +8660,8 @@ int ha_rocksdb::delete_row(const uchar* const buf) } #ifdef MARIAROCKS_NOT_YET stats.rows_deleted++; - update_row_stats(ROWS_DELETED); #endif + update_row_stats(ROWS_DELETED); DBUG_RETURN(0); } @@ -8882,8 +8880,8 @@ int ha_rocksdb::rnd_pos(uchar* const buf, uchar* const pos) { #ifdef MARIAROCKS_NOT_YET stats.rows_read++; - update_row_stats(ROWS_READ); #endif + update_row_stats(ROWS_READ); } DBUG_RETURN(rc); @@ -8946,8 +8944,8 @@ int ha_rocksdb::update_row(const uchar* const old_data, 
uchar* const new_data) { #ifdef MARIAROCKS_NOT_YET stats.rows_updated++; - update_row_stats(ROWS_UPDATED); #endif + update_row_stats(ROWS_UPDATED); } DBUG_RETURN(rv); } diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index b57a2639c55..06e82f9aec6 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -510,9 +510,9 @@ class ha_rocksdb: public my_core::handler __attribute__((__nonnull__, __warn_unused_result__)); bool has_hidden_pk(const TABLE* const table) const __attribute__((__nonnull__, __warn_unused_result__)); -#ifdef MARIAROCKS_NOT_YET + void update_row_stats(const operation_type &type); -#endif + void set_last_rowkey(const uchar* const old_data); /* From 520d2063652cde706520d7e25012584f55093c2d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 8 Jan 2017 18:29:14 +0000 Subject: [PATCH 121/233] MariaRocks port: get rocksdb.rocksdb_icp[_rev] to work - Fix the test cases to not use userstat counters specific to facebook/mysql-5.6 - Make testcase also check MariaDB's ICP counters - Remove ha_rocksdb::check_index_cond(), call handler_index_cond_check instead. 
--- storage/rocksdb/ha_rocksdb.cc | 39 +------- storage/rocksdb/ha_rocksdb.h | 1 - .../rocksdb/include/rocksdb_icp.inc | 59 ++++++++++-- .../mysql-test/rocksdb/r/rocksdb_icp.result | 96 ++++++++++++------- .../rocksdb/r/rocksdb_icp_rev.result | 94 +++++++++++------- .../rocksdb/t/rocksdb_icp-master.opt | 2 +- .../rocksdb/t/rocksdb_icp_rev-master.opt | 2 +- 7 files changed, 181 insertions(+), 112 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 905e83d95ce..7bc8d205511 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6795,7 +6795,7 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, return HA_ERR_INTERNAL_ERROR; } - const enum icp_result icp_status= check_index_cond(); + const enum icp_result icp_status= handler_index_cond_check(this); if (icp_status == ICP_NO_MATCH) { if (move_forward) @@ -6804,7 +6804,7 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, m_scan_it->Prev(); continue; /* Get the next (or prev) index tuple */ } - else if (icp_status == ICP_OUT_OF_RANGE) + else if (icp_status == ICP_OUT_OF_RANGE || icp_status == ICP_ABORTED_BY_USER) { /* We have walked out of range we are scanning */ table->status= STATUS_NOT_FOUND; @@ -9951,41 +9951,6 @@ class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* const idx_cond) } -/* - @brief - Check the index condition. - - @detail - Check the index condition. 
(The caller has unpacked all needed index - columns into table->record[0]) - - @return - ICP_NO_MATCH - Condition not satisfied (caller should continue - scanning) - OUT_OF_RANGE - We've left the range we're scanning (caller should - stop scanning and return HA_ERR_END_OF_FILE) - - ICP_MATCH - Condition is satisfied (caller should fetch the record - and return it) -*/ - -enum icp_result ha_rocksdb::check_index_cond() const -{ - DBUG_ASSERT(pushed_idx_cond); - DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY); - - // MARIAROCKS_NOT_YET: MariaRocks todo: switch to using - // handler_index_cond_check() call? - if (end_range && compare_key2(end_range) > 0) - { - /* caller should return HA_ERR_END_OF_FILE already */ - return ICP_OUT_OF_RANGE; - } - - return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH; -} - - /** Checking if an index is used for ascending scan or not diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 06e82f9aec6..8d8a98d7de8 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -855,7 +855,6 @@ private: int index_last_intern(uchar *buf) __attribute__((__nonnull__, __warn_unused_result__)); - enum icp_result check_index_cond() const; int find_icp_matching_index_rec(const bool &move_forward, uchar* const buf) __attribute__((__nonnull__, __warn_unused_result__)); diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc index 5728e49b5e0..c76b52d4cc1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc @@ -66,6 +66,8 @@ select * from t2 where kp1< 3 and kp2+1>50000; select * from t2 where kp1< 3 and kp2+1>50000; --echo # Try doing backwards scans +--echo # MariaDB: ICP is not supported for reverse scans. 
+ --replace_column 9 # explain select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; @@ -88,17 +90,60 @@ drop table t0,t1,t2,t3; --echo # --echo # First, some preparations --echo # +--echo # in facebook/mysql-5.6, it was: +--echo # select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +--echo # +--echo # In MariaDB, we do: +delimiter |; create procedure save_read_stats() - select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT - into @rr, @rq, @rif, @rin - from information_schema.table_statistics - where table_name='t4' and table_schema=database(); +begin + set @rr=(select ROWS_READ + from information_schema.table_statistics + where table_name='t4' and table_schema=database()); + + set @rif= (select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_read_first'); + + set @rin=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_read_next'); + + set @icp_attempts=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_attempts'); + + set @icp_matches=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_match'); +end| create procedure get_read_stats() +begin select - ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin - from information_schema.table_statistics - where table_name='t4' and table_schema=database(); + (select ROWS_READ + from information_schema.table_statistics + where table_name='t4' and table_schema=database() + ) - @rr as ROWS_READ_DIFF, + + (select VARIABLE_VALUE - @rif + from information_schema.session_status + where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, + + (select VARIABLE_VALUE - @rin + from information_schema.session_status + where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, + + (select VARIABLE_VALUE - @icp_attempts + from information_schema.session_status + where 
VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, + + (select VARIABLE_VALUE - @icp_matches + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| + +delimiter ;| eval create table t4 ( diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result index 51841f174af..b2b6d7cdde9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result @@ -50,23 +50,17 @@ EXPLAIN "table": { "table_name": "t3", "access_type": "range", - "possible_keys": [ - "kp1" - ], + "possible_keys": ["kp1"], "key": "kp1", - "used_key_parts": [ - "kp1" - ], "key_length": "5", + "used_key_parts": ["kp1"], "rows": 1000, "filtered": 100, - "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))", - "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')" + "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0", + "attached_condition": "t3.kp2 like '%foo%'" } } } -Warnings: -Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%')) # Check that we handle the case where out-of-range is encountered sooner # than matched index condition explain @@ -82,10 +76,11 @@ id select_type table type possible_keys key key_len ref rows Extra select * from t2 where kp1< 3 and kp2+1>50000; pk kp1 kp2 col1 # Try doing backwards scans +# MariaDB: ICP is not supported for reverse scans. 
explain select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; pk kp1 kp2 col1 10 10 10 10 @@ -96,7 +91,7 @@ pk kp1 kp2 col1 explain select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; pk kp1 kp2 col1 998 998 998 998 @@ -106,7 +101,7 @@ pk kp1 kp2 col1 explain select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; pk kp1 kp2 col1 drop table t0,t1,t2,t3; @@ -115,16 +110,48 @@ drop table t0,t1,t2,t3; # # First, some preparations # +# in facebook/mysql-5.6, it was: +# select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +# +# In MariaDB, we do: create procedure save_read_stats() -select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT -into @rr, @rq, @rif, @rin +begin +set @rr=(select ROWS_READ from information_schema.table_statistics -where table_name='t4' and table_schema=database(); +where table_name='t4' and table_schema=database()); +set @rif= (select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first'); +set @rin=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next'); +set @icp_attempts=(select VARIABLE_VALUE +from information_schema.session_status +where 
VARIABLE_NAME='Handler_icp_attempts'); +set @icp_matches=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match'); +end| create procedure get_read_stats() +begin select -ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin +(select ROWS_READ from information_schema.table_statistics -where table_name='t4' and table_schema=database(); +where table_name='t4' and table_schema=database() +) - @rr as ROWS_READ_DIFF, +(select VARIABLE_VALUE - @rif +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, +(select VARIABLE_VALUE - @rin +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, +(select VARIABLE_VALUE - @icp_attempts +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, +(select VARIABLE_VALUE - @icp_matches +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| create table t4 ( id int, id1 int, @@ -142,8 +169,8 @@ insert into t4 values # call save_read_stats(); call get_read_stats(); -ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin -0 0 0 0 +ROWS_READ_DIFF ROWS_INDEX_FIRST ROWS_INDEX_NEXT ICP_ATTEMPTS ICP_MATCHES +0 0 0 0 0 # ============== index-only query ============== explain select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; @@ -154,10 +181,11 @@ select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id1 id2 1 1 call get_read_stats(); -ROWS_READ-@rr 10 -ROWS_REQUESTED-@rq 11 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 9 +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 # ============== Query without ICP ============== set optimizer_switch='index_condition_pushdown=off'; explain @@ -169,10 +197,11 @@ select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id id1 id2 
value value2 1 1 1 1 1 call get_read_stats(); -ROWS_READ-@rr 10 -ROWS_REQUESTED-@rq 11 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 9 +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 # ============== Query with ICP ============== set optimizer_switch='index_condition_pushdown=on'; explain @@ -184,10 +213,11 @@ select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id id1 id2 value value2 1 1 1 1 1 call get_read_stats(); -ROWS_READ-@rr 1 -ROWS_REQUESTED-@rq 1 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 0 +ROWS_READ_DIFF 1 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 1 +ICP_ATTEMPTS 10 +ICP_MATCHES 1 drop table t4; drop procedure save_read_stats; drop procedure get_read_stats; @@ -212,7 +242,7 @@ set @count=0; explain select * from t1 where key1=1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1 key1 9 const # Using index condition +1 SIMPLE t1 ref key1 key1 9 const # set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); select * from t1 where key1=1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result index d368da16a60..9c4b2d22ad7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result @@ -50,23 +50,17 @@ EXPLAIN "table": { "table_name": "t3", "access_type": "range", - "possible_keys": [ - "kp1" - ], + "possible_keys": ["kp1"], "key": "kp1", - "used_key_parts": [ - "kp1" - ], "key_length": "5", + "used_key_parts": ["kp1"], "rows": 1000, "filtered": 100, - "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))", - "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')" + "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0", + 
"attached_condition": "t3.kp2 like '%foo%'" } } } -Warnings: -Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%')) # Check that we handle the case where out-of-range is encountered sooner # than matched index condition explain @@ -82,10 +76,11 @@ id select_type table type possible_keys key key_len ref rows Extra select * from t2 where kp1< 3 and kp2+1>50000; pk kp1 kp2 col1 # Try doing backwards scans +# MariaDB: ICP is not supported for reverse scans. explain select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; pk kp1 kp2 col1 10 10 10 10 @@ -96,7 +91,7 @@ pk kp1 kp2 col1 explain select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; pk kp1 kp2 col1 998 998 998 998 @@ -106,7 +101,7 @@ pk kp1 kp2 col1 explain select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; pk kp1 kp2 col1 drop table t0,t1,t2,t3; @@ -115,16 +110,48 @@ drop table t0,t1,t2,t3; # # First, some preparations # +# in facebook/mysql-5.6, it was: +# select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +# 
+# In MariaDB, we do: create procedure save_read_stats() -select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT -into @rr, @rq, @rif, @rin +begin +set @rr=(select ROWS_READ from information_schema.table_statistics -where table_name='t4' and table_schema=database(); +where table_name='t4' and table_schema=database()); +set @rif= (select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first'); +set @rin=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next'); +set @icp_attempts=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts'); +set @icp_matches=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match'); +end| create procedure get_read_stats() +begin select -ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin +(select ROWS_READ from information_schema.table_statistics -where table_name='t4' and table_schema=database(); +where table_name='t4' and table_schema=database() +) - @rr as ROWS_READ_DIFF, +(select VARIABLE_VALUE - @rif +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, +(select VARIABLE_VALUE - @rin +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, +(select VARIABLE_VALUE - @icp_attempts +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, +(select VARIABLE_VALUE - @icp_matches +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| create table t4 ( id int, id1 int, @@ -142,8 +169,8 @@ insert into t4 values # call save_read_stats(); call get_read_stats(); -ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin -0 0 0 0 +ROWS_READ_DIFF ROWS_INDEX_FIRST ROWS_INDEX_NEXT ICP_ATTEMPTS ICP_MATCHES +0 0 0 0 0 # 
============== index-only query ============== explain select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; @@ -154,10 +181,11 @@ select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id1 id2 1 1 call get_read_stats(); -ROWS_READ-@rr 10 -ROWS_REQUESTED-@rq 11 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 9 +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 # ============== Query without ICP ============== set optimizer_switch='index_condition_pushdown=off'; explain @@ -169,10 +197,11 @@ select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id id1 id2 value value2 1 1 1 1 1 call get_read_stats(); -ROWS_READ-@rr 10 -ROWS_REQUESTED-@rq 11 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 9 +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 # ============== Query with ICP ============== set optimizer_switch='index_condition_pushdown=on'; explain @@ -184,10 +213,11 @@ select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; id id1 id2 value value2 1 1 1 1 1 call get_read_stats(); -ROWS_READ-@rr 1 -ROWS_REQUESTED-@rq 1 -ROWS_INDEX_FIRST-@rif 1 -ROWS_INDEX_NEXT-@rin 0 +ROWS_READ_DIFF 1 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 1 +ICP_ATTEMPTS 10 +ICP_MATCHES 1 drop table t4; drop procedure save_read_stats; drop procedure get_read_stats; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt index 885b15e36e3..acc0bdaa378 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt @@ -1 +1 @@ ---rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_perf_context_level=2 +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_perf_context_level=2 --userstat=ON diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt index 6ae466bcc09..fe129d79d63 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt @@ -1 +1 @@ ---rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --userstat=ON From f89e07785a38d1526148be2656ed94867772abf0 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 11 Jan 2017 12:41:22 +0300 Subject: [PATCH 122/233] MariaRocks port: Get rocksdb.autoinc_vars_thread test to work MariaDB doesn't have NO_CLEAR_EVENT support in DEBUG_SYNC facility. Luckily, the test can be re-written to use two different sync points instead. (I've checked that the modified test fails with fb/mysql-5.6 without the fix for e004fd9f (PR #394) --- .../rocksdb/r/autoinc_vars_thread.result | 20 ++++++++++++++++--- .../rocksdb/t/autoinc_vars_thread.test | 12 ++++++++--- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result index aefef4fbf38..db64778d345 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result @@ -3,19 +3,33 @@ # Issue #390 #--------------------------- CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +connect con1, localhost, root,,; SET auto_increment_increment = 2; SET auto_increment_offset = 1; INSERT INTO t1 VALUES(NULL); +connect con2, localhost, root,,; SET auto_increment_increment = 2; SET auto_increment_offset = 1; -SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT'; +connect con3, localhost, root,,; +connection con1; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go1'; 
INSERT INTO t1 VALUES(NULL); -SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT'; +connection con2; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go2'; INSERT INTO t1 VALUES(NULL); +connection default; SET debug_sync='now WAIT_FOR parked1'; SET debug_sync='now WAIT_FOR parked2'; -SET debug_sync='now SIGNAL go'; +SET debug_sync='now SIGNAL go1'; +connection con3; +SET debug_sync='now SIGNAL go2'; +connection default; +connection con1; +connection con2; +connection default; SET debug_sync='RESET'; +disconnect con1; +disconnect con2; SELECT * FROM t1; a 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test index f801b2f683a..78521fbc9ef 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test @@ -19,13 +19,15 @@ connect (con2, localhost, root,,); SET auto_increment_increment = 2; SET auto_increment_offset = 1; +connect (con3, localhost, root,,); + # Start each thread on an insert that will block waiting for a signal connection con1; -SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go NO_CLEAR_EVENT'; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go1'; send INSERT INTO t1 VALUES(NULL); connection con2; -SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go NO_CLEAR_EVENT'; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go2'; send INSERT INTO t1 VALUES(NULL); # Wait for both threads to be at debug_sync point @@ -34,7 +36,11 @@ SET debug_sync='now WAIT_FOR parked1'; SET debug_sync='now WAIT_FOR parked2'; # Signal both threads to continue -SET debug_sync='now SIGNAL go'; +send SET debug_sync='now SIGNAL go1'; +connection con3; +SET debug_sync='now SIGNAL go2'; +connection default; +reap; connection con1; reap; From edfe980aa13a51a4209fc5e0280459dc14edcae6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia 
Date: Wed, 11 Jan 2017 13:14:52 +0300 Subject: [PATCH 123/233] MariaRocks port: fix rocksdb.rpl_row_stats test "Userstat" feature in MariaDB does not have I_S.table_statistics.rows_requested column. We'll use I_S.table_statistics.rows_read instead. The testcase doesn't do anything where rows_requested != rows_read. --- .../mysql-test/rocksdb/r/rpl_row_stats.result | 24 +++++++++++++------ .../rocksdb/t/init_stats_procedure.inc | 9 +++++-- .../rocksdb/t/rpl_row_stats-slave.opt | 1 + 3 files changed, 25 insertions(+), 9 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result index a95642096f5..a14d2693ad3 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result @@ -1,12 +1,13 @@ include/master-slave.inc -Warnings: -Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. -Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
[connection master] +connection master; drop table if exists t1; +connection master; create procedure save_read_stats() begin -select rows_requested into @rq from information_schema.table_statistics +/*select rows_requested into @rq from information_schema.table_statistics +where table_schema=database() and table_name='t1';*/ +select rows_read into @rr_is from information_schema.table_statistics where table_schema=database() and table_name='t1'; select variable_value into @rr from information_schema.global_status where variable_name='rocksdb_rows_read'; @@ -17,7 +18,10 @@ where variable_name='rocksdb_rows_deleted'; end// create procedure get_read_stats() begin -select rows_requested - @rq as rows_requested from +/*select rows_requested - @rq as rows_requested from +information_schema.table_statistics +where table_schema=database() and table_name='t1';*/ +select rows_read - @rr_is as rows_read_userstat from information_schema.table_statistics where table_schema=database() and table_name='t1'; select variable_value - @rr as rows_read from @@ -33,7 +37,9 @@ end// create table t1 (id int primary key, value int); insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5); include/sync_slave_sql_with_master.inc +connection slave; call save_read_stats(); +connection master; update t1 set value=value+1 where id=1; update t1 set value=value+1 where id=3; select * from t1; @@ -44,8 +50,9 @@ id value 4 4 5 5 include/sync_slave_sql_with_master.inc +connection slave; call get_read_stats(); -rows_requested +rows_read_userstat 2 rows_read 2 @@ -61,6 +68,7 @@ id value 4 4 5 5 call save_read_stats(); +connection master; delete from t1 where id in (4,5); select * from t1; id value @@ -68,8 +76,9 @@ id value 2 2 3 4 include/sync_slave_sql_with_master.inc +connection slave; call get_read_stats(); -rows_requested +rows_read_userstat 2 rows_read 2 @@ -82,6 +91,7 @@ id value 1 2 2 2 3 4 +connection master; drop table t1; drop procedure save_read_stats; drop procedure get_read_stats; diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc index c798bb91cfa..dda253bc346 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc @@ -6,7 +6,9 @@ delimiter //; create procedure save_read_stats() begin - select rows_requested into @rq from information_schema.table_statistics + /*select rows_requested into @rq from information_schema.table_statistics + where table_schema=database() and table_name='t1';*/ + select rows_read into @rr_is from information_schema.table_statistics where table_schema=database() and table_name='t1'; select variable_value into @rr from information_schema.global_status where variable_name='rocksdb_rows_read'; @@ -18,7 +20,10 @@ end// create procedure get_read_stats() begin - select rows_requested - @rq as rows_requested from + /*select rows_requested - @rq as rows_requested from + information_schema.table_statistics + where table_schema=database() and table_name='t1';*/ + select rows_read - @rr_is as rows_read_userstat from information_schema.table_statistics where table_schema=database() and table_name='t1'; select variable_value - @rr as rows_read from diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt new file mode 100644 index 00000000000..039295e140d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt @@ -0,0 +1 @@ +--userstat=ON From 80be676fc0ee258e524510539a5a5cf44b951138 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 11 Jan 2017 17:32:47 +0300 Subject: [PATCH 124/233] MariaRocks port - Use rocksdb_sys_vars/my.cnf so that one can run tests from that suite by just "./mtr rocksdb_sys_vars.$TESTNAME" - Add rocksdb and rocksdb_sys_vars to the set of default test suites. Don't run with embedded server, yet. 
--- storage/rocksdb/mysql-test/rocksdb/suite.pm | 8 ++++++++ storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf | 12 ++++++++++++ storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm | 8 ++++++++ 3 files changed, 28 insertions(+) create mode 100644 storage/rocksdb/mysql-test/rocksdb/suite.pm create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm new file mode 100644 index 00000000000..658a0b3b4d1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -0,0 +1,8 @@ +package My::Suite::Rocksdb_sys_vars; + +@ISA = qw(My::Suite); + +sub is_default { not $::opt_embedded_server } + +bless { }; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf new file mode 100644 index 00000000000..d5f501e15ad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf @@ -0,0 +1,12 @@ +!include include/default_my.cnf + +[server] +rocksdb +skip-innodb +default-storage-engine=rocksdb + + +sql-mode=NO_ENGINE_SUBSTITUTION +explicit-defaults-for-timestamp=1 +rocksdb_lock_wait_timeout=1 +rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm new file mode 100644 index 00000000000..658a0b3b4d1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm @@ -0,0 +1,8 @@ +package My::Suite::Rocksdb_sys_vars; + +@ISA = qw(My::Suite); + +sub is_default { not $::opt_embedded_server } + +bless { }; + From 1a8731952d86f4a9c033d7ddf4b15daea5d8e6d1 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 11 Jan 2017 19:40:25 +0300 Subject: [PATCH 125/233] MariaRocks port: get rocksdb_sys_vars.rocksdb_deadlock_detect_basic to work --- .../rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test index aa532fdc1be..980be0f3924 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=1 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; From 81ed973b1a7d06b2c6cafcca4d7f6e39a45ae67d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 11 Jan 2017 22:13:52 +0300 Subject: [PATCH 126/233] MariaRocks port: fix the build: fetch git submodules earlier mariadb_connector_c fetches all submodules in the tre. Invoke it betfore the CONFIGURE_PLUGINS(). This is generally useful, as one can imagine many plugins have submodules. --- CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 731afdde3d8..1a9d6079c7f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -363,6 +363,10 @@ IF(WITH_UNIT_TESTS) ENDIF() SET (MYSQLD_STATIC_PLUGIN_LIBS "" CACHE INTERNAL "") + +# mariadb_connector_c fetches submodules which is useful for plugins +INCLUDE(mariadb_connector_c) # this does ADD_SUBDIRECTORY(libmariadb) + # Add storage engines and plugins. 
CONFIGURE_PLUGINS() @@ -372,7 +376,6 @@ ADD_SUBDIRECTORY(strings) ADD_SUBDIRECTORY(vio) ADD_SUBDIRECTORY(mysys) ADD_SUBDIRECTORY(mysys_ssl) -INCLUDE(mariadb_connector_c) # this does ADD_SUBDIRECTORY(libmariadb) ADD_SUBDIRECTORY(client) ADD_SUBDIRECTORY(extra) ADD_SUBDIRECTORY(libservices) From 2d789dd9dd598b0079de39a57ec5576d18e9f1ec Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 11 Jan 2017 22:29:34 +0300 Subject: [PATCH 127/233] MariaRocks: fix a few tests in rocksdb_sys_vars test suite --- storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf | 1 - .../t/rocksdb_print_snapshot_conflict_queries_basic.test | 2 +- .../rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test | 2 +- .../t/rocksdb_verify_row_debug_checksums_basic.test | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf index d5f501e15ad..5bb50e1da28 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf @@ -9,4 +9,3 @@ default-storage-engine=rocksdb sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 rocksdb_lock_wait_timeout=1 -rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test index 92a419a8636..24d2f182fe8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test index 2abb2478d82..14e6de24652 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_DIRECT_WRITES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test index cc35fdb0345..352bc9d9cf0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; From 7fb3b348d71ef29ed10ed7f41b332074c8ac0a3d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 21 Jan 2017 22:58:04 +0300 Subject: [PATCH 128/233] MariaRocks port: Remove handler::init_with_fields - It turns out, ha_rocksdb::table_flags() can return HA_PRIMARY_KEY_IN_READ_INDEX for all kinds of tables (as its meaning is "if there is a PK, PK columns contribute to the secondary index tuple". There is no assumption that a certain PK column can be decoded from the secondary index. (Should probably be fixed in the upstream, too, but I was unable to construct a testcase showing this is necessary). - Following the above, we can undo the init_with_fields() changes in table.cc. 
MyRocks calls init_with_fields() from ha_rocksdb::open() which sets index-only read capabilities properly. --- sql/ha_partition.cc | 16 ------------- sql/ha_partition.h | 2 -- sql/handler.h | 2 -- sql/table.cc | 44 ++++++++++-------------------------- storage/rocksdb/ha_rocksdb.h | 14 ++++++++---- 5 files changed, 22 insertions(+), 56 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index aa15e8bb538..d1a5a7bb0ea 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -421,22 +421,6 @@ ha_partition::~ha_partition() } -bool ha_partition::init_with_fields() -{ - /* Pass the call to each partition */ - for (uint i= 0; i < m_tot_parts; i++) - { - if (m_file[i]->init_with_fields()) - return true; - } - /* Re-read table flags in case init_with_fields caused it to change */ - cached_table_flags= (m_file[0]->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - PARTITION_ENABLED_TABLE_FLAGS; - return false; -} - - /* Initialize partition handler object diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 7324efcb6c9..70cd3760783 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -307,8 +307,6 @@ public: ha_partition *clone_arg, MEM_ROOT *clone_mem_root_arg); ~ha_partition(); - - bool init_with_fields(); /* A partition handler has no characteristics in itself. It only inherits those from the underlying handlers. 
Here we set-up those constants to diff --git a/sql/handler.h b/sql/handler.h index 98adbf51d9a..8717f229a46 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -2777,8 +2777,6 @@ public: { cached_table_flags= table_flags(); } - - virtual bool init_with_fields() { return false; } /* ha_ methods: pubilc wrappers for private virtual API */ int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked); diff --git a/sql/table.cc b/sql/table.cc index 6b74ec7cfd6..4688b77ecd7 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2230,6 +2230,18 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (key == primary_key) { field->flags|= PRI_KEY_FLAG; + /* + If this field is part of the primary key and all keys contains + the primary key, then we can use any key to find this column + */ + if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX) + { + if (field->key_length() == key_part->length && + !(field->flags & BLOB_FLAG)) + field->part_of_key= share->keys_in_use; + if (field->part_of_sortkey.is_set(key)) + field->part_of_sortkey= share->keys_in_use; + } } if (field->key_length() != key_part->length) { @@ -2289,38 +2301,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, (ha_option & HA_ANY_INDEX_MAY_BE_UNIQUE)) set_if_bigger(share->max_unique_length,keyinfo->key_length); } - - /* - The next call is here for MyRocks/MariaRocks: Now, we have filled in - field and key definitions, give the storage engine a chance to adjust - its properties. - - MyRocks may (and typically does) adjust HA_PRIMARY_KEY_IN_READ_INDEX - flag in this call. 
- */ - if (handler_file->init_with_fields()) - goto err; - - if (primary_key < MAX_KEY && (handler_file->ha_table_flags() & - HA_PRIMARY_KEY_IN_READ_INDEX)) - { - keyinfo= &share->key_info[primary_key]; - key_part= keyinfo->key_part; - for (i=0 ; i < keyinfo->user_defined_key_parts ; key_part++,i++) - { - Field *field= key_part->field; - /* - If this field is part of the primary key and all keys contains - the primary key, then we can use any key to find this column - */ - if (field->key_length() == key_part->length && - !(field->flags & BLOB_FLAG)) - field->part_of_key= share->keys_in_use; - if (field->part_of_sortkey.is_set(primary_key)) - field->part_of_sortkey= share->keys_in_use; - } - } - if (primary_key < MAX_KEY && (share->keys_in_use.is_set(primary_key))) { diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 8d8a98d7de8..0f575919f5a 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -640,18 +640,24 @@ public: HA_REC_NOT_IN_SEQ If we don't set it, filesort crashes, because it assumes rowids are 1..8 byte numbers + HA_PRIMARY_KEY_IN_READ_INDEX + This flag is always set, even for tables that: + - have no PK + - have some (or all) of PK that can't be decoded from the secondary + index. */ return HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS | - (m_pk_can_be_decoded? HA_PRIMARY_KEY_IN_READ_INDEX : 0) | + HA_PRIMARY_KEY_IN_READ_INDEX | HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_NULL_IN_KEY | HA_PARTIAL_COLUMN_READ | HA_TABLE_SCAN_ON_INDEX; } -//#ifdef MARIAROCKS_NOT_YET - bool init_with_fields() override; -//#endif + +private: + bool init_with_fields(); /* no 'override' in MariaDB */ +public: /** @brief This is a bitmap of flags that indicates how the storage engine implements indexes. 
The current index flags are documented in From 1f0c28f36eeddf59bddd4e88ea9c59001c87b9be Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 21 Jan 2017 22:58:57 +0300 Subject: [PATCH 129/233] MariaRocks port: move include/atomic_stat.h into storage/rocksdb --- {include => storage/rocksdb}/atomic_stat.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {include => storage/rocksdb}/atomic_stat.h (100%) diff --git a/include/atomic_stat.h b/storage/rocksdb/atomic_stat.h similarity index 100% rename from include/atomic_stat.h rename to storage/rocksdb/atomic_stat.h From 351043addab6e0aa7e0eb66bd003fcc76dd15b42 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 21 Jan 2017 23:31:52 +0300 Subject: [PATCH 130/233] MariaRocks port: put back the assert MyRocks seems to no longer depend on it not being present after the merge. --- sql/field.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/field.cc b/sql/field.cc index 2629bdf0e25..df058fa61a5 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8721,6 +8721,7 @@ int Field_enum::store(longlong nr, bool unsigned_val) double Field_enum::val_real(void) { + ASSERT_COLUMN_MARKED_FOR_READ; return (double) Field_enum::val_int(); } From 024a5ec0ec6643b53a769579ded81d840b66ad14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Mon, 23 Jan 2017 19:47:58 +0200 Subject: [PATCH 131/233] Add ut0counter.h instead of xtradb linked ut0counter.h --- storage/rocksdb/ha_rocksdb.cc | 2 + storage/rocksdb/ha_rocksdb.h | 9 +- storage/rocksdb/rdb_cf_manager.cc | 2 + storage/rocksdb/rdb_datadic.cc | 1 + storage/rocksdb/rdb_sst_info.cc | 2 + storage/rocksdb/ut0counter.h | 203 ++++++++++++++++++++++++++++++ 6 files changed, 211 insertions(+), 8 deletions(-) create mode 100644 storage/rocksdb/ut0counter.h diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 7bc8d205511..c97d7976ba3 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -22,6 +22,8 @@ 
#include +#include + /* The C++ file's header */ #include "./ha_rocksdb.h" diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 0f575919f5a..824b56b958f 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -26,14 +26,6 @@ #include /* MySQL header files */ -#include "../storage/xtradb/include/ut0counter.h" -/* - 'EMPTY' from field.h conflicts with EMPTY from - /usr/include/x86_64-linux-gnu/bits/utmpx.h - MARIAROCKS_NOT_YET: Sort out #include order so that we don't have to resort - to #undef -*/ -#undef EMPTY #include "./handler.h" /* handler */ #include "./my_global.h" /* ulonglong */ #include "./sql_string.h" @@ -52,6 +44,7 @@ #include "./rdb_perf_context.h" #include "./rdb_sst_info.h" #include "./rdb_utils.h" +#include "./ut0counter.h" /** @note MyRocks Coding Conventions: diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 4cb5a7014df..b3e9bdb2a93 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -20,6 +20,8 @@ #include +#include + /* This C++ files header file */ #include "./rdb_cf_manager.h" diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 840f8a77c8a..22ad49898f3 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -22,6 +22,7 @@ /* This C++ file's header file */ #include "./rdb_datadic.h" +#include /* C++ standard header files */ #include #include diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index f51ea907be1..61c79d53fc3 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -19,6 +19,8 @@ /* This C++ file's header file */ #include "./rdb_sst_info.h" +#include + /* C++ standard header files */ #include #include diff --git a/storage/rocksdb/ut0counter.h b/storage/rocksdb/ut0counter.h new file mode 100644 index 00000000000..af2e023af27 --- /dev/null +++ b/storage/rocksdb/ut0counter.h @@ -0,0 +1,203 @@ +/* 
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved. +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA +*****************************************************************************/ + +/**************************************************//** +@file include/ut0counter.h +Counter utility class +Created 2012/04/12 by Sunny Bains +*******************************************************/ + +#ifndef UT0COUNTER_H +#define UT0COUNTER_H + +#include + +/** CPU cache line size */ +#define CACHE_LINE_SIZE 64 + +/** Default number of slots to use in ib_counter_t */ +#define IB_N_SLOTS 64 + +#ifdef __WIN__ +#define get_curr_thread_id() GetCurrentThreadId() +#else +#define get_curr_thread_id() pthread_self() +#endif + +#define UT_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) + +/** Get the offset into the counter array. */ +template +struct generic_indexer_t { + /** Default constructor/destructor should be OK. */ + + /** @return offset within m_counter */ + size_t offset(size_t index) const { + return(((index % N) + 1) * (CACHE_LINE_SIZE / sizeof(Type))); + } +}; + +#ifdef HAVE_SCHED_GETCPU +//#include // Including this causes problems with EMPTY symbol +#include // Include this instead +/** Use the cpu id to index into the counter array. If it fails then +use the thread id. 
*/ +template +struct get_sched_indexer_t : public generic_indexer_t { + /** Default constructor/destructor should be OK. */ + + /* @return result from sched_getcpu(), the thread id if it fails. */ + size_t get_rnd_index() const { + + size_t cpu = sched_getcpu(); + if (cpu == (size_t) -1) { + cpu = get_curr_thread_id(); + } + + return(cpu); + } +}; +#endif /* HAVE_SCHED_GETCPU */ + +/** Use the thread id to index into the counter array. */ +template +struct thread_id_indexer_t : public generic_indexer_t { + /** Default constructor/destructor should are OK. */ + + /* @return a random number, currently we use the thread id. Where + thread id is represented as a pointer, it may not work as + effectively. */ + size_t get_rnd_index() const { + return get_curr_thread_id(); + } +}; + +/** For counters wher N=1 */ +template +struct single_indexer_t { + /** Default constructor/destructor should are OK. */ + + /** @return offset within m_counter */ + size_t offset(size_t index) const { + DBUG_ASSERT(N == 1); + return((CACHE_LINE_SIZE / sizeof(Type))); + } + + /* @return 1 */ + size_t get_rnd_index() const { + DBUG_ASSERT(N == 1); + return(1); + } +}; + +/** Class for using fuzzy counters. The counter is not protected by any +mutex and the results are not guaranteed to be 100% accurate but close +enough. Creates an array of counters and separates each element by the +CACHE_LINE_SIZE bytes */ +template < + typename Type, + int N = IB_N_SLOTS, + template class Indexer = thread_id_indexer_t> +class ib_counter_t { +public: + ib_counter_t() { memset(m_counter, 0x0, sizeof(m_counter)); } + + ~ib_counter_t() + { + DBUG_ASSERT(validate()); + } + + bool validate() { +#ifdef UNIV_DEBUG + size_t n = (CACHE_LINE_SIZE / sizeof(Type)); + + /* Check that we aren't writing outside our defined bounds. 
*/ + for (size_t i = 0; i < UT_ARRAY_SIZE(m_counter); i += n) { + for (size_t j = 1; j < n - 1; ++j) { + DBUG_ASSERT(m_counter[i + j] == 0); + } + } +#endif /* UNIV_DEBUG */ + return(true); + } + + /** If you can't use a good index id. Increment by 1. */ + void inc() { add(1); } + + /** If you can't use a good index id. + * @param n - is the amount to increment */ + void add(Type n) { + size_t i = m_policy.offset(m_policy.get_rnd_index()); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] += n; + } + + /** Use this if you can use a unique indentifier, saves a + call to get_rnd_index(). + @param i - index into a slot + @param n - amount to increment */ + void add(size_t index, Type n) { + size_t i = m_policy.offset(index); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] += n; + } + + /** If you can't use a good index id. Decrement by 1. */ + void dec() { sub(1); } + + /** If you can't use a good index id. + * @param - n is the amount to decrement */ + void sub(Type n) { + size_t i = m_policy.offset(m_policy.get_rnd_index()); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] -= n; + } + + /** Use this if you can use a unique indentifier, saves a + call to get_rnd_index(). + @param i - index into a slot + @param n - amount to decrement */ + void sub(size_t index, Type n) { + size_t i = m_policy.offset(index); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] -= n; + } + + /* @return total value - not 100% accurate, since it is not atomic. */ + operator Type() const { + Type total = 0; + + for (size_t i = 0; i < N; ++i) { + total += m_counter[m_policy.offset(i)]; + } + + return(total); + } + +private: + /** Indexer into the array */ + Indexerm_policy; + + /** Slot 0 is unused. 
*/ + Type m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))]; +}; + +#endif /* UT0COUNTER_H */ From 15d101ca8e9c949f9bf28b74d514d7064d384907 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Mon, 23 Jan 2017 21:31:36 +0200 Subject: [PATCH 132/233] Update gitignore to not show rocksdb generated binaries --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index dce5b5ea934..f5f23da655f 100644 --- a/.gitignore +++ b/.gitignore @@ -174,6 +174,9 @@ storage/myisam/myisamlog storage/myisam/myisampack storage/myisam/rt_test storage/myisam/sp_test +storage/rocksdb/ldb +storage/rocksdb/mysql_ldb +storage/rocksdb/sst_dump storage/tokudb/PerconaFT/buildheader/db.h storage/tokudb/PerconaFT/buildheader/make_tdb storage/tokudb/PerconaFT/buildheader/runcat.sh From 555b1b9f15a32ad55a750d856070a5702077b56d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 24 Jan 2017 09:27:15 +0200 Subject: [PATCH 133/233] Make rocksdb dynamic plugin --- storage/rocksdb/CMakeLists.txt | 2 +- .../mysql-test/rocksdb/include/have_rocksdb.inc | 10 ++++++++++ .../mysql-test/rocksdb}/include/have_rocksdb.opt | 0 .../rocksdb/include/have_rocksdb_default.inc | 10 ++++++++++ .../rocksdb/include/have_rocksdb_replication.inc | 6 ------ storage/rocksdb/mysql-test/rocksdb/my.cnf | 5 ++--- storage/rocksdb/mysql-test/rocksdb/suite.opt | 2 +- 7 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc rename {mysql-test => storage/rocksdb/mysql-test/rocksdb}/include/have_rocksdb.opt (100%) create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc rename mysql-test/include/have_rocksdb.inc => storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc (74%) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index cd73fbdb811..e6948c60a51 100644 --- a/storage/rocksdb/CMakeLists.txt +++ 
b/storage/rocksdb/CMakeLists.txt @@ -113,7 +113,7 @@ ENDIF() SET(rocksdb_static_libs ROCKSDB_AUX_LIB ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT MODULE_ONLY LINK_LIBRARIES ${rocksdb_static_libs} ) diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc new file mode 100644 index 00000000000..1f762d38c64 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc @@ -0,0 +1,10 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('YES', 'DEFAULT', 'ENABLED')`) +{ + --skip Test requires engine RocksDB. +} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/mysql-test/include/have_rocksdb.opt b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.opt similarity index 100% rename from mysql-test/include/have_rocksdb.opt rename to storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.opt diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc new file mode 100644 index 00000000000..2c50afd5014 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc @@ -0,0 +1,10 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb'` AND support in ('DEFAULT')`) +{ + --skip Test requires engine RocksDB as default. +} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. 
+set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/mysql-test/include/have_rocksdb.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc similarity index 74% rename from mysql-test/include/have_rocksdb.inc rename to storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc index c76d851e339..92261211bf5 100644 --- a/mysql-test/include/have_rocksdb.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc @@ -1,8 +1,3 @@ -if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('DEFAULT')`) -{ - --skip Test requires default engine RocksDB -} - # MARIAROCKS_NOT_YET: replication doesn't work yet: #if (`select count(*) = 0 from information_schema.tables where engine='rocksdb' and table_name='slave_gtid_info'`) #{ @@ -14,4 +9,3 @@ if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb # flush them at the beginning of the test to ensure the test runs consistently. 
set global rocksdb_force_flush_memtable_now = true; --enable_query_log - diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index d5f501e15ad..5f85ab8b54d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -1,12 +1,11 @@ !include include/default_my.cnf [server] -rocksdb skip-innodb default-storage-engine=rocksdb sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 -rocksdb_lock_wait_timeout=1 -rocksdb_strict_collation_check=0 +#rocksdb_lock_wait_timeout=1 +#rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt index 8907deed6d8..0d92a9bb29d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.opt +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -1,2 +1,2 @@ ---ignore-db-dirs=.rocksdb +--ignore-db-dirs=.rocksdb --plugin-load=ha_rocksdb_se From 13c7839ba72d7b88767dbb016e8d60114e49c377 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 24 Jan 2017 21:51:57 +0300 Subject: [PATCH 134/233] MariaRocks port: Fix for the previous cset (MariaRocks port: put back the assert) - Put back the assert on SQL layer at the right location - Adjust rdb_pack_with_make_sort_key to work around the assert (like it is done at other palaces): MyRocks may need to pack a column value even when the column is not in the read set. 
--- sql/field.cc | 2 +- storage/rocksdb/rdb_datadic.cc | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/sql/field.cc b/sql/field.cc index df058fa61a5..262252e9787 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8721,13 +8721,13 @@ int Field_enum::store(longlong nr, bool unsigned_val) double Field_enum::val_real(void) { - ASSERT_COLUMN_MARKED_FOR_READ; return (double) Field_enum::val_int(); } longlong Field_enum::val_int(void) { + ASSERT_COLUMN_MARKED_FOR_READ; return read_lowendian(ptr, packlength); } diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 22ad49898f3..9d917b8dd62 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -749,7 +749,12 @@ void rdb_pack_with_make_sort_key(Rdb_field_packing* const fpi, DBUG_ASSERT(*dst != nullptr); const int max_len= fpi->m_max_image_len; + my_bitmap_map *old_map; + + old_map= dbug_tmp_use_all_columns(field->table, + field->table->read_set); field->sort_string(*dst, max_len); + dbug_tmp_restore_column_map(field->table->read_set, old_map); *dst += max_len; } From 5875633c2a78fdb2db4ac621b093730962bc1f4d Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 1 Feb 2017 21:27:13 +0000 Subject: [PATCH 135/233] MDEV-11901 : MariaRocks on Windows fixed compilation, disabled unix-only tests (the ones that use bash etc). 
Changed plugin library name to ha_rocksdb.dll/so --- include/m_ctype.h | 156 ++++---- include/my_bit.h | 46 ++- include/mysql/plugin.h | 17 + mysys/my_bit.c | 18 - storage/rocksdb/CMakeLists.txt | 245 +++++++------ storage/rocksdb/build_rocksdb.cmake | 337 ++++++++++++++++++ storage/rocksdb/ha_rocksdb.cc | 112 +++--- storage/rocksdb/ha_rocksdb.h | 8 + .../r/innodb_i_s_tables_disabled.result | 42 --- .../mysql-test/rocksdb/r/partition.result | 10 +- storage/rocksdb/mysql-test/rocksdb/suite.opt | 2 +- .../mysql-test/rocksdb/t/checkpoint.test | 3 + .../mysql-test/rocksdb/t/collation.test | 4 + .../mysql-test/rocksdb/t/compact_deletes.test | 1 + .../rocksdb/t/concurrent_alter.test | 3 + .../mysql-test/rocksdb/t/drop_table.test | 2 + .../mysql-test/rocksdb/t/drop_table2.test | 3 + .../mysql-test/rocksdb/t/duplicate_table.test | 6 +- .../rocksdb/t/innodb_i_s_tables_disabled.test | 8 + .../mysql-test/rocksdb/t/partition.test | 3 +- .../rocksdb/t/rocksdb_checksums.test | 2 + .../mysql-test/rocksdb/t/rocksdb_datadir.test | 3 +- .../mysql-test/rocksdb/t/rocksdb_parts.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/rqg.inc | 3 +- .../mysql-test/rocksdb/t/slow_query_log.test | 3 + .../mysql-test/rocksdb/t/statistics.test | 2 +- .../mysql-test/rocksdb/t/unique_sec.test | 2 + storage/rocksdb/properties_collector.cc | 13 +- storage/rocksdb/rdb_buff.h | 10 + storage/rocksdb/rdb_datadic.cc | 12 +- storage/rocksdb/rdb_datadic.h | 3 + storage/rocksdb/rdb_i_s.cc | 6 +- storage/rocksdb/rdb_mariadb_server_port.cc | 2 +- storage/rocksdb/rdb_sst_info.h | 2 +- storage/rocksdb/rdb_threads.h | 26 ++ 35 files changed, 764 insertions(+), 352 deletions(-) create mode 100644 storage/rocksdb/build_rocksdb.cmake diff --git a/include/m_ctype.h b/include/m_ctype.h index 04a82953f0a..1639332f5f7 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -360,7 +360,7 @@ struct my_collation_handler_st }; extern MY_COLLATION_HANDLER my_collation_8bit_bin_handler; -extern MY_COLLATION_HANDLER 
my_collation_8bit_simple_ci_handler; +extern MYSQL_PLUGIN_IMPORT MY_COLLATION_HANDLER my_collation_8bit_simple_ci_handler; extern MY_COLLATION_HANDLER my_collation_8bit_nopad_bin_handler; extern MY_COLLATION_HANDLER my_collation_8bit_simple_nopad_ci_handler; extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler; @@ -586,83 +586,83 @@ extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_nopad; extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_filename; extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_ci; -extern struct charset_info_st my_charset_big5_bin; -extern struct charset_info_st my_charset_big5_chinese_ci; -extern struct charset_info_st my_charset_big5_nopad_bin; -extern struct charset_info_st my_charset_big5_chinese_nopad_ci; -extern struct charset_info_st my_charset_cp1250_czech_ci; -extern struct charset_info_st my_charset_cp932_bin; -extern struct charset_info_st my_charset_cp932_japanese_ci; -extern struct charset_info_st my_charset_cp932_nopad_bin; -extern struct charset_info_st my_charset_cp932_japanese_nopad_ci; -extern struct charset_info_st my_charset_eucjpms_bin; -extern struct charset_info_st my_charset_eucjpms_japanese_ci; -extern struct charset_info_st my_charset_eucjpms_nopad_bin; -extern struct charset_info_st my_charset_eucjpms_japanese_nopad_ci; -extern struct charset_info_st my_charset_euckr_bin; -extern struct charset_info_st my_charset_euckr_korean_ci; -extern struct charset_info_st my_charset_euckr_nopad_bin; -extern struct charset_info_st my_charset_euckr_korean_nopad_ci; -extern struct charset_info_st my_charset_gb2312_bin; -extern struct charset_info_st my_charset_gb2312_chinese_ci; -extern struct charset_info_st my_charset_gb2312_nopad_bin; -extern struct charset_info_st my_charset_gb2312_chinese_nopad_ci; -extern struct charset_info_st my_charset_gbk_bin; -extern struct charset_info_st my_charset_gbk_chinese_ci; -extern struct charset_info_st my_charset_gbk_nopad_bin; -extern 
struct charset_info_st my_charset_gbk_chinese_nopad_ci; -extern struct charset_info_st my_charset_latin1_bin; -extern struct charset_info_st my_charset_latin1_nopad_bin; -extern struct charset_info_st my_charset_latin1_german2_ci; -extern struct charset_info_st my_charset_latin2_czech_ci; -extern struct charset_info_st my_charset_sjis_bin; -extern struct charset_info_st my_charset_sjis_japanese_ci; -extern struct charset_info_st my_charset_sjis_nopad_bin; -extern struct charset_info_st my_charset_sjis_japanese_nopad_ci; -extern struct charset_info_st my_charset_tis620_bin; -extern struct charset_info_st my_charset_tis620_thai_ci; -extern struct charset_info_st my_charset_tis620_nopad_bin; -extern struct charset_info_st my_charset_tis620_thai_nopad_ci; -extern struct charset_info_st my_charset_ucs2_bin; -extern struct charset_info_st my_charset_ucs2_general_ci; -extern struct charset_info_st my_charset_ucs2_nopad_bin; -extern struct charset_info_st my_charset_ucs2_general_nopad_ci; -extern struct charset_info_st my_charset_ucs2_general_mysql500_ci; -extern struct charset_info_st my_charset_ucs2_unicode_ci; -extern struct charset_info_st my_charset_ucs2_unicode_nopad_ci; -extern struct charset_info_st my_charset_ucs2_general_mysql500_ci; -extern struct charset_info_st my_charset_ujis_bin; -extern struct charset_info_st my_charset_ujis_japanese_ci; -extern struct charset_info_st my_charset_ujis_nopad_bin; -extern struct charset_info_st my_charset_ujis_japanese_nopad_ci; -extern struct charset_info_st my_charset_utf16_bin; -extern struct charset_info_st my_charset_utf16_general_ci; -extern struct charset_info_st my_charset_utf16_unicode_ci; -extern struct charset_info_st my_charset_utf16_unicode_nopad_ci; -extern struct charset_info_st my_charset_utf16le_bin; -extern struct charset_info_st my_charset_utf16le_general_ci; -extern struct charset_info_st my_charset_utf16_general_nopad_ci; -extern struct charset_info_st my_charset_utf16_nopad_bin; -extern struct 
charset_info_st my_charset_utf16le_nopad_bin; -extern struct charset_info_st my_charset_utf16le_general_nopad_ci; -extern struct charset_info_st my_charset_utf32_bin; -extern struct charset_info_st my_charset_utf32_general_ci; -extern struct charset_info_st my_charset_utf32_unicode_ci; -extern struct charset_info_st my_charset_utf32_unicode_nopad_ci; -extern struct charset_info_st my_charset_utf32_nopad_bin; -extern struct charset_info_st my_charset_utf32_general_nopad_ci; -extern struct charset_info_st my_charset_utf8_bin; -extern struct charset_info_st my_charset_utf8_nopad_bin; -extern struct charset_info_st my_charset_utf8_general_nopad_ci; -extern struct charset_info_st my_charset_utf8_general_mysql500_ci; -extern struct charset_info_st my_charset_utf8_unicode_ci; -extern struct charset_info_st my_charset_utf8_unicode_nopad_ci; -extern struct charset_info_st my_charset_utf8mb4_bin; -extern struct charset_info_st my_charset_utf8mb4_general_ci; -extern struct charset_info_st my_charset_utf8mb4_nopad_bin; -extern struct charset_info_st my_charset_utf8mb4_general_nopad_ci; -extern struct charset_info_st my_charset_utf8mb4_unicode_ci; -extern struct charset_info_st my_charset_utf8mb4_unicode_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_chinese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_chinese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp1250_czech_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_japanese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_japanese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st 
my_charset_eucjpms_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_japanese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_japanese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_korean_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_korean_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_chinese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_chinese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_chinese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_chinese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_german2_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin2_czech_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_japanese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_japanese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st 
my_charset_tis620_thai_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_thai_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_mysql500_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_unicode_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_unicode_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_mysql500_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_japanese_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_japanese_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_general_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_unicode_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_unicode_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_general_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT 
struct charset_info_st my_charset_utf32_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_general_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_unicode_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_unicode_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_mysql500_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_unicode_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_unicode_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_general_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_nopad_bin; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_general_nopad_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_unicode_ci; +extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_unicode_nopad_ci; #define MY_UTF8MB3 "utf8" #define MY_UTF8MB4 "utf8mb4" diff --git a/include/my_bit.h b/include/my_bit.h index a50403c312d..218829ba747 100644 --- a/include/my_bit.h +++ b/include/my_bit.h @@ -25,7 +25,6 @@ C_MODE_START -extern const char _my_bits_nbits[256]; extern const uchar _my_bits_reverse_table[256]; /* @@ -40,37 +39,32 @@ static inline uint my_bit_log2(ulong value) return bit; } -static inline uint my_count_bits(ulonglong v) -{ -#if SIZEOF_LONG_LONG > 4 - /* The following code is a bit faster on 16 bit machines than if we would - only shift v */ - 
ulong v2=(ulong) (v >> 32); - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)] + - _my_bits_nbits[(uchar) (v2)] + - _my_bits_nbits[(uchar) (v2 >> 8)] + - _my_bits_nbits[(uchar) (v2 >> 16)] + - _my_bits_nbits[(uchar) (v2 >> 24)]); -#else - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)]); -#endif -} +/* +Count bits in 32bit integer + + Algorithm by Sean Anderson, according to: + http://graphics.stanford.edu/~seander/bithacks.html + under "Counting bits set, in parallel" + + (Orignal code public domain). +*/ static inline uint my_count_bits_uint32(uint32 v) { - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)]); + v = v - ((v >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } +static inline uint my_count_bits(ulonglong x) +{ + return my_count_bits_uint32((uint32)x) + my_count_bits_uint32((uint32)(x >> 32)); +} + + + + /* Next highest power of two diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index b3c71c65488..12fdab4f2fe 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -393,6 +393,23 @@ DECLARE_MYSQL_SYSVAR_SIMPLE(name, unsigned long long) = { \ PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ #name, comment, check, update, &varname, def, min, max, blk } +#define MYSQL_SYSVAR_UINT64_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, uint64_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } + +#ifdef _WIN64 +#define MYSQL_SYSVAR_SIZE_T(name, varname, 
opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#else +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#endif + #define MYSQL_SYSVAR_ENUM(name, varname, opt, comment, check, update, def, typelib) \ DECLARE_MYSQL_SYSVAR_TYPELIB(name, unsigned long) = { \ PLUGIN_VAR_ENUM | ((opt) & PLUGIN_VAR_MASK), \ diff --git a/mysys/my_bit.c b/mysys/my_bit.c index d36f52bb3c0..9ceb083cd48 100644 --- a/mysys/my_bit.c +++ b/mysys/my_bit.c @@ -17,24 +17,6 @@ #include -const char _my_bits_nbits[256] = { - 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, -}; /* perl -e 'print map{", 0x".unpack H2,pack B8,unpack b8,chr$_}(0..255)' diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index e6948c60a51..e9e63dcfcd7 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -1,91 +1,157 @@ # 
TODO: Copyrights -IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/Makefile") + +IF (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".") ENDIF() -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +MACRO(SKIP_ROCKSDB_PLUGIN msg) + MESSAGE_ONCE(SKIP_ROCKSDB_PLUGIN "Can't build rocksdb engine - ${msg}") + RETURN() +ENDMACRO() + + +# This plugin needs recent C++ compilers (it is using C++11 features) +# Skip build for the old compilers +SET(CXX11_FLAGS) +SET(OLD_COMPILER_MSG "requires c++11 -capable compiler (minimal supported versions are g++ 4.8, clang 3.3, VS2015)") + +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU") + EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + IF (GCC_VERSION VERSION_LESS 4.8) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() + SET(CXX11_FLAGS "-std=c++11") +ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR + (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3)) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() + SET(CXX11_FLAGS "-stdlib=libc++ -std=c++11") +ELSEIF(MSVC) + IF (MSVC_VERSION LESS 1900) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() +ELSE() + SKIP_ROCKSDB_PLUGIN("Compiler not supported") +ENDIF() + +IF(CXX11_FLAGS) + ADD_DEFINITIONS(${CXX11_FLAGS}) +ENDIF() + +SET(ROCKSDB_SE_SOURCES + rdb_mariadb_server_port.cc + rdb_mariadb_server_port.h + ha_rocksdb.cc + ha_rocksdb.h + rdb_i_s.cc + rdb_i_s.h + rdb_mutex_wrapper.cc + rdb_mutex_wrapper.h + rdb_index_merge.cc + rdb_index_merge.h + properties_collector.cc + properties_collector.h + rdb_datadic.cc + rdb_datadic.h + rdb_cf_manager.cc + rdb_cf_manager.h + rdb_utils.cc rdb_utils.h + rdb_threads.cc + rdb_threads.h +) + +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb) + +IF(NOT TARGET 
rocksdb_se) + # Bail out if compilation with rocksdb engine is not requested + RETURN() +ENDIF() + +INCLUDE(build_rocksdb.cmake) + +ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib + ha_rocksdb_proto.h + logger.h + rdb_comparator.h + rdb_cf_options.cc + rdb_cf_options.h + event_listener.cc + event_listener.h + rdb_perf_context.cc + rdb_perf_context.h + rdb_sst_info.cc + rdb_sst_info.h + rdb_buff.h + rdb_mariadb_port.h) + +ADD_DEPENDENCIES(rocksdb_aux_lib GenError) + +TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) +TARGET_LINK_LIBRARIES(rocksdb_se rocksdb_aux_lib) + +IF(CMAKE_COMPILER_IS_GNUCXX) + # MARIAROCKS_NOT_YET: Add -frtti flag when compiling RocksDB files. + # TODO: is this the right way to do this? + # - SQL layer and storage/rocksdb/*.cc are compiled with -fnortti + # - RocksDB files are compiled with "-fnortti ... -frtti" + # - This causes RocksDB headers to be compiled with different settings: + # = with RTTI when compiling RocksDB + # = without RTTI when compiling storage/rocksdb/*.cc + # + # (facebook/mysql-5.6 just compiles everything without -f*rtti, which means + # everything is compiled with -frtti) + # + # (also had to add -frtti above, because something that event_listener.cc + # includes requires it. 
So, now everything in MariaRocks is compiled with + # -frtti) + set_source_files_properties(event_listener.cc rdb_cf_options.cc + PROPERTIES COMPILE_FLAGS -frtti) +ENDIF() CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU) IF(HAVE_SCHED_GETCPU) ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) ENDIF() -# get a list of rocksdb library source files -# run with env -i to avoid passing variables -EXECUTE_PROCESS( - COMMAND env -i ${CMAKE_SOURCE_DIR}/storage/rocksdb/get_rocksdb_files.sh - OUTPUT_VARIABLE SCRIPT_OUTPUT - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} -) -# split the list into lines -STRING(REGEX MATCHALL "[^\n]+" ROCKSDB_LIB_SOURCES ${SCRIPT_OUTPUT}) - -INCLUDE_DIRECTORIES( - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/include - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/third-party/gtest-1.7.0/fused-src -) - -ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX - -DZLIB) - -# MARIAROCKS_NOT_YET: Add -frtti flag when compiling RocksDB files. -# TODO: is this the right way to do this? -# - SQL layer and storage/rocksdb/*.cc are compiled with -fnortti -# - RocksDB files are compiled with "-fnortti ... -frtti" -# - This causes RocksDB headers to be compiled with different settings: -# = with RTTI when compiling RocksDB -# = without RTTI when compiling storage/rocksdb/*.cc -# -# (facebook/mysql-5.6 just compiles everything without -f*rtti, which means -# everything is compiled with -frtti) -# -# (also had to add -frtti above, because something that event_listener.cc -# includes requires it. 
So, now everything in MariaRocks is compiled with -# -frtti) -set_source_files_properties(${ROCKSDB_LIB_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) -set_source_files_properties(event_listener.cc PROPERTIES COMPILE_FLAGS -frtti) -set_source_files_properties(rdb_cf_options.cc PROPERTIES COMPILE_FLAGS -frtti) - -ADD_CONVENIENCE_LIBRARY( - ROCKSDB_AUX_LIB - ha_rocksdb_proto.h - logger.h - rdb_comparator.h - rdb_datadic.cc rdb_datadic.h - rdb_cf_options.cc rdb_cf_options.h - rdb_cf_manager.cc rdb_cf_manager.h - properties_collector.cc properties_collector.h - event_listener.cc event_listener.h - rdb_i_s.cc rdb_i_s.h - rdb_index_merge.cc rdb_index_merge.h - rdb_perf_context.cc rdb_perf_context.h - rdb_mutex_wrapper.cc rdb_mutex_wrapper.h - rdb_sst_info.cc rdb_sst_info.h - rdb_utils.cc rdb_utils.h rdb_buff.h - rdb_threads.cc rdb_threads.h - rdb_mariadb_port.h - ${ROCKSDB_LIB_SOURCES} -) - -# We include storage/innobase/include/ut0counter.h, which includes -# univ.i, which includes mysqld_error.h. 
-# Indicate that MyRocks is dependent on that file (just like innochecksum does) -ADD_DEPENDENCIES(ROCKSDB_AUX_LIB GenError) - -SET(ROCKSDB_SOURCES - rdb_mariadb_server_port.cc rdb_mariadb_server_port.h - ha_rocksdb.cc ha_rocksdb.h -) - -IF(WITH_FB_TSAN) - SET(PIC_EXT "_pic") -ELSE() - SET(PIC_EXT "") +IF(WITH_UNIT_TESTS AND WITH_EMBEDDED_SERVER) + ADD_SUBDIRECTORY(unittest) ENDIF() -SET(rocksdb_static_libs ) +ADD_LIBRARY(rocksdb_tools STATIC + rocksdb/tools/ldb_tool.cc + rocksdb/tools/ldb_cmd.cc + rocksdb/tools/sst_dump_tool.cc +) + +MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc) +TARGET_LINK_LIBRARIES(sst_dump rocksdblib) + +MYSQL_ADD_EXECUTABLE(ldb rocksdb/tools/ldb.cc) +TARGET_LINK_LIBRARIES(ldb rocksdb_tools rocksdblib) + +MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc) +TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) + +IF(CMAKE_COMPILER_IS_GNUCXX) + SET_TARGET_PROPERTIES(rocksdb_tools sst_dump ldb mysql_ldb PROPERTIES COMPILE_FLAGS -frtti) +ENDIF() +IF(MSVC) + # RocksDB, the storage engine, overdoes "const" by adding + # additional const qualifiers to parameters of the overriden virtual functions + # This creates a lot of warnings, that we silence here. + ADD_DEFINITIONS(/wd4373) + + # Some checks in C++ runtime that make debug build much slower + ADD_DEFINITIONS(-D_ITERATOR_DEBUG_LEVEL=0) +ENDIF() + +# Optional compression libraries. +# +# TODO: search compression libraries properly. 
+# Use FIND_PACKAGE, CHECK_LIBRARY_EXISTS etc +IF(MARIAROCKS_NOT_YET) IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "") SET(rocksdb_static_libs ${rocksdb_static_libs} $ENV{WITH_SNAPPY}/libsnappy${PIC_EXT}.a) @@ -110,31 +176,4 @@ IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") $ENV{WITH_ZSTD}/libzstd${PIC_EXT}.a) ADD_DEFINITIONS(-DZSTD) ENDIF() - -SET(rocksdb_static_libs ROCKSDB_AUX_LIB ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") - -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT MODULE_ONLY - LINK_LIBRARIES ${rocksdb_static_libs} -) - -IF(WITH_EMBEDDED_SERVER) - ADD_SUBDIRECTORY(unittest) -ENDIF() - -IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) - # TODO: read this file list from src.mk:TOOL_SOURCES - SET(ROCKSDB_TOOL_SOURCES - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb_tool.cc - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb_cmd.cc - ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump_tool.cc - ) - set_source_files_properties(${ROCKSDB_TOOL_SOURCES} PROPERTIES COMPILE_FLAGS -frtti) - MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(sst_dump ${rocksdb_static_libs}) - - MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(ldb ${rocksdb_static_libs}) - - MYSQL_ADD_EXECUTABLE(mysql_ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/tools/mysql_ldb.cc ${ROCKSDB_TOOL_SOURCES}) - TARGET_LINK_LIBRARIES(mysql_ldb ${rocksdb_static_libs}) -ENDIF() +ENDIF(MARIAROCKS_NOT_YET) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake new file mode 100644 index 00000000000..35e83af73dc --- /dev/null +++ b/storage/rocksdb/build_rocksdb.cmake @@ -0,0 +1,337 @@ + +if(POLICY CMP0042) + cmake_policy(SET CMP0042 NEW) +endif() + +SET(ROCKSDB_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb) + +INCLUDE_DIRECTORIES( + ${ROCKSDB_SOURCE_DIR} + ${ROCKSDB_SOURCE_DIR}/include + 
${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src +) + + + +list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") + +if(WIN32) + # include(${ROCKSDB_SOURCE_DIR}/thirdparty.inc) +else() + option(WITH_ROCKSDB_JEMALLOC "build RocksDB with JeMalloc" OFF) + if(WITH_ROCKSDB_JEMALLOC) + find_package(JeMalloc REQUIRED) + add_definitions(-DROCKSDB_JEMALLOC) + include_directories(${JEMALLOC_INCLUDE_DIR}) + endif() + if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + # FreeBSD has jemaloc as default malloc + add_definitions(-DROCKSDB_JEMALLOC) + set(WITH_JEMALLOC ON) + endif() + option(WITH_ROCKSDB_SNAPPY "build RocksDB with SNAPPY" OFF) + if(WITH_ROCKSDB_SNAPPY) + find_package(snappy REQUIRED) + add_definitions(-DSNAPPY) + include_directories(${SNAPPY_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES}) + endif() +endif() + + + + + +if(CMAKE_SYSTEM_NAME MATCHES "Cygwin") + add_definitions(-fno-builtin-memcmp -DCYGWIN) +elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") + add_definitions(-DOS_MACOSX) +elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") + add_definitions(-DOS_LINUX) +elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS") + add_definitions(-DOS_SOLARIS) +elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + add_definitions(-DOS_FREEBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD") + add_definitions(-DOS_NETBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + add_definitions(-DOS_OPENBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly") + add_definitions(-DOS_DRAGONFLYBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "Android") + add_definitions(-DOS_ANDROID) +elseif(CMAKE_SYSTEM_NAME MATCHES "Windows") + add_definitions(-DOS_WIN) +endif() + +IF(MSVC) + add_definitions(/wd4244) +ENDIF() +if(NOT WIN32) + add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) +endif() + +option(WITH_FALLOCATE "build with fallocate" ON) + +if(WITH_FALLOCATE AND UNIX) + include(CheckCSourceCompiles) + CHECK_C_SOURCE_COMPILES(" +#include +#include +int main() { + int fd = open(\"/dev/null\", 0); + 
fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024); +} +" HAVE_FALLOCATE) + if(HAVE_FALLOCATE) + add_definitions(-DROCKSDB_FALLOCATE_PRESENT) + endif() +endif() + +include(CheckFunctionExists) +CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE) +if(HAVE_MALLOC_USABLE_SIZE) + add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE) +endif() + +include_directories(${ROCKSDB_SOURCE_DIR}) +include_directories(${ROCKSDB_SOURCE_DIR}/include) +include_directories(SYSTEM ${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src) + +find_package(Threads REQUIRED) +if(WIN32) + set(SYSTEM_LIBS ${SYSTEM_LIBS} Shlwapi.lib Rpcrt4.lib) +else() + set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT}) +endif() + +set(ROCKSDB_LIBS rocksdblib}) +set(LIBS ${ROCKSDB_LIBS} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) + +#add_subdirectory(${ROCKSDB_SOURCE_DIR}/tools) + +# Main library source code + +set(ROCKSDB_SOURCES + db/auto_roll_logger.cc + db/builder.cc + db/c.cc + db/column_family.cc + db/compacted_db_impl.cc + db/compaction.cc + db/compaction_iterator.cc + db/compaction_job.cc + db/compaction_picker.cc + db/convenience.cc + db/dbformat.cc + db/db_filesnapshot.cc + db/db_impl.cc + db/db_impl_debug.cc + db/db_impl_experimental.cc + db/db_impl_readonly.cc + db/db_info_dumper.cc + db/db_iter.cc + db/event_helpers.cc + db/external_sst_file_ingestion_job.cc + db/experimental.cc + db/filename.cc + db/file_indexer.cc + db/flush_job.cc + db/flush_scheduler.cc + db/forward_iterator.cc + db/internal_stats.cc + db/log_reader.cc + db/log_writer.cc + db/managed_iterator.cc + db/memtable.cc + db/memtable_allocator.cc + db/memtable_list.cc + db/merge_helper.cc + db/merge_operator.cc + db/range_del_aggregator.cc + db/repair.cc + db/snapshot_impl.cc + db/table_cache.cc + db/table_properties_collector.cc + db/transaction_log_impl.cc + db/version_builder.cc + db/version_edit.cc + db/version_set.cc + db/wal_manager.cc + db/write_batch.cc + db/write_batch_base.cc + db/write_controller.cc + 
db/write_thread.cc + db/xfunc_test_points.cc + memtable/hash_cuckoo_rep.cc + memtable/hash_linklist_rep.cc + memtable/hash_skiplist_rep.cc + memtable/skiplistrep.cc + memtable/vectorrep.cc + port/stack_trace.cc + table/adaptive_table_factory.cc + table/block.cc + table/block_based_filter_block.cc + table/block_based_table_builder.cc + table/block_based_table_factory.cc + table/block_based_table_reader.cc + table/block_builder.cc + table/block_prefix_index.cc + table/bloom_block.cc + table/cuckoo_table_builder.cc + table/cuckoo_table_factory.cc + table/cuckoo_table_reader.cc + table/flush_block_policy.cc + table/format.cc + table/full_filter_block.cc + table/get_context.cc + table/iterator.cc + table/merger.cc + table/sst_file_writer.cc + table/meta_blocks.cc + table/plain_table_builder.cc + table/plain_table_factory.cc + table/plain_table_index.cc + table/plain_table_key_coding.cc + table/plain_table_reader.cc + table/persistent_cache_helper.cc + table/table_properties.cc + table/two_level_iterator.cc + tools/sst_dump_tool.cc + tools/db_bench_tool.cc + tools/dump/db_dump_tool.cc + util/arena.cc + util/bloom.cc + util/cf_options.cc + util/clock_cache.cc + util/coding.cc + util/compaction_job_stats_impl.cc + util/comparator.cc + util/concurrent_arena.cc + util/crc32c.cc + util/db_options.cc + util/delete_scheduler.cc + util/dynamic_bloom.cc + util/env.cc + util/env_chroot.cc + util/env_hdfs.cc + util/event_logger.cc + util/file_util.cc + util/file_reader_writer.cc + util/sst_file_manager_impl.cc + util/filter_policy.cc + util/hash.cc + util/histogram.cc + util/histogram_windowing.cc + util/instrumented_mutex.cc + util/iostats_context.cc + util/lru_cache.cc + tools/ldb_cmd.cc + tools/ldb_tool.cc + util/logging.cc + util/log_buffer.cc + util/memenv.cc + util/murmurhash.cc + util/options.cc + util/options_helper.cc + util/options_parser.cc + util/options_sanity_check.cc + util/perf_context.cc + util/perf_level.cc + util/random.cc + util/rate_limiter.cc + 
util/sharded_cache.cc + util/slice.cc + util/statistics.cc + util/status.cc + util/status_message.cc + util/string_util.cc + util/sync_point.cc + util/testutil.cc + util/thread_local.cc + util/threadpool_imp.cc + util/thread_status_impl.cc + util/thread_status_updater.cc + util/thread_status_util.cc + util/thread_status_util_debug.cc + util/transaction_test_util.cc + util/xfunc.cc + util/xxhash.cc + utilities/backupable/backupable_db.cc + utilities/blob_db/blob_db.cc + utilities/checkpoint/checkpoint.cc + utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc + utilities/date_tiered/date_tiered_db_impl.cc + utilities/document/document_db.cc + utilities/document/json_document.cc + utilities/document/json_document_builder.cc + utilities/env_mirror.cc + utilities/env_registry.cc + utilities/geodb/geodb_impl.cc + utilities/leveldb_options/leveldb_options.cc + utilities/lua/rocks_lua_compaction_filter.cc + utilities/memory/memory_util.cc + utilities/merge_operators/string_append/stringappend.cc + utilities/merge_operators/string_append/stringappend2.cc + utilities/merge_operators/put.cc + utilities/merge_operators/max.cc + utilities/merge_operators/uint64add.cc + utilities/option_change_migration/option_change_migration.cc + utilities/options/options_util.cc + utilities/persistent_cache/block_cache_tier.cc + utilities/persistent_cache/block_cache_tier_file.cc + utilities/persistent_cache/block_cache_tier_metadata.cc + utilities/persistent_cache/persistent_cache_tier.cc + utilities/persistent_cache/volatile_tier_impl.cc + utilities/redis/redis_lists.cc + utilities/simulator_cache/sim_cache.cc + utilities/spatialdb/spatial_db.cc + utilities/table_properties_collectors/compact_on_deletion_collector.cc + utilities/transactions/optimistic_transaction_impl.cc + utilities/transactions/optimistic_transaction_db_impl.cc + utilities/transactions/transaction_base.cc + utilities/transactions/transaction_impl.cc + utilities/transactions/transaction_db_impl.cc + 
utilities/transactions/transaction_db_mutex_impl.cc + utilities/transactions/transaction_lock_mgr.cc + utilities/transactions/transaction_util.cc + utilities/ttl/db_ttl_impl.cc + utilities/write_batch_with_index/write_batch_with_index.cc + utilities/write_batch_with_index/write_batch_with_index_internal.cc + utilities/col_buf_encoder.cc + utilities/col_buf_decoder.cc + utilities/column_aware_encoding_util.cc +) + +if(WIN32) + list(APPEND ROCKSDB_SOURCES + port/win/io_win.cc + port/win/env_win.cc + port/win/env_default.cc + port/win/port_win.cc + port/win/win_logger.cc + port/win/xpress_win.cc) +else() + list(APPEND ROCKSDB_SOURCES + port/port_posix.cc + util/env_posix.cc + util/io_posix.cc) +endif() +SET(SOURCES) +FOREACH(s ${ROCKSDB_SOURCES}) + list(APPEND SOURCES ${ROCKSDB_SOURCE_DIR}/${s}) +ENDFOREACH() + +IF(CMAKE_VERSION VERSION_GREATER "2.8.10") + STRING(TIMESTAMP GIT_DATE_TIME "%Y-%m-%d %H:%M:%S") +ENDIF() + +CONFIGURE_FILE(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in build_version.cc @ONLY) +INCLUDE_DIRECTORIES(${ROCKSDB_SOURCE_DIR}/util) +list(APPEND SOURCES ${CMAKE_CURRENT_BINARY_DIR}/build_version.cc) + +ADD_CONVENIENCE_LIBRARY(rocksdblib STATIC ${SOURCES}) +target_link_libraries(rocksdblib ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) +if(CMAKE_COMPILER_IS_GNUCXX) + set_target_properties(rocksdblib PROPERTIES COMPILE_FLAGS "-fno-builtin-memcmp -frtti") +endif() diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c97d7976ba3..fa83b3f5a52 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -394,9 +394,9 @@ static long long rocksdb_block_cache_size; /* Use unsigned long long instead of uint64_t because of MySQL compatibility */ static unsigned long long // NOLINT(runtime/int) rocksdb_rate_limiter_bytes_per_sec; -static uint64_t rocksdb_info_log_level; +static ulong rocksdb_info_log_level; static char * rocksdb_wal_dir; -static uint64_t rocksdb_index_type; +static ulong rocksdb_index_type; static char 
rocksdb_background_sync; static uint32_t rocksdb_debug_optimizer_n_rows; static my_bool rocksdb_debug_optimizer_no_zero_cardinality; @@ -651,12 +651,12 @@ static MYSQL_SYSVAR_UINT(wal_recovery_mode, /* min */ (uint) rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, /* max */ (uint) rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); -static MYSQL_SYSVAR_ULONG(compaction_readahead_size, +static MYSQL_SYSVAR_SIZE_T(compaction_readahead_size, rocksdb_db_options.compaction_readahead_size, PLUGIN_VAR_RQCMDARG, "DBOptions::compaction_readahead_size for RocksDB", nullptr, nullptr, rocksdb_db_options.compaction_readahead_size, - /* min */ 0L, /* max */ ULONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL(new_table_reader_for_compaction_inputs, *reinterpret_cast @@ -695,12 +695,12 @@ static MYSQL_SYSVAR_INT(max_open_files, nullptr, nullptr, rocksdb_db_options.max_open_files, /* min */ -1, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_ULONG(max_total_wal_size, +static MYSQL_SYSVAR_UINT64_T(max_total_wal_size, rocksdb_db_options.max_total_wal_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_total_wal_size for RocksDB", nullptr, nullptr, rocksdb_db_options.max_total_wal_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL(disabledatasync, *reinterpret_cast(&rocksdb_db_options.disableDataSync), @@ -719,12 +719,12 @@ static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, "DBOptions::wal_dir for RocksDB", nullptr, nullptr, rocksdb_db_options.wal_dir.c_str()); -static MYSQL_SYSVAR_ULONG(delete_obsolete_files_period_micros, +static MYSQL_SYSVAR_UINT64_T(delete_obsolete_files_period_micros, rocksdb_db_options.delete_obsolete_files_period_micros, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::delete_obsolete_files_period_micros for RocksDB", nullptr, nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0, 
/* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_INT(base_background_compactions, rocksdb_db_options.base_background_compactions, @@ -754,33 +754,33 @@ static MYSQL_SYSVAR_UINT(max_subcompactions, nullptr, nullptr, rocksdb_db_options.max_subcompactions, /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); -static MYSQL_SYSVAR_ULONG(max_log_file_size, +static MYSQL_SYSVAR_SIZE_T(max_log_file_size, rocksdb_db_options.max_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_log_file_size for RocksDB", nullptr, nullptr, rocksdb_db_options.max_log_file_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); -static MYSQL_SYSVAR_ULONG(log_file_time_to_roll, +static MYSQL_SYSVAR_SIZE_T(log_file_time_to_roll, rocksdb_db_options.log_file_time_to_roll, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::log_file_time_to_roll for RocksDB", nullptr, nullptr, rocksdb_db_options.log_file_time_to_roll, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); -static MYSQL_SYSVAR_ULONG(keep_log_file_num, +static MYSQL_SYSVAR_SIZE_T(keep_log_file_num, rocksdb_db_options.keep_log_file_num, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::keep_log_file_num for RocksDB", nullptr, nullptr, rocksdb_db_options.keep_log_file_num, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); -static MYSQL_SYSVAR_ULONG(max_manifest_file_size, +static MYSQL_SYSVAR_UINT64_T(max_manifest_file_size, rocksdb_db_options.max_manifest_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_manifest_file_size for RocksDB", nullptr, nullptr, rocksdb_db_options.max_manifest_file_size, - /* min */ 0L, /* max */ ULONG_MAX, 0); + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_INT(table_cache_numshardbits, rocksdb_db_options.table_cache_numshardbits, @@ -789,26 +789,26 @@ static MYSQL_SYSVAR_INT(table_cache_numshardbits, nullptr, nullptr, 
rocksdb_db_options.table_cache_numshardbits, /* min */ 0, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, +static MYSQL_SYSVAR_UINT64_T(wal_ttl_seconds, rocksdb_db_options.WAL_ttl_seconds, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::WAL_ttl_seconds for RocksDB", nullptr, nullptr, rocksdb_db_options.WAL_ttl_seconds, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ LONGLONG_MAX, 0); -static MYSQL_SYSVAR_ULONG(wal_size_limit_mb, +static MYSQL_SYSVAR_UINT64_T(wal_size_limit_mb, rocksdb_db_options.WAL_size_limit_MB, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::WAL_size_limit_MB for RocksDB", nullptr, nullptr, rocksdb_db_options.WAL_size_limit_MB, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ LONGLONG_MAX, 0); -static MYSQL_SYSVAR_ULONG(manifest_preallocation_size, +static MYSQL_SYSVAR_SIZE_T(manifest_preallocation_size, rocksdb_db_options.manifest_preallocation_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::manifest_preallocation_size for RocksDB", nullptr, nullptr, rocksdb_db_options.manifest_preallocation_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL(use_direct_reads, *reinterpret_cast(&rocksdb_db_options.use_direct_reads), @@ -853,12 +853,12 @@ static MYSQL_SYSVAR_BOOL(advise_random_on_open, "DBOptions::advise_random_on_open for RocksDB", nullptr, nullptr, rocksdb_db_options.advise_random_on_open); -static MYSQL_SYSVAR_ULONG(db_write_buffer_size, +static MYSQL_SYSVAR_SIZE_T(db_write_buffer_size, rocksdb_db_options.db_write_buffer_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::db_write_buffer_size for RocksDB", nullptr, nullptr, rocksdb_db_options.db_write_buffer_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL(use_adaptive_mutex, *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), @@ -866,19 +866,19 @@ static 
MYSQL_SYSVAR_BOOL(use_adaptive_mutex, "DBOptions::use_adaptive_mutex for RocksDB", nullptr, nullptr, rocksdb_db_options.use_adaptive_mutex); -static MYSQL_SYSVAR_ULONG(bytes_per_sync, +static MYSQL_SYSVAR_UINT64_T(bytes_per_sync, rocksdb_db_options.bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::bytes_per_sync for RocksDB", nullptr, nullptr, rocksdb_db_options.bytes_per_sync, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); -static MYSQL_SYSVAR_ULONG(wal_bytes_per_sync, +static MYSQL_SYSVAR_UINT64_T(wal_bytes_per_sync, rocksdb_db_options.wal_bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, nullptr, rocksdb_db_options.wal_bytes_per_sync, - /* min */ 0L, /* max */ LONG_MAX, 0); + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL(enable_thread_tracking, *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), @@ -921,7 +921,7 @@ static MYSQL_SYSVAR_ENUM(index_type, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::index_type for RocksDB", nullptr, nullptr, - (uint64_t)rocksdb_tbl_options.index_type, &index_type_typelib); + (ulong)rocksdb_tbl_options.index_type, &index_type_typelib); static MYSQL_SYSVAR_BOOL(hash_index_allow_collision, *reinterpret_cast(&rocksdb_tbl_options.hash_index_allow_collision), @@ -935,12 +935,12 @@ static MYSQL_SYSVAR_BOOL(no_block_cache, "BlockBasedTableOptions::no_block_cache for RocksDB", nullptr, nullptr, rocksdb_tbl_options.no_block_cache); -static MYSQL_SYSVAR_ULONG(block_size, +static MYSQL_SYSVAR_SIZE_T(block_size, rocksdb_tbl_options.block_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::block_size for RocksDB", nullptr, nullptr, rocksdb_tbl_options.block_size, - /* min */ 1L, /* max */ LONG_MAX, 0); + /* min */ 1L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_INT(block_size_deviation, rocksdb_tbl_options.block_size_deviation, @@ -3097,7 +3097,7 
@@ class Rdb_snapshot_status : public Rdb_tx_list_walker m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" "%s\n" "lock count %llu, write count %llu\n", - curr_time - snapshot_timestamp, + (longlong)(curr_time - snapshot_timestamp), buffer, tx->get_lock_count(), tx->get_write_count()); } @@ -3307,20 +3307,20 @@ static bool rocksdb_show_status(handlerton* const hton, str.clear(); rocksdb::MemoryUtil::GetApproximateMemoryUsageByType( dbs, cache_set, &temp_usage_by_type); - snprintf(buf, sizeof(buf), "\nMemTable Total: %lu", - temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); + snprintf(buf, sizeof(buf), "\nMemTable Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); str.append(buf); - snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %lu", - temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]); + snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]); str.append(buf); - snprintf(buf, sizeof(buf), "\nTable Readers Total: %lu", - temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]); + snprintf(buf, sizeof(buf), "\nTable Readers Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]); str.append(buf); - snprintf(buf, sizeof(buf), "\nCache Total: %lu", - temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]); + snprintf(buf, sizeof(buf), "\nCache Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]); str.append(buf); - snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %lu", - internal_cache_count * kDefaultInternalCacheSize); + snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %llu", + (ulonglong)internal_cache_count * kDefaultInternalCacheSize); str.append(buf); res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); } @@ -3711,7 +3711,11 @@ static int rocksdb_init_func(void* const p) and RocksDB doesn't provide any way to check what kind of error it was. 
Checking system errno happens to work right now. */ - if (status.IsIOError() && errno == ENOENT) + if (status.IsIOError() +#ifndef _WIN32 + && errno == ENOENT +#endif + ) { sql_print_information("RocksDB: Got ENOENT when listing column families"); sql_print_information("RocksDB: assuming that we're creating a new database"); @@ -5662,13 +5666,13 @@ int rdb_normalize_tablename(const std::string& tablename, { DBUG_ASSERT(strbuf != nullptr); - if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/') + if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != FN_LIBCHAR) { DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_INTERNAL_ERROR; } - size_t pos = tablename.find_first_of('/', 2); + size_t pos = tablename.find_first_of(FN_LIBCHAR, 2); if (pos == std::string::npos) { DBUG_ASSERT(0); // We were not passed table name? @@ -9271,10 +9275,10 @@ void Rdb_drop_index_thread::run() } timespec ts; - clock_gettime(CLOCK_REALTIME, &ts); - ts.tv_sec += dict_manager.is_drop_index_empty() - ? 24*60*60 // no filtering + int sec= dict_manager.is_drop_index_empty() + ? 24 * 60 * 60 // no filtering : 60; // filtering + set_timespec(ts,sec); const auto ret __attribute__((__unused__)) = mysql_cond_timedwait( &m_signal_cond, &m_signal_mutex, &ts); @@ -10805,8 +10809,7 @@ void Rdb_background_thread::run() const int WAKE_UP_INTERVAL = 1; timespec ts_next_sync; - clock_gettime(CLOCK_REALTIME, &ts_next_sync); - ts_next_sync.tv_sec += WAKE_UP_INTERVAL; + set_timespec(ts_next_sync, WAKE_UP_INTERVAL); for (;;) { @@ -10840,8 +10843,9 @@ void Rdb_background_thread::run() ddl_manager.persist_stats(); } - timespec ts; - clock_gettime(CLOCK_REALTIME, &ts); + // Set the next timestamp for mysql_cond_timedwait() (which ends up calling + // pthread_cond_timedwait()) to wait on. + set_timespec(ts_next_sync, WAKE_UP_INTERVAL); // Flush the WAL. 
if (rdb && rocksdb_background_sync) @@ -10852,10 +10856,6 @@ void Rdb_background_thread::run() rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); } } - - // Set the next timestamp for mysql_cond_timedwait() (which ends up calling - // pthread_cond_timedwait()) to wait on. - ts_next_sync.tv_sec= ts.tv_sec + WAKE_UP_INTERVAL; } // save remaining stats which might've left unsaved diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 824b56b958f..b7fd7062460 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -30,6 +30,14 @@ #include "./my_global.h" /* ulonglong */ #include "./sql_string.h" +#ifdef _WIN32 +#undef pthread_key_create +#undef pthread_key_delete +#undef pthread_setspecific +#undef pthread_getspecific +#endif + + /* RocksDB header files */ #include "rocksdb/cache.h" #include "rocksdb/perf_context.h" diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result index 16df96bc74d..c75fe5893b0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -1,39 +1,21 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_adaptive_hash_timeout trx_is_read_only trx_autocommit_non_locking -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_TRX but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; lock_id lock_trx_id lock_mode lock_type lock_table lock_index lock_space lock_page lock_rec 
lock_data -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCKS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; requesting_trx_id requested_lock_id blocking_trx_id blocking_lock_id -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCK_WAITS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_RESET but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET; database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM; page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMPMEM but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET; page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time -Warnings: -Warning 1012 InnoDB: SELECTing from 
INFORMATION_SCHEMA.INNODB_CMPMEM_RESET but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; NAME SUBSYSTEM COUNT MAX_COUNT MIN_COUNT AVG_COUNT COUNT_RESET MAX_COUNT_RESET MIN_COUNT_RESET AVG_COUNT_RESET TIME_ENABLED TIME_DISABLED TIME_ELAPSED TIME_RESET STATUS TYPE COMMENT metadata_table_handles_opened metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table handles opened @@ -346,49 +328,25 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; KEY VALUE SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; POOL_ID POOL_SIZE FREE_BUFFERS DATABASE_PAGES OLD_DATABASE_PAGES MODIFIED_DATABASE_PAGES PENDING_DECOMPRESS PENDING_READS PENDING_FLUSH_LRU PENDING_FLUSH_LIST PAGES_MADE_YOUNG PAGES_NOT_MADE_YOUNG PAGES_MADE_YOUNG_RATE PAGES_MADE_NOT_YOUNG_RATE NUMBER_PAGES_READ NUMBER_PAGES_CREATED NUMBER_PAGES_WRITTEN PAGES_READ_RATE PAGES_CREATE_RATE PAGES_WRITTEN_RATE NUMBER_PAGES_GET HIT_RATE YOUNG_MAKE_PER_THOUSAND_GETS NOT_YOUNG_MAKE_PER_THOUSAND_GETS NUMBER_PAGES_READ_AHEAD NUMBER_READ_AHEAD_EVICTED READ_AHEAD_RATE READ_AHEAD_EVICTED_RATE LRU_IO_TOTAL LRU_IO_CURRENT UNCOMPRESS_TOTAL UNCOMPRESS_CURRENT -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE; POOL_ID BLOCK_ID SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE PAGE_STATE IO_FIX IS_OLD FREE_PAGE_CLOCK -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU; POOL_ID LRU_POSITION SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE 
COMPRESSED_SIZE COMPRESSED IO_FIX IS_OLD FREE_PAGE_CLOCK -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; TABLE_ID NAME STATS_INITIALIZED NUM_ROWS CLUST_INDEX_SIZE OTHER_INDEX_SIZE MODIFIED_COUNTER AUTOINC REF_COUNT -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES; INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE MERGE_THRESHOLD -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_INDEXES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; TABLE_ID NAME POS MTYPE PRTYPE LEN -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_COLUMNS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS; INDEX_ID NAME POS -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FIELDS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; ID FOR_NAME REF_NAME N_COLS TYPE -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; ID FOR_COL_NAME REF_COL_NAME POS -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE 
ZIP_PAGE_SIZE SPACE_TYPE FS_BLOCK_SIZE FILE_SIZE ALLOCATED_SIZE -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES but the InnoDB storage engine is not installed SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; SPACE PATH -Warnings: -Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DATAFILES but the InnoDB storage engine is not installed diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result index 76085cc1d27..c016c88ab8c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/partition.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result @@ -3,12 +3,12 @@ DROP TABLE IF EXISTS VAR_POP; DROP TABLE IF EXISTS TEMP0; DROP TABLE IF EXISTS VAR_SAMP; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; -SHOW TABLES; -Tables_in_test -TEMP0 -VAR_POP -VAR_SAMP +show tables; +tables_in_test t1 +temp0 +var_pop +var_samp SELECT * FROM t1 ORDER BY i LIMIT 10; i j k 1 1 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt index 0d92a9bb29d..431fc331458 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.opt +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -1,2 +1,2 @@ ---ignore-db-dirs=.rocksdb --plugin-load=ha_rocksdb_se +--ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test index e5de6246f60..70ab64f8194 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +# Unixisms ("exec ls" in set_checkpoint.inc etc) +--source include/not_windows.inc + --disable_warnings DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test 
b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index 63756aec48a..65cd6b09cc0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -3,6 +3,10 @@ # following check is commented out: # --source include/have_fullregex.inc +# Unixisms (exec grep) +--source include/not_windows.inc + + SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; --disable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test index 9cb32e8d615..121c0d610d6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test @@ -1,3 +1,4 @@ +--source include/not_windows.inc --source include/have_rocksdb.inc --disable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test index 2a2896691b7..9ee58aa5217 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +# Bash +--source include/not_windows.inc + # # Generate concurrent requests to alter a table using mysqlslap # diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index 32d8133fab1..f06b04ec561 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -1,4 +1,6 @@ --source include/have_rocksdb.inc +#Unixisms (possibly Linuxisms, exec truncate) +--source include/not_windows.inc --disable_warnings DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test index d259114dbda..69c3ca28f17 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +#Unixisms (--exec truncate, du, grep ,sed) +--source include/not_windows.inc + --disable_warnings DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test index 781163f34fb..9ac89a128c9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test @@ -7,10 +7,10 @@ INSERT INTO t values (1), (2), (3); --error ER_TABLE_EXISTS_ERROR CREATE TABLE t(id int primary key) engine=rocksdb; FLUSH TABLES; ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp ---error ER_UNKNOWN_ERROR +move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp; +--error ER_UNKNOWN_ERROR CREATE TABLE t(id int primary key) engine=rocksdb; ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm +move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm; FLUSH TABLES; SELECT * FROM t; DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test index b50cf08b227..4ff48e13089 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test @@ -3,8 +3,14 @@ # Make sure that the InnoDb information schema tables are disabled when InnoDB # is turned off and attempting to access them doesn't crash. 
+# Disable warnings, as the table names in warnings appear in lower or uppercase +# depending on platform + +--disable_warnings + SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; #Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; + SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; @@ -33,3 +39,5 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; #Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; + +--enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test index d5e13fea0a7..52e7d658f67 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -1,3 +1,4 @@ + --source include/have_rocksdb.inc --source include/have_partition.inc @@ -30,7 +31,7 @@ CREATE TABLE TEMP0 (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 3; CREATE TABLE VAR_SAMP (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 10; --enable_query_log - +--lowercase_result SHOW TABLES; SELECT * FROM t1 ORDER BY i LIMIT 10; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index 72eef91196d..101b2085ac4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -1,4 +1,6 @@ --source include/have_rocksdb.inc +# Does not run on Windows, because of unixisms (exec grep, cut, truncate file with exec echo) +--source include/not_windows.inc # # Tests for row checksums feature diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test index 6dd4dd11748..ba10dcbe3b6 
100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc - +# Unixisms (exec ls | wc -l) +--source include/not_windows.inc let $ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test.install.db; let $rdb_ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test; let $sql_file = $MYSQL_TMP_DIR/rocksdb_datadir.sql; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test index 53ca05c7fdc..59472e565ab 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test @@ -99,6 +99,7 @@ drop table t1, t2; --echo # CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; INSERT INTO t1 VALUES(1,'a'); +--replace_result \\ / --error ER_ERROR_ON_RENAME RENAME TABLE t1 TO db3.t3; SELECT * FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc index 9a6bf73d6a0..40154d9eaa7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc @@ -20,10 +20,11 @@ let TESTDB = $TESTDB; --perl +$ENV{'RQG_HOME'}=$ENV{'RQG_BASE'}; foreach $grammar_file (split(/ /, $ENV{'GRAMMAR_FILES'})) { # Errors from the gentest.pl file will be captured in the results file - my $cmd = "RQG_HOME=$ENV{'RQG_BASE'} perl $ENV{'RQG_BASE'}/gentest.pl " . + my $cmd = "perl $ENV{'RQG_BASE'}/gentest.pl " . "--dsn=dbi:mysql:host=:port=:user=root:database=$ENV{'TESTDB'}" . ":mysql_socket=$ENV{'MYSQL_SOCKET'} " . "--gendata=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$ENV{'DATA_FILE'} " . 
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test index 9f36a7fb958..9f1694ab8bd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test @@ -1,4 +1,7 @@ --source include/have_rocksdb.inc +#Unixism (exec awk) +-- source include/not_windows.inc + SET @cur_long_query_time = @@long_query_time; # Set the long query time to something big so that nothing unexpected gets into it SET @@long_query_time = 600; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics.test b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test index 3971fd18ecd..70fc2f72b7e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/statistics.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test @@ -60,7 +60,7 @@ SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables --source include/restart_mysqld.inc # give the server a chance to load in statistics ---exec sleep 5 +--sleep 5 SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test index 4483f48b4ca..afae67b48ba 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -1,5 +1,7 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +# Unixism, exec sed +--source include/not_windows.inc let ddl= $MYSQL_TMP_DIR/unique_sec.sql; --exec sed s/##CF##//g ../storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc > $ddl diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc index 0ddff023f79..10d667181ef 100644 --- a/storage/rocksdb/properties_collector.cc +++ 
b/storage/rocksdb/properties_collector.cc @@ -15,7 +15,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include - +#ifdef _WIN32 +#define _CRT_RAND_S +#include +#define rand_r rand_s +#endif /* This C++ file's header file */ #include "./properties_collector.h" @@ -130,10 +134,9 @@ void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( const rocksdb::Slice& key) { - GL_INDEX_ID gl_index_id = { - .cf_id = m_cf_id, - .index_id = rdb_netbuf_to_uint32(reinterpret_cast(key.data())) - }; + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id = m_cf_id; + gl_index_id.index_id = rdb_netbuf_to_uint32(reinterpret_cast(key.data())); if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id) { diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h index deb718e88c9..17bbc1c6015 100644 --- a/storage/rocksdb/rdb_buff.h +++ b/storage/rocksdb/rdb_buff.h @@ -20,6 +20,16 @@ #include #include +#ifdef _WIN32 +#include +#define htobe64 _byteswap_uint64 +#define be64toh _byteswap_uint64 +#define htobe32 _byteswap_ulong +#define be32toh _byteswap_ulong +#define htobe16 _byteswap_ushort +#define be16toh _byteswap_ushort +#endif + namespace myrocks { /* diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 9d917b8dd62..d5af9d83754 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1523,7 +1523,7 @@ static void rdb_pack_with_varchar_encoding( // pad with zeros if necessary; for (size_t idx= 0; idx < padding_bytes; idx++) *(ptr++)= 0; - *(ptr++) = 255 - padding_bytes; + *(ptr++) = 255 - (uchar)padding_bytes; xfrm_len -= copy_len; encoded_size += RDB_ESCAPE_LENGTH; @@ -1814,7 +1814,7 @@ static int rdb_unpack_binary_or_utf8_varchar( /* Save the length */ if (field_var->length_bytes == 1) { - d0[0]= len; + d0[0]= (uchar)len; } else { @@ -1944,7 +1944,7 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( /* Save 
the length */ if (field_var->length_bytes == 1) { - d0[0]= len; + d0[0]= (uchar)len; } else { @@ -2257,7 +2257,7 @@ rdb_unpack_simple_varchar_space_pad(Rdb_field_packing* const fpi, /* Save the length */ if (field_var->length_bytes == 1) { - d0[0]= len; + d0[0]= (uchar)len; } else { @@ -3796,7 +3796,7 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, // store binlog file name length DBUG_ASSERT(strlen(binlog_name) <= FN_REFLEN); - const uint16_t binlog_name_len = strlen(binlog_name); + const uint16_t binlog_name_len = (uint16_t)strlen(binlog_name); rdb_netbuf_store_uint16(buf+pack_len, binlog_name_len); pack_len += sizeof(uint16); @@ -3810,7 +3810,7 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, // store binlog gtid length. // If gtid was not set, store 0 instead - const uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0; + const uint16_t binlog_gtid_len = binlog_gtid? (uint16_t)strlen(binlog_gtid) : 0; rdb_netbuf_store_uint16(buf+pack_len, binlog_gtid_len); pack_len += sizeof(uint16); diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index 91cb9b8d563..35861bb2088 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -24,9 +24,12 @@ #include #include #include +#include /* C standard header files */ +#ifndef _WIN32 #include +#endif /* MyRocks header files */ #include "./ha_rocksdb.h" diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index e9a3577b7e8..ab917a62cf6 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -797,7 +797,7 @@ static int rdb_i_s_global_info_fill_table( char gtid_buf[GTID_BUF_LEN]= {0}; if (blm->read(file_buf, &pos, gtid_buf)) { - snprintf(pos_buf, INT_BUF_LEN, "%lu", (uint64_t) pos); + snprintf(pos_buf, INT_BUF_LEN, "%llu", (ulonglong) pos); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf); ret |= 
rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf); @@ -1534,7 +1534,7 @@ struct st_maria_plugin rdb_i_s_index_file_map= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_lock_info= +struct st_maria_plugin rdb_i_s_lock_info= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, @@ -1551,7 +1551,7 @@ struct st_mysql_plugin rdb_i_s_lock_info= 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_trx_info= +struct st_maria_plugin rdb_i_s_trx_info= { MYSQL_INFORMATION_SCHEMA_PLUGIN, &rdb_i_s_info, diff --git a/storage/rocksdb/rdb_mariadb_server_port.cc b/storage/rocksdb/rdb_mariadb_server_port.cc index 59315c199cd..bd2c730d33d 100644 --- a/storage/rocksdb/rdb_mariadb_server_port.cc +++ b/storage/rocksdb/rdb_mariadb_server_port.cc @@ -64,7 +64,7 @@ bool Regex_list_handler::set_patterns(const std::string& pattern_str) delete m_pattern; m_pattern= pattern; } - catch (const std::regex_error& e) + catch (const std::regex_error&) { // This pattern is invalid. pattern_valid= false; diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 933357c8f08..422cd420cfa 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h @@ -68,7 +68,7 @@ class Rdb_sst_info { const rocksdb::DBOptions& m_db_options; uint64_t m_curr_size; uint64_t m_max_size; - uint m_sst_count; + uint32_t m_sst_count; std::string m_error_msg; std::string m_prefix; static std::string m_suffix; diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index 01be32f8a6a..b7a1c7cb9b0 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -18,6 +18,17 @@ /* MySQL includes */ #include "./my_global.h" +#ifdef _WIN32 +#include +/* + Rocksdb implements their own pthread_key functions + undefine some my_pthread.h macros +*/ +#undef pthread_key_create +#undef pthread_key_delete +#undef pthread_setspecific +#undef pthread_getspecific +#endif #include #ifdef MARIAROCKS_NOT_YET #include @@ -64,7 +75,22 @@ class Rdb_thread int 
join() { +#ifndef _WIN32 return pthread_join(m_handle, nullptr); +#else + /* + mysys on Windows creates "detached" threads in pthread_create(). + + m_handle here is the thread id I(it is not reused by the OS + thus it is safe to state there can't be other thread with + the same id at this point). + + If thread is already finished before pthread_join(), + we get EINVAL, and it is safe to ignore and handle this as success. + */ + (void)pthread_join(m_handle, nullptr); + return 0; +#endif } void uninit(); From 7468ccfadf27a0c5f87c8909ee8514bdd52dc4a7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 6 Feb 2017 17:39:08 +0000 Subject: [PATCH 136/233] Copy of commit d1bb19b8f751875472211312c8e810143a7ba4b6 Author: Manuel Ung Date: Fri Feb 3 11:50:34 2017 -0800 Add cardinality stats to information schema Summary: This adds cardinality stats to the INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP table. This is the only missing user collected properties from SST files that we don't expose, which is useful for debugging cardinality bugs. 
Reviewed By: hermanlee Differential Revision: D4509156 fbshipit-source-id: 2d3918a --- storage/rocksdb/event_listener.cc | 31 +- storage/rocksdb/event_listener.h | 35 +- storage/rocksdb/ha_rocksdb.cc | 8818 ++++++++--------- storage/rocksdb/ha_rocksdb.h | 896 +- storage/rocksdb/ha_rocksdb_proto.h | 32 +- storage/rocksdb/logger.h | 27 +- .../rocksdb/r/2pc_group_commit.result | 16 +- .../rocksdb/r/add_index_inplace_crash.result | 5 +- .../rocksdb/r/add_unique_index_inplace.result | 89 + .../rocksdb/r/index_file_map.result | 12 +- .../rocksdb/r/information_schema.result | 6 + .../mysql-test/rocksdb/r/loaddata.result | 2 +- .../rocksdb/r/persistent_cache.result | 11 + .../mysql-test/rocksdb/r/rocksdb.result | 10 +- .../mysql-test/rocksdb/r/trx_info_rpl.result | 4 +- .../mysql-test/rocksdb/r/unique_sec.result | 21 + .../rocksdb/t/2pc_group_commit.test | 8 +- .../rocksdb/t/add_index_inplace_crash.test | 13 +- .../rocksdb/t/add_unique_index_inplace.test | 82 + .../rocksdb/t/information_schema.test | 6 + .../mysql-test/rocksdb/t/loaddata.test | 3 +- .../rocksdb/t/persistent_cache.test | 41 + .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 4 +- .../mysql-test/rocksdb/t/trx_info_rpl.cnf | 2 +- .../mysql-test/rocksdb/t/trx_info_rpl.test | 2 +- .../mysql-test/rocksdb/t/unique_sec.test | 18 + .../include/create_slocket_socket.sh | 2 + .../include/load_data_slocket.sh | 43 + .../include/remove_slocket_socket.sh | 2 + .../include/setup_slocket.inc | 10 + .../rocksdb_hotbackup/include/stream_run.sh | 5 + .../rocksdb_hotbackup/r/slocket.result | 41 + .../r/xbstream_socket.result | 20 + .../rocksdb_hotbackup/t/slocket.test | 46 + .../rocksdb_hotbackup/t/xbstream_socket.test | 22 + .../rocksdb_rpl/r/multiclient_2pc.result | 6 +- .../r/rpl_rocksdb_2pc_crash_recover.result | 10 +- .../r/rpl_skip_trx_api_binlog_format.result | 27 + .../rocksdb_rpl/t/multiclient_2pc.test | 6 +- .../t/rpl_crash_safe_wal_corrupt.cnf | 2 + .../t/rpl_gtid_crash_safe_wal_corrupt.cnf | 2 + 
.../t/rpl_rocksdb_2pc_crash_recover.test | 10 +- .../rpl_skip_trx_api_binlog_format-master.opt | 2 + .../rpl_skip_trx_api_binlog_format-slave.opt | 2 + .../t/rpl_skip_trx_api_binlog_format.test | 51 + .../rocksdb_sys_vars/r/all_vars.result | 2 + .../r/rocksdb_disable_2pc_basic.result | 86 +- ...db_max_background_compactions_basic.result | 45 +- ...rocksdb_persistent_cache_path_basic.result | 13 + ...rocksdb_persistent_cache_size_basic.result | 14 + .../r/rocksdb_rpl_skip_tx_api_basic.result | 68 - .../r/rocksdb_skip_unique_check_basic.result | 163 - .../t/rocksdb_disable_2pc_basic.test | 2 +- ...ksdb_max_background_compactions_basic.test | 11 +- .../rocksdb_persistent_cache_path_basic.test | 16 + ... rocksdb_persistent_cache_size_basic.test} | 8 +- .../t/rocksdb_skip_unique_check_basic.test | 21 - storage/rocksdb/properties_collector.cc | 298 +- storage/rocksdb/properties_collector.h | 132 +- storage/rocksdb/rdb_buff.h | 283 +- storage/rocksdb/rdb_cf_manager.cc | 106 +- storage/rocksdb/rdb_cf_manager.h | 59 +- storage/rocksdb/rdb_cf_options.cc | 160 +- storage/rocksdb/rdb_cf_options.h | 56 +- storage/rocksdb/rdb_compact_filter.h | 74 +- storage/rocksdb/rdb_comparator.h | 72 +- storage/rocksdb/rdb_datadic.cc | 3450 +++---- storage/rocksdb/rdb_datadic.h | 635 +- storage/rocksdb/rdb_i_s.cc | 1724 ++-- storage/rocksdb/rdb_i_s.h | 4 +- storage/rocksdb/rdb_index_merge.cc | 351 +- storage/rocksdb/rdb_index_merge.h | 182 +- storage/rocksdb/rdb_mutex_wrapper.cc | 99 +- storage/rocksdb/rdb_mutex_wrapper.h | 50 +- storage/rocksdb/rdb_perf_context.cc | 176 +- storage/rocksdb/rdb_perf_context.h | 49 +- storage/rocksdb/rdb_sst_info.cc | 294 +- storage/rocksdb/rdb_sst_info.h | 89 +- storage/rocksdb/rdb_threads.cc | 58 +- storage/rocksdb/rdb_threads.h | 73 +- storage/rocksdb/rdb_utils.cc | 151 +- storage/rocksdb/rdb_utils.h | 113 +- 82 files changed, 9204 insertions(+), 10486 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result 
create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_rpl_skip_tx_api_basic.test => 
rocksdb_persistent_cache_size_basic.test} (67%) delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc index e4338f07986..04c433acabe 100644 --- a/storage/rocksdb/event_listener.cc +++ b/storage/rocksdb/event_listener.cc @@ -22,8 +22,8 @@ #include /* MySQL includes */ -#include #include +#include /* MyRocks includes */ #include "./ha_rocksdb.h" @@ -33,10 +33,8 @@ namespace myrocks { static std::vector -extract_index_stats( - const std::vector& files, - const rocksdb::TablePropertiesCollection& props -) { +extract_index_stats(const std::vector &files, + const rocksdb::TablePropertiesCollection &props) { std::vector ret; for (auto fn : files) { const auto it = props.find(fn); @@ -49,11 +47,10 @@ extract_index_stats( } void Rdb_event_listener::update_index_stats( - const rocksdb::TableProperties& props -) { + const rocksdb::TableProperties &props) { DBUG_ASSERT(m_ddl_manager != nullptr); const auto tbl_props = - std::make_shared(props); + std::make_shared(props); std::vector stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats); @@ -62,32 +59,26 @@ void Rdb_event_listener::update_index_stats( } void Rdb_event_listener::OnCompactionCompleted( - rocksdb::DB *db, - const rocksdb::CompactionJobInfo& ci -) { + rocksdb::DB *db, const rocksdb::CompactionJobInfo &ci) { DBUG_ASSERT(db != nullptr); DBUG_ASSERT(m_ddl_manager != nullptr); if (ci.status.ok()) { m_ddl_manager->adjust_stats( - extract_index_stats(ci.output_files, ci.table_properties), - extract_index_stats(ci.input_files, ci.table_properties)); + extract_index_stats(ci.output_files, ci.table_properties), + extract_index_stats(ci.input_files, ci.table_properties)); } } void Rdb_event_listener::OnFlushCompleted( - rocksdb::DB* db, - const rocksdb::FlushJobInfo& flush_job_info -) { + rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) { DBUG_ASSERT(db != nullptr); 
update_index_stats(flush_job_info.table_properties); } void Rdb_event_listener::OnExternalFileIngested( - rocksdb::DB* db, - const rocksdb::ExternalFileIngestionInfo& info -) { + rocksdb::DB *db, const rocksdb::ExternalFileIngestionInfo &info) { DBUG_ASSERT(db != nullptr); update_index_stats(info.table_properties); } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h index 8895da45c7b..d535031644b 100644 --- a/storage/rocksdb/event_listener.h +++ b/storage/rocksdb/event_listener.h @@ -21,29 +21,26 @@ namespace myrocks { class Rdb_ddl_manager; -class Rdb_event_listener : public rocksdb::EventListener -{ - public: - Rdb_event_listener(const Rdb_event_listener&) = delete; - Rdb_event_listener& operator=(const Rdb_event_listener&) = delete; +class Rdb_event_listener : public rocksdb::EventListener { +public: + Rdb_event_listener(const Rdb_event_listener &) = delete; + Rdb_event_listener &operator=(const Rdb_event_listener &) = delete; - explicit Rdb_event_listener(Rdb_ddl_manager* const ddl_manager) : - m_ddl_manager(ddl_manager) { - } + explicit Rdb_event_listener(Rdb_ddl_manager *const ddl_manager) + : m_ddl_manager(ddl_manager) {} - void OnCompactionCompleted( - rocksdb::DB* db, const rocksdb::CompactionJobInfo& ci) override; - void OnFlushCompleted( - rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override; + void OnCompactionCompleted(rocksdb::DB *db, + const rocksdb::CompactionJobInfo &ci) override; + void OnFlushCompleted(rocksdb::DB *db, + const rocksdb::FlushJobInfo &flush_job_info) override; void OnExternalFileIngested( - rocksdb::DB* db, const rocksdb::ExternalFileIngestionInfo& ingestion_info) - override; + rocksdb::DB *db, + const rocksdb::ExternalFileIngestionInfo &ingestion_info) override; - private: - Rdb_ddl_manager* m_ddl_manager; +private: + Rdb_ddl_manager *m_ddl_manager; - void update_index_stats( - const rocksdb::TableProperties& props); + void 
update_index_stats(const rocksdb::TableProperties &props); }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index a557f96b92f..59c6e1ab33b 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif #define MYSQL_SERVER 1 @@ -24,11 +24,11 @@ #include "./ha_rocksdb.h" /* C++ standard header files */ -#include +#include #include +#include #include #include -#include /* MySQL includes */ #include "./debug_sync.h" @@ -36,12 +36,13 @@ #include "./my_stacktrace.h" #include "./sql_audit.h" #include "./sql_table.h" -#include #include #include +#include /* RocksDB includes */ #include "rocksdb/compaction_filter.h" +#include "rocksdb/persistent_cache.h" #include "rocksdb/rate_limiter.h" #include "rocksdb/slice_transform.h" #include "rocksdb/utilities/checkpoint.h" @@ -61,8 +62,7 @@ #include "./rdb_threads.h" // Internal MySQL APIs not exposed in any header. -extern "C" -{ +extern "C" { /** Mark transaction to rollback and mark error as fatal to a sub-statement. 
@param thd Thread handle @@ -107,17 +107,15 @@ static handler *rocksdb_create_handler(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg, my_core::MEM_ROOT *mem_root); -bool can_use_bloom_filter(THD *thd, - const Rdb_key_def& kd, +bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, const rocksdb::Slice &eq_cond, - const bool use_all_keys, - bool is_ascending); + const bool use_all_keys, bool is_ascending); /////////////////////////////////////////////////////////// // Parameters and settings /////////////////////////////////////////////////////////// -static char * rocksdb_default_cf_options; -static char * rocksdb_override_cf_options; +static char *rocksdb_default_cf_options; +static char *rocksdb_override_cf_options; Rdb_cf_options rocksdb_cf_options_map; /////////////////////////////////////////////////////////// @@ -125,20 +123,18 @@ Rdb_cf_options rocksdb_cf_options_map; /////////////////////////////////////////////////////////// handlerton *rocksdb_hton; -rocksdb::TransactionDB *rdb= nullptr; +rocksdb::TransactionDB *rdb = nullptr; static std::shared_ptr rocksdb_stats; static std::unique_ptr flashcache_aware_env; -static std::shared_ptr - properties_collector_factory; +static std::shared_ptr properties_collector_factory; Rdb_dict_manager dict_manager; Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; -const char* m_mysql_gtid; +const char *m_mysql_gtid; Rdb_binlog_manager binlog_manager; - /** MyRocks background thread control N.B. This is besides RocksDB's own background threads @@ -147,127 +143,107 @@ Rdb_binlog_manager binlog_manager; static Rdb_background_thread rdb_bg_thread; - // List of table names (using regex) that are exceptions to the strict // collation check requirement. Regex_list_handler *rdb_collation_exceptions; -static const char* const ERRSTR_ROLLBACK_ONLY - = "This transaction was rolled back and cannot be " +static const char *const ERRSTR_ROLLBACK_ONLY = + "This transaction was rolled back and cannot be " "committed. 
Only supported operation is to roll it back, " "so all pending changes will be discarded. " "Please restart another transaction."; - -static void -rocksdb_flush_all_memtables() -{ - const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); +static void rocksdb_flush_all_memtables() { + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); for (const auto &cf_handle : cf_manager.get_all_cf()) { rdb->Flush(rocksdb::FlushOptions(), cf_handle); } } -static void -rocksdb_compact_column_family_stub(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - const void* const save) -{ -} +static void rocksdb_compact_column_family_stub( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) {} -static int -rocksdb_compact_column_family(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - struct st_mysql_value* const value) -{ +static int rocksdb_compact_column_family(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + struct st_mysql_value *const value) { char buff[STRING_BUFFER_USUAL_SIZE]; - int len = sizeof(buff); + int len = sizeof(buff); DBUG_ASSERT(value != nullptr); - if (const char* const cf = value->val_str(value, buff, &len)) { + if (const char *const cf = value->val_str(value, buff, &len)) { bool is_automatic; auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic); if (cfh != nullptr && rdb != nullptr) { - sql_print_information("RocksDB: Manual compaction of column family: %s\n", cf); + sql_print_information("RocksDB: Manual compaction of column family: %s\n", + cf); rdb->CompactRange(rocksdb::CompactRangeOptions(), cfh, nullptr, nullptr); } } - return 0; + return HA_EXIT_SUCCESS; } /////////////////////////////////////////////////////////// // Hash map: table name => open table handler /////////////////////////////////////////////////////////// -namespace // anonymous namespace = not visible outside this source file +namespace // 
anonymous namespace = not visible outside this source file { -const ulong TABLE_HASH_SIZE= 32; +const ulong TABLE_HASH_SIZE = 32; -struct Rdb_open_tables_map -{ +struct Rdb_open_tables_map { /* Hash table used to track the handlers of open tables */ - my_core::HASH m_hash; + my_core::HASH m_hash; /* The mutex used to protect the hash table */ mutable mysql_mutex_t m_mutex; - void init_hash(void) - { - (void) my_hash_init(&m_hash, my_core::system_charset_info, TABLE_HASH_SIZE, - 0, 0, - (my_hash_get_key) Rdb_open_tables_map::get_hash_key, - 0, 0); + void init_hash(void) { + (void)my_hash_init(&m_hash, my_core::system_charset_info, TABLE_HASH_SIZE, + 0, 0, (my_hash_get_key)Rdb_open_tables_map::get_hash_key, + 0, 0); } - void free_hash(void) - { - my_hash_free(&m_hash); - } + void free_hash(void) { my_hash_free(&m_hash); } - static uchar* get_hash_key(Rdb_table_handler* const table_handler, - size_t* const length, - my_bool not_used __attribute__((__unused__))); + static uchar *get_hash_key(Rdb_table_handler *const table_handler, + size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))); - Rdb_table_handler* get_table_handler(const char* const table_name); - void release_table_handler(Rdb_table_handler* const table_handler); + Rdb_table_handler *get_table_handler(const char *const table_name); + void release_table_handler(Rdb_table_handler *const table_handler); std::vector get_table_names(void) const; }; -} // anonymous namespace +} // anonymous namespace static Rdb_open_tables_map rdb_open_tables; - -static std::string rdb_normalize_dir(std::string dir) -{ - while (dir.size() > 0 && dir.back() == '/') - { +static std::string rdb_normalize_dir(std::string dir) { + while (dir.size() > 0 && dir.back() == '/') { dir.resize(dir.size() - 1); } return dir; } - static int rocksdb_create_checkpoint( - THD* const thd __attribute__((__unused__)), - struct st_mysql_sys_var* const var __attribute__((__unused__)), - void* const save __attribute__((__unused__)), - 
struct st_mysql_value* const value) -{ + THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const save MY_ATTRIBUTE((__unused__)), + struct st_mysql_value *const value) { char buf[FN_REFLEN]; int len = sizeof(buf); - const char* const checkpoint_dir_raw= value->val_str(value, buf, &len); + const char *const checkpoint_dir_raw = value->val_str(value, buf, &len); if (checkpoint_dir_raw) { if (rdb != nullptr) { - std::string checkpoint_dir= rdb_normalize_dir(checkpoint_dir_raw); + std::string checkpoint_dir = rdb_normalize_dir(checkpoint_dir_raw); // NO_LINT_DEBUG sql_print_information("RocksDB: creating checkpoint in directory : %s\n", - checkpoint_dir.c_str()); - rocksdb::Checkpoint* checkpoint; + checkpoint_dir.c_str()); + rocksdb::Checkpoint *checkpoint; auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint); if (status.ok()) { status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str()); @@ -284,613 +260,612 @@ static int rocksdb_create_checkpoint( delete checkpoint; } else { const std::string err_text(status.ToString()); - my_printf_error(ER_UNKNOWN_ERROR, - "RocksDB: failed to initialize checkpoint. status %d %s\n", - MYF(0), status.code(), err_text.c_str()); + my_printf_error( + ER_UNKNOWN_ERROR, + "RocksDB: failed to initialize checkpoint. 
status %d %s\n", MYF(0), + status.code(), err_text.c_str()); } return status.code(); - } + } } return HA_ERR_INTERNAL_ERROR; } /* This method is needed to indicate that the ROCKSDB_CREATE_CHECKPOINT command is not read-only */ -static void -rocksdb_create_checkpoint_stub(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - const void* const save) -{ -} +static void rocksdb_create_checkpoint_stub(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save) {} -static void -rocksdb_force_flush_memtable_now_stub(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - const void* const save) -{ -} +static void rocksdb_force_flush_memtable_now_stub( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) {} -static int -rocksdb_force_flush_memtable_now(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - struct st_mysql_value* const value) -{ +static int rocksdb_force_flush_memtable_now( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + struct st_mysql_value *const value) { sql_print_information("RocksDB: Manual memtable flush\n"); rocksdb_flush_all_memtables(); - return 0; + return HA_EXIT_SUCCESS; } static void rocksdb_drop_index_wakeup_thread( - my_core::THD* const thd __attribute__((__unused__)), - struct st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr __attribute__((__unused__)), - const void* const save); + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save); -static my_bool rocksdb_pause_background_work= 0; +static my_bool rocksdb_pause_background_work = 0; static mysql_mutex_t rdb_sysvars_mutex; static void rocksdb_set_pause_background_work( - my_core::THD* const thd __attribute__((__unused__)), - struct 
st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr __attribute__((__unused__)), - const void* const save) -{ + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { mysql_mutex_lock(&rdb_sysvars_mutex); - const bool pause_requested= *static_cast(save); + const bool pause_requested = *static_cast(save); if (rocksdb_pause_background_work != pause_requested) { if (pause_requested) { rdb->PauseBackgroundWork(); } else { rdb->ContinueBackgroundWork(); } - rocksdb_pause_background_work= pause_requested; + rocksdb_pause_background_work = pause_requested; } mysql_mutex_unlock(&rdb_sysvars_mutex); } -static void -rocksdb_set_compaction_options(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +static void rocksdb_set_compaction_options(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save); -static void -rocksdb_set_table_stats_sampling_pct(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +static void rocksdb_set_table_stats_sampling_pct(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); -static void -rocksdb_set_rate_limiter_bytes_per_sec(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); static void rdb_set_collation_exception_list(const char *exception_list); -static void -rocksdb_set_collation_exception_list(THD* thd, - struct st_mysql_sys_var* var, - void* var_ptr, - const void* save); +static void rocksdb_set_collation_exception_list(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); static void -rocksdb_set_bulk_load(THD* thd, - struct st_mysql_sys_var* var __attribute__((__unused__)), - void* var_ptr, - 
const void* save); +rocksdb_set_bulk_load(THD *thd, + struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), + void *var_ptr, const void *save); + +static void rocksdb_set_max_background_compactions( + THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save); ////////////////////////////////////////////////////////////////////////////// // Options definitions ////////////////////////////////////////////////////////////////////////////// static long long rocksdb_block_cache_size; /* Use unsigned long long instead of uint64_t because of MySQL compatibility */ -static unsigned long long // NOLINT(runtime/int) +static unsigned long long // NOLINT(runtime/int) rocksdb_rate_limiter_bytes_per_sec; +static unsigned long // NOLINT(runtime/int) + rocksdb_persistent_cache_size; static uint64_t rocksdb_info_log_level; -static char * rocksdb_wal_dir; +static char *rocksdb_wal_dir; +static char *rocksdb_persistent_cache_path; static uint64_t rocksdb_index_type; static char rocksdb_background_sync; static uint32_t rocksdb_debug_optimizer_n_rows; static my_bool rocksdb_debug_optimizer_no_zero_cardinality; static uint32_t rocksdb_wal_recovery_mode; static uint32_t rocksdb_access_hint_on_compaction_start; -static char * rocksdb_compact_cf_name; -static char * rocksdb_checkpoint_name; +static char *rocksdb_compact_cf_name; +static char *rocksdb_checkpoint_name; static my_bool rocksdb_signal_drop_index_thread; -static my_bool rocksdb_strict_collation_check= 1; -static my_bool rocksdb_disable_2pc= 0; -static char * rocksdb_strict_collation_exceptions; -static my_bool rocksdb_collect_sst_properties= 1; -static my_bool rocksdb_force_flush_memtable_now_var= 0; -static uint64_t rocksdb_number_stat_computes= 0; -static uint32_t rocksdb_seconds_between_stat_computes= 3600; -static long long rocksdb_compaction_sequential_deletes= 0l; -static long long rocksdb_compaction_sequential_deletes_window= 0l; -static long long 
rocksdb_compaction_sequential_deletes_file_size= 0l; +static my_bool rocksdb_strict_collation_check = 1; +static my_bool rocksdb_enable_2pc = 0; +static char *rocksdb_strict_collation_exceptions; +static my_bool rocksdb_collect_sst_properties = 1; +static my_bool rocksdb_force_flush_memtable_now_var = 0; +static uint64_t rocksdb_number_stat_computes = 0; +static uint32_t rocksdb_seconds_between_stat_computes = 3600; +static long long rocksdb_compaction_sequential_deletes = 0l; +static long long rocksdb_compaction_sequential_deletes_window = 0l; +static long long rocksdb_compaction_sequential_deletes_file_size = 0l; static uint32_t rocksdb_validate_tables = 1; -static char * rocksdb_datadir; +static char *rocksdb_datadir; static uint32_t rocksdb_table_stats_sampling_pct; -static my_bool rocksdb_enable_bulk_load_api= 1; -static my_bool rpl_skip_tx_api_var= 0; -static my_bool rocksdb_print_snapshot_conflict_queries= 0; +static my_bool rocksdb_enable_bulk_load_api = 1; +static my_bool rocksdb_print_snapshot_conflict_queries = 0; std::atomic rocksdb_snapshot_conflict_errors(0); std::atomic rocksdb_wal_group_syncs(0); -static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) -{ +static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) { rocksdb::DBOptions o; - o.create_if_missing= true; + o.create_if_missing = true; o.listeners.push_back(std::make_shared(&ddl_manager)); - o.info_log_level= rocksdb::InfoLogLevel::INFO_LEVEL; - o.max_subcompactions= DEFAULT_SUBCOMPACTIONS; + o.info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; + o.max_subcompactions = DEFAULT_SUBCOMPACTIONS; return o; } -static rocksdb::DBOptions rocksdb_db_options= rdb_init_rocksdb_db_options(); +static rocksdb::DBOptions rocksdb_db_options = rdb_init_rocksdb_db_options(); static rocksdb::BlockBasedTableOptions rocksdb_tbl_options; static std::shared_ptr rocksdb_rate_limiter; /* This enum needs to be kept up to date with rocksdb::InfoLogLevel */ -static const char* info_log_level_names[] = { - 
"debug_level", - "info_level", - "warn_level", - "error_level", - "fatal_level", - NullS -}; +static const char *info_log_level_names[] = {"debug_level", "info_level", + "warn_level", "error_level", + "fatal_level", NullS}; static TYPELIB info_log_level_typelib = { - array_elements(info_log_level_names) - 1, - "info_log_level_typelib", - info_log_level_names, - nullptr -}; + array_elements(info_log_level_names) - 1, "info_log_level_typelib", + info_log_level_names, nullptr}; -static void -rocksdb_set_rocksdb_info_log_level(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - const void* const save) -{ +static void rocksdb_set_rocksdb_info_log_level( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) { DBUG_ASSERT(save != nullptr); mysql_mutex_lock(&rdb_sysvars_mutex); - rocksdb_info_log_level = *static_cast(save); + rocksdb_info_log_level = *static_cast(save); rocksdb_db_options.info_log->SetInfoLogLevel( static_cast(rocksdb_info_log_level)); mysql_mutex_unlock(&rdb_sysvars_mutex); } -static const char* index_type_names[] = { - "kBinarySearch", - "kHashSearch", - NullS -}; +static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; -static TYPELIB index_type_typelib = { - array_elements(index_type_names) - 1, - "index_type_typelib", - index_type_names, - nullptr -}; +static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, + "index_type_typelib", index_type_names, + nullptr}; -const ulong RDB_MAX_LOCK_WAIT_SECONDS= 1024*1024*1024; -const ulong RDB_MAX_ROW_LOCKS= 1024*1024*1024; -const ulong RDB_DEFAULT_BULK_LOAD_SIZE= 1000; -const ulong RDB_MAX_BULK_LOAD_SIZE= 1024*1024*1024; -const size_t RDB_DEFAULT_MERGE_BUF_SIZE= 64*1024*1024; -const size_t RDB_MIN_MERGE_BUF_SIZE= 100; -const size_t RDB_DEFAULT_MERGE_COMBINE_READ_SIZE= 1024*1024*1024; -const size_t RDB_MIN_MERGE_COMBINE_READ_SIZE= 100; -const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE= 512*1024*1024; 
-const int64 RDB_MIN_BLOCK_CACHE_SIZE= 1024; -const int RDB_MAX_CHECKSUMS_PCT= 100; +const ulong RDB_MAX_LOCK_WAIT_SECONDS = 1024 * 1024 * 1024; +const ulong RDB_MAX_ROW_LOCKS = 1024 * 1024 * 1024; +const ulong RDB_DEFAULT_BULK_LOAD_SIZE = 1000; +const ulong RDB_MAX_BULK_LOAD_SIZE = 1024 * 1024 * 1024; +const size_t RDB_DEFAULT_MERGE_BUF_SIZE = 64 * 1024 * 1024; +const size_t RDB_MIN_MERGE_BUF_SIZE = 100; +const size_t RDB_DEFAULT_MERGE_COMBINE_READ_SIZE = 1024 * 1024 * 1024; +const size_t RDB_MIN_MERGE_COMBINE_READ_SIZE = 100; +const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; +const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; +const int RDB_MAX_CHECKSUMS_PCT = 100; -//TODO: 0 means don't wait at all, and we don't support it yet? +// TODO: 0 means don't wait at all, and we don't support it yet? static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, - "Number of seconds to wait for lock", - nullptr, nullptr, /*default*/ 1, /*min*/ 1, - /*max*/ RDB_MAX_LOCK_WAIT_SECONDS, 0); + "Number of seconds to wait for lock", nullptr, + nullptr, /*default*/ 1, /*min*/ 1, + /*max*/ RDB_MAX_LOCK_WAIT_SECONDS, 0); static MYSQL_THDVAR_BOOL(deadlock_detect, PLUGIN_VAR_RQCMDARG, - "Enables deadlock detection", nullptr, nullptr, FALSE); + "Enables deadlock detection", nullptr, nullptr, FALSE); -static MYSQL_THDVAR_BOOL(trace_sst_api, PLUGIN_VAR_RQCMDARG, - "Generate trace output in the log for each call to the SstFileWriter", - nullptr, nullptr, FALSE); +static MYSQL_THDVAR_BOOL( + trace_sst_api, PLUGIN_VAR_RQCMDARG, + "Generate trace output in the log for each call to the SstFileWriter", + nullptr, nullptr, FALSE); -static MYSQL_THDVAR_BOOL(bulk_load, PLUGIN_VAR_RQCMDARG, - "Use bulk-load mode for inserts. This enables both " - "rocksdb_skip_unique_check and rocksdb_commit_in_the_middle.", - nullptr, rocksdb_set_bulk_load, FALSE); +static MYSQL_THDVAR_BOOL( + bulk_load, PLUGIN_VAR_RQCMDARG, + "Use bulk-load mode for inserts. 
This disables " + "unique_checks and enables rocksdb_commit_in_the_middle.", + nullptr, rocksdb_set_bulk_load, FALSE); -static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, - rocksdb_enable_bulk_load_api, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Enables using SstFileWriter for bulk loading", - nullptr, nullptr, rocksdb_enable_bulk_load_api); +static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, rocksdb_enable_bulk_load_api, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables using SstFileWriter for bulk loading", + nullptr, nullptr, rocksdb_enable_bulk_load_api); -static MYSQL_THDVAR_STR(tmpdir, - PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_MEMALLOC, - "Directory for temporary files during DDL operations.", - nullptr, nullptr, ""); +static MYSQL_THDVAR_STR(tmpdir, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_MEMALLOC, + "Directory for temporary files during DDL operations.", + nullptr, nullptr, ""); -static MYSQL_THDVAR_STR(skip_unique_check_tables, - PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, - "Skip unique constraint checking for the specified tables", nullptr, nullptr, - ".*"); +static MYSQL_THDVAR_STR( + skip_unique_check_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "Skip unique constraint checking for the specified tables", nullptr, + nullptr, ".*"); -static MYSQL_THDVAR_BOOL(skip_unique_check, PLUGIN_VAR_RQCMDARG, - "Skip unique constraint checking for all tables", nullptr, nullptr, FALSE); +static MYSQL_THDVAR_BOOL( + commit_in_the_middle, PLUGIN_VAR_RQCMDARG, + "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, " + "update and delete", + nullptr, nullptr, FALSE); -static MYSQL_THDVAR_BOOL(commit_in_the_middle, PLUGIN_VAR_RQCMDARG, - "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, " - "update and delete", - nullptr, nullptr, FALSE); - -static MYSQL_THDVAR_STR(read_free_rpl_tables, - PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, - "List of tables that will use read-free replication on the slave " - "(i.e. 
not lookup a row during replication)", nullptr, nullptr, ""); - -static MYSQL_SYSVAR_BOOL( - rpl_skip_tx_api, - rpl_skip_tx_api_var, - PLUGIN_VAR_RQCMDARG, - "Use write batches for replication thread instead of tx api", nullptr, - nullptr, FALSE); +static MYSQL_THDVAR_STR( + read_free_rpl_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "List of tables that will use read-free replication on the slave " + "(i.e. not lookup a row during replication)", + nullptr, nullptr, ""); static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG, - "Skip using bloom filter for reads", nullptr, nullptr, FALSE); + "Skip using bloom filter for reads", nullptr, nullptr, + FALSE); static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, - "Maximum number of locks a transaction can have", - nullptr, nullptr, - /*default*/ RDB_MAX_ROW_LOCKS, - /*min*/ 1, - /*max*/ RDB_MAX_ROW_LOCKS, 0); + "Maximum number of locks a transaction can have", + nullptr, nullptr, + /*default*/ RDB_MAX_ROW_LOCKS, + /*min*/ 1, + /*max*/ RDB_MAX_ROW_LOCKS, 0); -static MYSQL_THDVAR_BOOL(lock_scanned_rows, PLUGIN_VAR_RQCMDARG, - "Take and hold locks on rows that are scanned but not updated", - nullptr, nullptr, FALSE); +static MYSQL_THDVAR_BOOL( + lock_scanned_rows, PLUGIN_VAR_RQCMDARG, + "Take and hold locks on rows that are scanned but not updated", nullptr, + nullptr, FALSE); static MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG, - "Max #records in a batch for bulk-load mode", - nullptr, nullptr, - /*default*/ RDB_DEFAULT_BULK_LOAD_SIZE, - /*min*/ 1, - /*max*/ RDB_MAX_BULK_LOAD_SIZE, 0); + "Max #records in a batch for bulk-load mode", nullptr, + nullptr, + /*default*/ RDB_DEFAULT_BULK_LOAD_SIZE, + /*min*/ 1, + /*max*/ RDB_MAX_BULK_LOAD_SIZE, 0); -static MYSQL_THDVAR_ULONGLONG(merge_buf_size, PLUGIN_VAR_RQCMDARG, - "Size to allocate for merge sort buffers written out to disk " - "during inplace index creation.", - nullptr, nullptr, - /* default (64MB) */ RDB_DEFAULT_MERGE_BUF_SIZE, 
- /* min (100B) */ RDB_MIN_MERGE_BUF_SIZE, - /* max */ SIZE_T_MAX, 1); +static MYSQL_THDVAR_ULONGLONG( + merge_buf_size, PLUGIN_VAR_RQCMDARG, + "Size to allocate for merge sort buffers written out to disk " + "during inplace index creation.", + nullptr, nullptr, + /* default (64MB) */ RDB_DEFAULT_MERGE_BUF_SIZE, + /* min (100B) */ RDB_MIN_MERGE_BUF_SIZE, + /* max */ SIZE_T_MAX, 1); -static MYSQL_THDVAR_ULONGLONG(merge_combine_read_size, PLUGIN_VAR_RQCMDARG, - "Size that we have to work with during combine (reading from disk) phase of " - "external sort during fast index creation.", - nullptr, nullptr, - /* default (1GB) */ RDB_DEFAULT_MERGE_COMBINE_READ_SIZE, - /* min (100B) */ RDB_MIN_MERGE_COMBINE_READ_SIZE, - /* max */ SIZE_T_MAX, 1); +static MYSQL_THDVAR_ULONGLONG( + merge_combine_read_size, PLUGIN_VAR_RQCMDARG, + "Size that we have to work with during combine (reading from disk) phase " + "of " + "external sort during fast index creation.", + nullptr, nullptr, + /* default (1GB) */ RDB_DEFAULT_MERGE_COMBINE_READ_SIZE, + /* min (100B) */ RDB_MIN_MERGE_COMBINE_READ_SIZE, + /* max */ SIZE_T_MAX, 1); -static MYSQL_SYSVAR_BOOL(create_if_missing, - *reinterpret_cast(&rocksdb_db_options.create_if_missing), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::create_if_missing for RocksDB", - nullptr, nullptr, rocksdb_db_options.create_if_missing); +static MYSQL_SYSVAR_BOOL( + create_if_missing, + *reinterpret_cast(&rocksdb_db_options.create_if_missing), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::create_if_missing for RocksDB", nullptr, nullptr, + rocksdb_db_options.create_if_missing); -static MYSQL_SYSVAR_BOOL(create_missing_column_families, - *reinterpret_cast( - &rocksdb_db_options.create_missing_column_families), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::create_missing_column_families for RocksDB", - nullptr, nullptr, rocksdb_db_options.create_missing_column_families); +static MYSQL_SYSVAR_BOOL( + 
create_missing_column_families, + *reinterpret_cast( + &rocksdb_db_options.create_missing_column_families), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::create_missing_column_families for RocksDB", nullptr, nullptr, + rocksdb_db_options.create_missing_column_families); -static MYSQL_SYSVAR_BOOL(error_if_exists, - *reinterpret_cast(&rocksdb_db_options.error_if_exists), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::error_if_exists for RocksDB", - nullptr, nullptr, rocksdb_db_options.error_if_exists); +static MYSQL_SYSVAR_BOOL( + error_if_exists, + *reinterpret_cast(&rocksdb_db_options.error_if_exists), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::error_if_exists for RocksDB", nullptr, nullptr, + rocksdb_db_options.error_if_exists); -static MYSQL_SYSVAR_BOOL(paranoid_checks, - *reinterpret_cast(&rocksdb_db_options.paranoid_checks), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::paranoid_checks for RocksDB", - nullptr, nullptr, rocksdb_db_options.paranoid_checks); +static MYSQL_SYSVAR_BOOL( + paranoid_checks, + *reinterpret_cast(&rocksdb_db_options.paranoid_checks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::paranoid_checks for RocksDB", nullptr, nullptr, + rocksdb_db_options.paranoid_checks); -static MYSQL_SYSVAR_ULONGLONG(rate_limiter_bytes_per_sec, - rocksdb_rate_limiter_bytes_per_sec, - PLUGIN_VAR_RQCMDARG, - "DBOptions::rate_limiter bytes_per_sec for RocksDB", - nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, - /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); +static MYSQL_SYSVAR_ULONGLONG( + rate_limiter_bytes_per_sec, rocksdb_rate_limiter_bytes_per_sec, + PLUGIN_VAR_RQCMDARG, "DBOptions::rate_limiter bytes_per_sec for RocksDB", + nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, + /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); -static MYSQL_SYSVAR_ENUM(info_log_level, - rocksdb_info_log_level, - PLUGIN_VAR_RQCMDARG, - "Filter level for 
info logs to be written mysqld error log. " - "Valid values include 'debug_level', 'info_level', 'warn_level'" - "'error_level' and 'fatal_level'.", - nullptr, rocksdb_set_rocksdb_info_log_level, - rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib); +static MYSQL_SYSVAR_ENUM( + info_log_level, rocksdb_info_log_level, PLUGIN_VAR_RQCMDARG, + "Filter level for info logs to be written mysqld error log. " + "Valid values include 'debug_level', 'info_level', 'warn_level'" + "'error_level' and 'fatal_level'.", + nullptr, rocksdb_set_rocksdb_info_log_level, + rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib); -static MYSQL_THDVAR_INT(perf_context_level, - PLUGIN_VAR_RQCMDARG, - "Perf Context Level for rocksdb internal timer stat collection", - nullptr, nullptr, - /* default */ rocksdb::PerfLevel::kUninitialized, - /* min */ rocksdb::PerfLevel::kUninitialized, - /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0); +static MYSQL_THDVAR_INT( + perf_context_level, PLUGIN_VAR_RQCMDARG, + "Perf Context Level for rocksdb internal timer stat collection", nullptr, + nullptr, + /* default */ rocksdb::PerfLevel::kUninitialized, + /* min */ rocksdb::PerfLevel::kUninitialized, + /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0); -static MYSQL_SYSVAR_UINT(wal_recovery_mode, - rocksdb_wal_recovery_mode, - PLUGIN_VAR_RQCMDARG, - "DBOptions::wal_recovery_mode for RocksDB", - nullptr, nullptr, - /* default */ (uint) rocksdb::WALRecoveryMode::kPointInTimeRecovery, - /* min */ (uint) rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, - /* max */ (uint) rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); +static MYSQL_SYSVAR_UINT( + wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG, + "DBOptions::wal_recovery_mode for RocksDB", nullptr, nullptr, + /* default */ (uint)rocksdb::WALRecoveryMode::kPointInTimeRecovery, + /* min */ (uint)rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, + /* max */ 
(uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); static MYSQL_SYSVAR_ULONG(compaction_readahead_size, - rocksdb_db_options.compaction_readahead_size, - PLUGIN_VAR_RQCMDARG, - "DBOptions::compaction_readahead_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.compaction_readahead_size, - /* min */ 0L, /* max */ ULONG_MAX, 0); + rocksdb_db_options.compaction_readahead_size, + PLUGIN_VAR_RQCMDARG, + "DBOptions::compaction_readahead_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.compaction_readahead_size, + /* min */ 0L, /* max */ ULONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(new_table_reader_for_compaction_inputs, - *reinterpret_cast - (&rocksdb_db_options.new_table_reader_for_compaction_inputs), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", - nullptr, nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs); +static MYSQL_SYSVAR_BOOL( + new_table_reader_for_compaction_inputs, + *reinterpret_cast( + &rocksdb_db_options.new_table_reader_for_compaction_inputs), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", nullptr, + nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs); -static MYSQL_SYSVAR_UINT(access_hint_on_compaction_start, - rocksdb_access_hint_on_compaction_start, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::access_hint_on_compaction_start for RocksDB", - nullptr, nullptr, - /* default */ (uint) rocksdb::Options::AccessHint::NORMAL, - /* min */ (uint) rocksdb::Options::AccessHint::NONE, - /* max */ (uint) rocksdb::Options::AccessHint::WILLNEED, 0); +static MYSQL_SYSVAR_UINT( + access_hint_on_compaction_start, rocksdb_access_hint_on_compaction_start, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::access_hint_on_compaction_start for RocksDB", nullptr, nullptr, + /* default */ (uint)rocksdb::Options::AccessHint::NORMAL, + /* min */ 
(uint)rocksdb::Options::AccessHint::NONE, + /* max */ (uint)rocksdb::Options::AccessHint::WILLNEED, 0); -static MYSQL_SYSVAR_BOOL(allow_concurrent_memtable_write, - *reinterpret_cast( - &rocksdb_db_options.allow_concurrent_memtable_write), - PLUGIN_VAR_RQCMDARG, - "DBOptions::allow_concurrent_memtable_write for RocksDB", - nullptr, nullptr, false); +static MYSQL_SYSVAR_BOOL( + allow_concurrent_memtable_write, + *reinterpret_cast( + &rocksdb_db_options.allow_concurrent_memtable_write), + PLUGIN_VAR_RQCMDARG, + "DBOptions::allow_concurrent_memtable_write for RocksDB", nullptr, nullptr, + false); -static MYSQL_SYSVAR_BOOL(enable_write_thread_adaptive_yield, - *reinterpret_cast( - &rocksdb_db_options.enable_write_thread_adaptive_yield), - PLUGIN_VAR_RQCMDARG, - "DBOptions::enable_write_thread_adaptive_yield for RocksDB", - nullptr, nullptr, false); +static MYSQL_SYSVAR_BOOL( + enable_write_thread_adaptive_yield, + *reinterpret_cast( + &rocksdb_db_options.enable_write_thread_adaptive_yield), + PLUGIN_VAR_RQCMDARG, + "DBOptions::enable_write_thread_adaptive_yield for RocksDB", nullptr, + nullptr, false); -static MYSQL_SYSVAR_INT(max_open_files, - rocksdb_db_options.max_open_files, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_open_files for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_open_files, - /* min */ -1, /* max */ INT_MAX, 0); +static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options.max_open_files, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_open_files for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_open_files, + /* min */ -1, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_ULONG(max_total_wal_size, - rocksdb_db_options.max_total_wal_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_total_wal_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_total_wal_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.max_total_wal_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + 
"DBOptions::max_total_wal_size for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_total_wal_size, + /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(disabledatasync, - *reinterpret_cast(&rocksdb_db_options.disableDataSync), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::disableDataSync for RocksDB", - nullptr, nullptr, rocksdb_db_options.disableDataSync); +static MYSQL_SYSVAR_BOOL( + disabledatasync, + *reinterpret_cast(&rocksdb_db_options.disableDataSync), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::disableDataSync for RocksDB", nullptr, nullptr, + rocksdb_db_options.disableDataSync); -static MYSQL_SYSVAR_BOOL(use_fsync, - *reinterpret_cast(&rocksdb_db_options.use_fsync), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::use_fsync for RocksDB", - nullptr, nullptr, rocksdb_db_options.use_fsync); +static MYSQL_SYSVAR_BOOL( + use_fsync, *reinterpret_cast(&rocksdb_db_options.use_fsync), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_fsync for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_fsync); static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::wal_dir for RocksDB", - nullptr, nullptr, rocksdb_db_options.wal_dir.c_str()); + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::wal_dir for RocksDB", nullptr, nullptr, + rocksdb_db_options.wal_dir.c_str()); -static MYSQL_SYSVAR_ULONG(delete_obsolete_files_period_micros, - rocksdb_db_options.delete_obsolete_files_period_micros, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::delete_obsolete_files_period_micros for RocksDB", - nullptr, nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, - /* min */ 0L, /* max */ LONG_MAX, 0); +static MYSQL_SYSVAR_STR( + persistent_cache_path, rocksdb_persistent_cache_path, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path for BlockBasedTableOptions::persistent_cache for RocksDB", nullptr, + nullptr, ""); + +static 
MYSQL_SYSVAR_ULONG( + persistent_cache_size, rocksdb_persistent_cache_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of cache for BlockBasedTableOptions::persistent_cache for RocksDB", + nullptr, nullptr, rocksdb_persistent_cache_size, + /* min */ 0L, /* max */ ULONG_MAX, 0); + +static MYSQL_SYSVAR_ULONG( + delete_obsolete_files_period_micros, + rocksdb_db_options.delete_obsolete_files_period_micros, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::delete_obsolete_files_period_micros for RocksDB", nullptr, + nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_INT(base_background_compactions, - rocksdb_db_options.base_background_compactions, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::base_background_compactions for RocksDB", - nullptr, nullptr, rocksdb_db_options.base_background_compactions, - /* min */ -1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); + rocksdb_db_options.base_background_compactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::base_background_compactions for RocksDB", + nullptr, nullptr, + rocksdb_db_options.base_background_compactions, + /* min */ -1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); static MYSQL_SYSVAR_INT(max_background_compactions, - rocksdb_db_options.max_background_compactions, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_background_compactions for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_background_compactions, - /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); + rocksdb_db_options.max_background_compactions, + PLUGIN_VAR_RQCMDARG, + "DBOptions::max_background_compactions for RocksDB", + nullptr, rocksdb_set_max_background_compactions, + rocksdb_db_options.max_background_compactions, + /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); static MYSQL_SYSVAR_INT(max_background_flushes, - rocksdb_db_options.max_background_flushes, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - 
"DBOptions::max_background_flushes for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_background_flushes, - /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0); + rocksdb_db_options.max_background_flushes, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_background_flushes for RocksDB", + nullptr, nullptr, + rocksdb_db_options.max_background_flushes, + /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0); static MYSQL_SYSVAR_UINT(max_subcompactions, - rocksdb_db_options.max_subcompactions, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_subcompactions for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_subcompactions, - /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); + rocksdb_db_options.max_subcompactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_subcompactions for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_subcompactions, + /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); static MYSQL_SYSVAR_ULONG(max_log_file_size, - rocksdb_db_options.max_log_file_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_log_file_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_log_file_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.max_log_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_log_file_size for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_log_file_size, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(log_file_time_to_roll, - rocksdb_db_options.log_file_time_to_roll, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::log_file_time_to_roll for RocksDB", - nullptr, nullptr, rocksdb_db_options.log_file_time_to_roll, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.log_file_time_to_roll, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::log_file_time_to_roll for RocksDB", + nullptr, nullptr, + rocksdb_db_options.log_file_time_to_roll, + /* min */ 0L, /* max */ LONG_MAX, 0); static 
MYSQL_SYSVAR_ULONG(keep_log_file_num, - rocksdb_db_options.keep_log_file_num, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::keep_log_file_num for RocksDB", - nullptr, nullptr, rocksdb_db_options.keep_log_file_num, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.keep_log_file_num, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::keep_log_file_num for RocksDB", nullptr, + nullptr, rocksdb_db_options.keep_log_file_num, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(max_manifest_file_size, - rocksdb_db_options.max_manifest_file_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_manifest_file_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.max_manifest_file_size, - /* min */ 0L, /* max */ ULONG_MAX, 0); + rocksdb_db_options.max_manifest_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_manifest_file_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.max_manifest_file_size, + /* min */ 0L, /* max */ ULONG_MAX, 0); static MYSQL_SYSVAR_INT(table_cache_numshardbits, - rocksdb_db_options.table_cache_numshardbits, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::table_cache_numshardbits for RocksDB", - nullptr, nullptr, rocksdb_db_options.table_cache_numshardbits, - /* min */ 0, /* max */ INT_MAX, 0); + rocksdb_db_options.table_cache_numshardbits, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::table_cache_numshardbits for RocksDB", + nullptr, nullptr, + rocksdb_db_options.table_cache_numshardbits, + /* min */ 0, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, - rocksdb_db_options.WAL_ttl_seconds, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::WAL_ttl_seconds for RocksDB", - nullptr, nullptr, rocksdb_db_options.WAL_ttl_seconds, - /* min */ 0L, /* max */ LONG_MAX, 0); +static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, rocksdb_db_options.WAL_ttl_seconds, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + 
"DBOptions::WAL_ttl_seconds for RocksDB", nullptr, + nullptr, rocksdb_db_options.WAL_ttl_seconds, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(wal_size_limit_mb, - rocksdb_db_options.WAL_size_limit_MB, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::WAL_size_limit_MB for RocksDB", - nullptr, nullptr, rocksdb_db_options.WAL_size_limit_MB, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.WAL_size_limit_MB, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::WAL_size_limit_MB for RocksDB", nullptr, + nullptr, rocksdb_db_options.WAL_size_limit_MB, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(manifest_preallocation_size, - rocksdb_db_options.manifest_preallocation_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::manifest_preallocation_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.manifest_preallocation_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.manifest_preallocation_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::manifest_preallocation_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.manifest_preallocation_size, + /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(use_direct_reads, - *reinterpret_cast(&rocksdb_db_options.use_direct_reads), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::use_direct_reads for RocksDB", - nullptr, nullptr, rocksdb_db_options.use_direct_reads); +static MYSQL_SYSVAR_BOOL( + use_direct_reads, + *reinterpret_cast(&rocksdb_db_options.use_direct_reads), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_direct_reads for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_direct_reads); -static MYSQL_SYSVAR_BOOL(use_direct_writes, - *reinterpret_cast(&rocksdb_db_options.use_direct_writes), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::use_direct_writes for RocksDB", - nullptr, nullptr, rocksdb_db_options.use_direct_writes); +static 
MYSQL_SYSVAR_BOOL( + use_direct_writes, + *reinterpret_cast(&rocksdb_db_options.use_direct_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_direct_writes for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_direct_writes); -static MYSQL_SYSVAR_BOOL(allow_mmap_reads, - *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::allow_mmap_reads for RocksDB", - nullptr, nullptr, rocksdb_db_options.allow_mmap_reads); +static MYSQL_SYSVAR_BOOL( + allow_mmap_reads, + *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_reads for RocksDB", nullptr, nullptr, + rocksdb_db_options.allow_mmap_reads); -static MYSQL_SYSVAR_BOOL(allow_mmap_writes, - *reinterpret_cast(&rocksdb_db_options.allow_mmap_writes), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::allow_mmap_writes for RocksDB", - nullptr, nullptr, rocksdb_db_options.allow_mmap_writes); +static MYSQL_SYSVAR_BOOL( + allow_mmap_writes, + *reinterpret_cast(&rocksdb_db_options.allow_mmap_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_writes for RocksDB", nullptr, nullptr, + rocksdb_db_options.allow_mmap_writes); -static MYSQL_SYSVAR_BOOL(is_fd_close_on_exec, - *reinterpret_cast(&rocksdb_db_options.is_fd_close_on_exec), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::is_fd_close_on_exec for RocksDB", - nullptr, nullptr, rocksdb_db_options.is_fd_close_on_exec); +static MYSQL_SYSVAR_BOOL( + is_fd_close_on_exec, + *reinterpret_cast(&rocksdb_db_options.is_fd_close_on_exec), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::is_fd_close_on_exec for RocksDB", nullptr, nullptr, + rocksdb_db_options.is_fd_close_on_exec); static MYSQL_SYSVAR_UINT(stats_dump_period_sec, - rocksdb_db_options.stats_dump_period_sec, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::stats_dump_period_sec for RocksDB", - nullptr, 
nullptr, rocksdb_db_options.stats_dump_period_sec, - /* min */ 0, /* max */ INT_MAX, 0); + rocksdb_db_options.stats_dump_period_sec, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::stats_dump_period_sec for RocksDB", + nullptr, nullptr, + rocksdb_db_options.stats_dump_period_sec, + /* min */ 0, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_BOOL(advise_random_on_open, - *reinterpret_cast(&rocksdb_db_options.advise_random_on_open), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::advise_random_on_open for RocksDB", - nullptr, nullptr, rocksdb_db_options.advise_random_on_open); +static MYSQL_SYSVAR_BOOL( + advise_random_on_open, + *reinterpret_cast(&rocksdb_db_options.advise_random_on_open), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::advise_random_on_open for RocksDB", nullptr, nullptr, + rocksdb_db_options.advise_random_on_open); static MYSQL_SYSVAR_ULONG(db_write_buffer_size, - rocksdb_db_options.db_write_buffer_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::db_write_buffer_size for RocksDB", - nullptr, nullptr, rocksdb_db_options.db_write_buffer_size, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.db_write_buffer_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::db_write_buffer_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.db_write_buffer_size, + /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(use_adaptive_mutex, - *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::use_adaptive_mutex for RocksDB", - nullptr, nullptr, rocksdb_db_options.use_adaptive_mutex); +static MYSQL_SYSVAR_BOOL( + use_adaptive_mutex, + *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_adaptive_mutex for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_adaptive_mutex); -static MYSQL_SYSVAR_ULONG(bytes_per_sync, - 
rocksdb_db_options.bytes_per_sync, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::bytes_per_sync for RocksDB", - nullptr, nullptr, rocksdb_db_options.bytes_per_sync, - /* min */ 0L, /* max */ LONG_MAX, 0); +static MYSQL_SYSVAR_ULONG(bytes_per_sync, rocksdb_db_options.bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::bytes_per_sync for RocksDB", nullptr, + nullptr, rocksdb_db_options.bytes_per_sync, + /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(wal_bytes_per_sync, - rocksdb_db_options.wal_bytes_per_sync, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::wal_bytes_per_sync for RocksDB", - nullptr, nullptr, rocksdb_db_options.wal_bytes_per_sync, - /* min */ 0L, /* max */ LONG_MAX, 0); + rocksdb_db_options.wal_bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, + nullptr, rocksdb_db_options.wal_bytes_per_sync, + /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL(enable_thread_tracking, - *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::enable_thread_tracking for RocksDB", - nullptr, nullptr, rocksdb_db_options.enable_thread_tracking); +static MYSQL_SYSVAR_BOOL( + enable_thread_tracking, + *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::enable_thread_tracking for RocksDB", nullptr, nullptr, + rocksdb_db_options.enable_thread_tracking); static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "block_cache size for RocksDB", - nullptr, nullptr, - /* default */ RDB_DEFAULT_BLOCK_CACHE_SIZE, - /* min */ RDB_MIN_BLOCK_CACHE_SIZE, - /* max */ LONGLONG_MAX, /* Block size */ RDB_MIN_BLOCK_CACHE_SIZE); + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "block_cache size for RocksDB", nullptr, nullptr, + /* default */ 
RDB_DEFAULT_BLOCK_CACHE_SIZE, + /* min */ RDB_MIN_BLOCK_CACHE_SIZE, + /* max */ LONGLONG_MAX, + /* Block size */ RDB_MIN_BLOCK_CACHE_SIZE); -static MYSQL_SYSVAR_BOOL(cache_index_and_filter_blocks, - *reinterpret_cast( - &rocksdb_tbl_options.cache_index_and_filter_blocks), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", - nullptr, nullptr, true); +static MYSQL_SYSVAR_BOOL( + cache_index_and_filter_blocks, + *reinterpret_cast( + &rocksdb_tbl_options.cache_index_and_filter_blocks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", + nullptr, nullptr, true); // When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will use the // LRU cache, but will always keep the filter & idndex block's handle checked @@ -900,406 +875,402 @@ static MYSQL_SYSVAR_BOOL(cache_index_and_filter_blocks, // This fixes the mutex contention between :ShardedLRUCache::Lookup and // ShardedLRUCache::Release which reduced the QPS ratio (QPS using secondary // index / QPS using PK). 
-static MYSQL_SYSVAR_BOOL(pin_l0_filter_and_index_blocks_in_cache, - *reinterpret_cast( - &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "pin_l0_filter_and_index_blocks_in_cache for RocksDB", - nullptr, nullptr, true); +static MYSQL_SYSVAR_BOOL( + pin_l0_filter_and_index_blocks_in_cache, + *reinterpret_cast( + &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "pin_l0_filter_and_index_blocks_in_cache for RocksDB", nullptr, nullptr, + true); -static MYSQL_SYSVAR_ENUM(index_type, - rocksdb_index_type, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::index_type for RocksDB", - nullptr, nullptr, - (uint64_t)rocksdb_tbl_options.index_type, &index_type_typelib); - -static MYSQL_SYSVAR_BOOL(hash_index_allow_collision, - *reinterpret_cast(&rocksdb_tbl_options.hash_index_allow_collision), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.hash_index_allow_collision); - -static MYSQL_SYSVAR_BOOL(no_block_cache, - *reinterpret_cast(&rocksdb_tbl_options.no_block_cache), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::no_block_cache for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.no_block_cache); - -static MYSQL_SYSVAR_ULONG(block_size, - rocksdb_tbl_options.block_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::block_size for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.block_size, - /* min */ 1L, /* max */ LONG_MAX, 0); - -static MYSQL_SYSVAR_INT(block_size_deviation, - rocksdb_tbl_options.block_size_deviation, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::block_size_deviation for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.block_size_deviation, - /* min */ 0, /* max */ INT_MAX, 0); - -static MYSQL_SYSVAR_INT(block_restart_interval, - 
rocksdb_tbl_options.block_restart_interval, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::block_restart_interval for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.block_restart_interval, - /* min */ 1, /* max */ INT_MAX, 0); - -static MYSQL_SYSVAR_BOOL(whole_key_filtering, - *reinterpret_cast(&rocksdb_tbl_options.whole_key_filtering), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "BlockBasedTableOptions::whole_key_filtering for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.whole_key_filtering); - -static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "default cf options for RocksDB", - nullptr, nullptr, ""); - -static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "option overrides per cf for RocksDB", - nullptr, nullptr, ""); - -static MYSQL_SYSVAR_BOOL(background_sync, - rocksdb_background_sync, - PLUGIN_VAR_RQCMDARG, - "turns on background syncs for RocksDB", - nullptr, nullptr, FALSE); - -static MYSQL_THDVAR_BOOL(write_sync, - PLUGIN_VAR_RQCMDARG, - "WriteOptions::sync for RocksDB", - nullptr, nullptr, rocksdb::WriteOptions().sync); - -static MYSQL_THDVAR_BOOL(write_disable_wal, - PLUGIN_VAR_RQCMDARG, - "WriteOptions::disableWAL for RocksDB", - nullptr, nullptr, rocksdb::WriteOptions().disableWAL); - -static MYSQL_THDVAR_BOOL(write_ignore_missing_column_families, - PLUGIN_VAR_RQCMDARG, - "WriteOptions::ignore_missing_column_families for RocksDB", - nullptr, nullptr, rocksdb::WriteOptions().ignore_missing_column_families); - -static MYSQL_THDVAR_BOOL(skip_fill_cache, - PLUGIN_VAR_RQCMDARG, - "Skip filling block cache on read requests", - nullptr, nullptr, FALSE); - -static MYSQL_THDVAR_BOOL(unsafe_for_binlog, - PLUGIN_VAR_RQCMDARG, - "Allowing statement based binary logging which may break consistency", - nullptr, nullptr, FALSE); - -static MYSQL_THDVAR_UINT(records_in_range, - PLUGIN_VAR_RQCMDARG, 
- "Used to override the result of records_in_range(). Set to a positive number to override", - nullptr, nullptr, 0, - /* min */ 0, /* max */ INT_MAX, 0); - -static MYSQL_THDVAR_UINT(force_index_records_in_range, - PLUGIN_VAR_RQCMDARG, - "Used to override the result of records_in_range() when FORCE INDEX is used.", - nullptr, nullptr, 0, - /* min */ 0, /* max */ INT_MAX, 0); - -static MYSQL_SYSVAR_UINT(debug_optimizer_n_rows, - rocksdb_debug_optimizer_n_rows, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR, - "Test only to override rocksdb estimates of table size in a memtable", - nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0); - -static MYSQL_SYSVAR_BOOL(debug_optimizer_no_zero_cardinality, - rocksdb_debug_optimizer_no_zero_cardinality, - PLUGIN_VAR_RQCMDARG, - "In case if cardinality is zero, overrides it with some value", - nullptr, nullptr, TRUE); - -static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name, - PLUGIN_VAR_RQCMDARG, - "Compact column family", - rocksdb_compact_column_family, rocksdb_compact_column_family_stub, ""); - -static MYSQL_SYSVAR_STR(create_checkpoint, rocksdb_checkpoint_name, - PLUGIN_VAR_RQCMDARG, - "Checkpoint directory", - rocksdb_create_checkpoint, rocksdb_create_checkpoint_stub, ""); - -static MYSQL_SYSVAR_BOOL(signal_drop_index_thread, - rocksdb_signal_drop_index_thread, - PLUGIN_VAR_RQCMDARG, - "Wake up drop index thread", - nullptr, rocksdb_drop_index_wakeup_thread, FALSE); - -static MYSQL_SYSVAR_BOOL(pause_background_work, - rocksdb_pause_background_work, - PLUGIN_VAR_RQCMDARG, - "Disable all rocksdb background operations", - nullptr, rocksdb_set_pause_background_work, FALSE); - -static MYSQL_SYSVAR_BOOL(disable_2pc, - rocksdb_disable_2pc, - PLUGIN_VAR_RQCMDARG, - "Disable two phase commit for MyRocks", - nullptr, nullptr, TRUE); - -static MYSQL_SYSVAR_BOOL(strict_collation_check, - rocksdb_strict_collation_check, - PLUGIN_VAR_RQCMDARG, - "Enforce case sensitive collation for MyRocks indexes", - 
nullptr, nullptr, TRUE); - -static MYSQL_SYSVAR_STR(strict_collation_exceptions, - rocksdb_strict_collation_exceptions, - PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC, - "List of tables (using regex) that are excluded " - "from the case sensitive collation enforcement", - nullptr, rocksdb_set_collation_exception_list, ""); - -static MYSQL_SYSVAR_BOOL(collect_sst_properties, - rocksdb_collect_sst_properties, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Enables collecting SST file properties on each flush", - nullptr, nullptr, rocksdb_collect_sst_properties); +static MYSQL_SYSVAR_ENUM(index_type, rocksdb_index_type, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::index_type for RocksDB", + nullptr, nullptr, + (uint64_t)rocksdb_tbl_options.index_type, + &index_type_typelib); static MYSQL_SYSVAR_BOOL( - force_flush_memtable_now, - rocksdb_force_flush_memtable_now_var, - PLUGIN_VAR_RQCMDARG, - "Forces memstore flush which may block all write requests so be careful", - rocksdb_force_flush_memtable_now, - rocksdb_force_flush_memtable_now_stub, FALSE); + hash_index_allow_collision, + *reinterpret_cast( + &rocksdb_tbl_options.hash_index_allow_collision), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.hash_index_allow_collision); + +static MYSQL_SYSVAR_BOOL( + no_block_cache, + *reinterpret_cast(&rocksdb_tbl_options.no_block_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::no_block_cache for RocksDB", nullptr, nullptr, + rocksdb_tbl_options.no_block_cache); + +static MYSQL_SYSVAR_ULONG(block_size, rocksdb_tbl_options.block_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.block_size, + /* min */ 1L, /* max */ LONG_MAX, 0); + +static MYSQL_SYSVAR_INT( + block_size_deviation, rocksdb_tbl_options.block_size_deviation, + 
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size_deviation for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.block_size_deviation, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_INT( + block_restart_interval, rocksdb_tbl_options.block_restart_interval, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_restart_interval for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.block_restart_interval, + /* min */ 1, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + whole_key_filtering, + *reinterpret_cast(&rocksdb_tbl_options.whole_key_filtering), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::whole_key_filtering for RocksDB", nullptr, nullptr, + rocksdb_tbl_options.whole_key_filtering); + +static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "default cf options for RocksDB", nullptr, nullptr, ""); + +static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "option overrides per cf for RocksDB", nullptr, nullptr, + ""); + +static MYSQL_SYSVAR_BOOL(background_sync, rocksdb_background_sync, + PLUGIN_VAR_RQCMDARG, + "turns on background syncs for RocksDB", nullptr, + nullptr, FALSE); + +static MYSQL_THDVAR_BOOL(write_sync, PLUGIN_VAR_RQCMDARG, + "WriteOptions::sync for RocksDB", nullptr, nullptr, + rocksdb::WriteOptions().sync); + +static MYSQL_THDVAR_BOOL(write_disable_wal, PLUGIN_VAR_RQCMDARG, + "WriteOptions::disableWAL for RocksDB", nullptr, + nullptr, rocksdb::WriteOptions().disableWAL); static MYSQL_THDVAR_BOOL( - flush_memtable_on_analyze, - PLUGIN_VAR_RQCMDARG, - "Forces memtable flush on ANALZYE table to get accurate cardinality", - nullptr, nullptr, true); + write_ignore_missing_column_families, PLUGIN_VAR_RQCMDARG, + "WriteOptions::ignore_missing_column_families for RocksDB", nullptr, + nullptr, 
rocksdb::WriteOptions().ignore_missing_column_families); -static MYSQL_SYSVAR_UINT(seconds_between_stat_computes, - rocksdb_seconds_between_stat_computes, - PLUGIN_VAR_RQCMDARG, - "Sets a number of seconds to wait between optimizer stats recomputation. " - "Only changed indexes will be refreshed.", - nullptr, nullptr, rocksdb_seconds_between_stat_computes, - /* min */ 0L, /* max */ UINT_MAX, 0); +static MYSQL_THDVAR_BOOL(skip_fill_cache, PLUGIN_VAR_RQCMDARG, + "Skip filling block cache on read requests", nullptr, + nullptr, FALSE); -static MYSQL_SYSVAR_LONGLONG( - compaction_sequential_deletes, - rocksdb_compaction_sequential_deletes, - PLUGIN_VAR_RQCMDARG, - "RocksDB will trigger compaction for the file if it has more than this number sequential deletes per window", - nullptr, rocksdb_set_compaction_options, - DEFAULT_COMPACTION_SEQUENTIAL_DELETES, - /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0); +static MYSQL_THDVAR_BOOL( + unsafe_for_binlog, PLUGIN_VAR_RQCMDARG, + "Allowing statement based binary logging which may break consistency", + nullptr, nullptr, FALSE); -static MYSQL_SYSVAR_LONGLONG( - compaction_sequential_deletes_window, - rocksdb_compaction_sequential_deletes_window, - PLUGIN_VAR_RQCMDARG, - "Size of the window for counting rocksdb_compaction_sequential_deletes", - nullptr, rocksdb_set_compaction_options, - DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW, - /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0); +static MYSQL_THDVAR_UINT(records_in_range, PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range(). 
" + "Set to a positive number to override", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_LONGLONG( - compaction_sequential_deletes_file_size, - rocksdb_compaction_sequential_deletes_file_size, - PLUGIN_VAR_RQCMDARG, - "Minimum file size required for compaction_sequential_deletes", - nullptr, rocksdb_set_compaction_options, 0L, - /* min */ -1L, /* max */ LONGLONG_MAX, 0); - -static MYSQL_SYSVAR_BOOL(compaction_sequential_deletes_count_sd, - rocksdb_compaction_sequential_deletes_count_sd, - PLUGIN_VAR_RQCMDARG, - "Counting SingleDelete as rocksdb_compaction_sequential_deletes", - nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); - -static MYSQL_SYSVAR_BOOL(print_snapshot_conflict_queries, - rocksdb_print_snapshot_conflict_queries, - PLUGIN_VAR_RQCMDARG, - "Logging queries that got snapshot conflict errors into *.err log", - nullptr, nullptr, rocksdb_print_snapshot_conflict_queries); - -static MYSQL_THDVAR_INT(checksums_pct, - PLUGIN_VAR_RQCMDARG, - "How many percentages of rows to be checksummed", - nullptr, nullptr, RDB_MAX_CHECKSUMS_PCT, - /* min */ 0, /* max */ RDB_MAX_CHECKSUMS_PCT, 0); - -static MYSQL_THDVAR_BOOL(store_row_debug_checksums, - PLUGIN_VAR_RQCMDARG, - "Include checksums when writing index/table records", - nullptr, nullptr, false /* default value */); - -static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, - PLUGIN_VAR_RQCMDARG, - "Verify checksums when reading index/table records", - nullptr, nullptr, false /* default value */); - -static MYSQL_SYSVAR_UINT(validate_tables, - rocksdb_validate_tables, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Verify all .frm files match all RocksDB tables (0 means no verification, " - "1 means verify and fail on error, and 2 means verify but continue", - nullptr, nullptr, 1 /* default value */, 0 /* min value */, - 2 /* max value */, 0); - -static MYSQL_SYSVAR_STR(datadir, - rocksdb_datadir, - PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, - "RocksDB data 
directory", - nullptr, nullptr, "./.rocksdb"); +static MYSQL_THDVAR_UINT(force_index_records_in_range, PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range() " + "when FORCE INDEX is used.", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_UINT( - table_stats_sampling_pct, - rocksdb_table_stats_sampling_pct, - PLUGIN_VAR_RQCMDARG, - "Percentage of entries to sample when collecting statistics about table " - "properties. Specify either 0 to sample everything or percentage [" - STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".." - STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. " "By default " - STRINGIFY_ARG(RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% of entries are " - "sampled.", - nullptr, rocksdb_set_table_stats_sampling_pct, /* default */ - RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, - /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); + debug_optimizer_n_rows, rocksdb_debug_optimizer_n_rows, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR, + "Test only to override rocksdb estimates of table size in a memtable", + nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0); -static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE= 100; +static MYSQL_SYSVAR_BOOL( + debug_optimizer_no_zero_cardinality, + rocksdb_debug_optimizer_no_zero_cardinality, PLUGIN_VAR_RQCMDARG, + "In case if cardinality is zero, overrides it with some value", nullptr, + nullptr, TRUE); -static struct st_mysql_sys_var* rocksdb_system_variables[]= { - MYSQL_SYSVAR(lock_wait_timeout), - MYSQL_SYSVAR(deadlock_detect), - MYSQL_SYSVAR(max_row_locks), - MYSQL_SYSVAR(lock_scanned_rows), - MYSQL_SYSVAR(bulk_load), - MYSQL_SYSVAR(skip_unique_check_tables), - MYSQL_SYSVAR(trace_sst_api), - MYSQL_SYSVAR(skip_unique_check), - MYSQL_SYSVAR(commit_in_the_middle), - MYSQL_SYSVAR(read_free_rpl_tables), - MYSQL_SYSVAR(rpl_skip_tx_api), - MYSQL_SYSVAR(bulk_load_size), - MYSQL_SYSVAR(merge_buf_size), - MYSQL_SYSVAR(enable_bulk_load_api), - 
MYSQL_SYSVAR(tmpdir), - MYSQL_SYSVAR(merge_combine_read_size), - MYSQL_SYSVAR(skip_bloom_filter_on_read), +static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name, + PLUGIN_VAR_RQCMDARG, "Compact column family", + rocksdb_compact_column_family, + rocksdb_compact_column_family_stub, ""); - MYSQL_SYSVAR(create_if_missing), - MYSQL_SYSVAR(create_missing_column_families), - MYSQL_SYSVAR(error_if_exists), - MYSQL_SYSVAR(paranoid_checks), - MYSQL_SYSVAR(rate_limiter_bytes_per_sec), - MYSQL_SYSVAR(info_log_level), - MYSQL_SYSVAR(max_open_files), - MYSQL_SYSVAR(max_total_wal_size), - MYSQL_SYSVAR(disabledatasync), - MYSQL_SYSVAR(use_fsync), - MYSQL_SYSVAR(wal_dir), - MYSQL_SYSVAR(delete_obsolete_files_period_micros), - MYSQL_SYSVAR(base_background_compactions), - MYSQL_SYSVAR(max_background_compactions), - MYSQL_SYSVAR(max_background_flushes), - MYSQL_SYSVAR(max_log_file_size), - MYSQL_SYSVAR(max_subcompactions), - MYSQL_SYSVAR(log_file_time_to_roll), - MYSQL_SYSVAR(keep_log_file_num), - MYSQL_SYSVAR(max_manifest_file_size), - MYSQL_SYSVAR(table_cache_numshardbits), - MYSQL_SYSVAR(wal_ttl_seconds), - MYSQL_SYSVAR(wal_size_limit_mb), - MYSQL_SYSVAR(manifest_preallocation_size), - MYSQL_SYSVAR(use_direct_reads), - MYSQL_SYSVAR(use_direct_writes), - MYSQL_SYSVAR(allow_mmap_reads), - MYSQL_SYSVAR(allow_mmap_writes), - MYSQL_SYSVAR(is_fd_close_on_exec), - MYSQL_SYSVAR(stats_dump_period_sec), - MYSQL_SYSVAR(advise_random_on_open), - MYSQL_SYSVAR(db_write_buffer_size), - MYSQL_SYSVAR(use_adaptive_mutex), - MYSQL_SYSVAR(bytes_per_sync), - MYSQL_SYSVAR(wal_bytes_per_sync), - MYSQL_SYSVAR(enable_thread_tracking), - MYSQL_SYSVAR(perf_context_level), - MYSQL_SYSVAR(wal_recovery_mode), - MYSQL_SYSVAR(access_hint_on_compaction_start), - MYSQL_SYSVAR(new_table_reader_for_compaction_inputs), - MYSQL_SYSVAR(compaction_readahead_size), - MYSQL_SYSVAR(allow_concurrent_memtable_write), - MYSQL_SYSVAR(enable_write_thread_adaptive_yield), +static MYSQL_SYSVAR_STR(create_checkpoint, 
rocksdb_checkpoint_name, + PLUGIN_VAR_RQCMDARG, "Checkpoint directory", + rocksdb_create_checkpoint, + rocksdb_create_checkpoint_stub, ""); - MYSQL_SYSVAR(block_cache_size), - MYSQL_SYSVAR(cache_index_and_filter_blocks), - MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache), - MYSQL_SYSVAR(index_type), - MYSQL_SYSVAR(hash_index_allow_collision), - MYSQL_SYSVAR(no_block_cache), - MYSQL_SYSVAR(block_size), - MYSQL_SYSVAR(block_size_deviation), - MYSQL_SYSVAR(block_restart_interval), - MYSQL_SYSVAR(whole_key_filtering), +static MYSQL_SYSVAR_BOOL(signal_drop_index_thread, + rocksdb_signal_drop_index_thread, PLUGIN_VAR_RQCMDARG, + "Wake up drop index thread", nullptr, + rocksdb_drop_index_wakeup_thread, FALSE); - MYSQL_SYSVAR(default_cf_options), - MYSQL_SYSVAR(override_cf_options), +static MYSQL_SYSVAR_BOOL(pause_background_work, rocksdb_pause_background_work, + PLUGIN_VAR_RQCMDARG, + "Disable all rocksdb background operations", nullptr, + rocksdb_set_pause_background_work, FALSE); - MYSQL_SYSVAR(background_sync), +static MYSQL_SYSVAR_BOOL(enable_2pc, rocksdb_enable_2pc, PLUGIN_VAR_RQCMDARG, + "Enable two phase commit for MyRocks", nullptr, + nullptr, TRUE); - MYSQL_SYSVAR(write_sync), - MYSQL_SYSVAR(write_disable_wal), - MYSQL_SYSVAR(write_ignore_missing_column_families), +static MYSQL_SYSVAR_BOOL(strict_collation_check, rocksdb_strict_collation_check, + PLUGIN_VAR_RQCMDARG, + "Enforce case sensitive collation for MyRocks indexes", + nullptr, nullptr, TRUE); - MYSQL_SYSVAR(skip_fill_cache), - MYSQL_SYSVAR(unsafe_for_binlog), +static MYSQL_SYSVAR_STR(strict_collation_exceptions, + rocksdb_strict_collation_exceptions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "List of tables (using regex) that are excluded " + "from the case sensitive collation enforcement", + nullptr, rocksdb_set_collation_exception_list, ""); - MYSQL_SYSVAR(records_in_range), - MYSQL_SYSVAR(force_index_records_in_range), - MYSQL_SYSVAR(debug_optimizer_n_rows), - 
MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), +static MYSQL_SYSVAR_BOOL(collect_sst_properties, rocksdb_collect_sst_properties, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables collecting SST file properties on each flush", + nullptr, nullptr, rocksdb_collect_sst_properties); - MYSQL_SYSVAR(compact_cf), - MYSQL_SYSVAR(signal_drop_index_thread), - MYSQL_SYSVAR(pause_background_work), - MYSQL_SYSVAR(disable_2pc), - MYSQL_SYSVAR(strict_collation_check), - MYSQL_SYSVAR(strict_collation_exceptions), - MYSQL_SYSVAR(collect_sst_properties), - MYSQL_SYSVAR(force_flush_memtable_now), - MYSQL_SYSVAR(flush_memtable_on_analyze), - MYSQL_SYSVAR(seconds_between_stat_computes), +static MYSQL_SYSVAR_BOOL( + force_flush_memtable_now, rocksdb_force_flush_memtable_now_var, + PLUGIN_VAR_RQCMDARG, + "Forces memstore flush which may block all write requests so be careful", + rocksdb_force_flush_memtable_now, rocksdb_force_flush_memtable_now_stub, + FALSE); - MYSQL_SYSVAR(compaction_sequential_deletes), - MYSQL_SYSVAR(compaction_sequential_deletes_window), - MYSQL_SYSVAR(compaction_sequential_deletes_file_size), - MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), - MYSQL_SYSVAR(print_snapshot_conflict_queries), +static MYSQL_THDVAR_BOOL( + flush_memtable_on_analyze, PLUGIN_VAR_RQCMDARG, + "Forces memtable flush on ANALZYE table to get accurate cardinality", + nullptr, nullptr, true); - MYSQL_SYSVAR(datadir), - MYSQL_SYSVAR(create_checkpoint), +static MYSQL_SYSVAR_UINT( + seconds_between_stat_computes, rocksdb_seconds_between_stat_computes, + PLUGIN_VAR_RQCMDARG, + "Sets a number of seconds to wait between optimizer stats recomputation. 
" + "Only changed indexes will be refreshed.", + nullptr, nullptr, rocksdb_seconds_between_stat_computes, + /* min */ 0L, /* max */ UINT_MAX, 0); - MYSQL_SYSVAR(checksums_pct), - MYSQL_SYSVAR(store_row_debug_checksums), - MYSQL_SYSVAR(verify_row_debug_checksums), +static MYSQL_SYSVAR_LONGLONG(compaction_sequential_deletes, + rocksdb_compaction_sequential_deletes, + PLUGIN_VAR_RQCMDARG, + "RocksDB will trigger compaction for the file if " + "it has more than this number sequential deletes " + "per window", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES, + /* min */ 0L, + /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0); - MYSQL_SYSVAR(validate_tables), - MYSQL_SYSVAR(table_stats_sampling_pct), - nullptr -}; +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_window, + rocksdb_compaction_sequential_deletes_window, PLUGIN_VAR_RQCMDARG, + "Size of the window for counting rocksdb_compaction_sequential_deletes", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW, + /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0); +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_file_size, + rocksdb_compaction_sequential_deletes_file_size, PLUGIN_VAR_RQCMDARG, + "Minimum file size required for compaction_sequential_deletes", nullptr, + rocksdb_set_compaction_options, 0L, + /* min */ -1L, /* max */ LONGLONG_MAX, 0); -static rocksdb::WriteOptions rdb_get_rocksdb_write_options( - my_core::THD* const thd) -{ +static MYSQL_SYSVAR_BOOL( + compaction_sequential_deletes_count_sd, + rocksdb_compaction_sequential_deletes_count_sd, PLUGIN_VAR_RQCMDARG, + "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, + nullptr, rocksdb_compaction_sequential_deletes_count_sd); + +static MYSQL_SYSVAR_BOOL( + print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries, + PLUGIN_VAR_RQCMDARG, + "Logging queries that got snapshot conflict errors into *.err log", 
nullptr, + nullptr, rocksdb_print_snapshot_conflict_queries); + +static MYSQL_THDVAR_INT(checksums_pct, PLUGIN_VAR_RQCMDARG, + "How many percentages of rows to be checksummed", + nullptr, nullptr, RDB_MAX_CHECKSUMS_PCT, + /* min */ 0, /* max */ RDB_MAX_CHECKSUMS_PCT, 0); + +static MYSQL_THDVAR_BOOL(store_row_debug_checksums, PLUGIN_VAR_RQCMDARG, + "Include checksums when writing index/table records", + nullptr, nullptr, false /* default value */); + +static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG, + "Verify checksums when reading index/table records", + nullptr, nullptr, false /* default value */); + +static MYSQL_SYSVAR_UINT( + validate_tables, rocksdb_validate_tables, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Verify all .frm files match all RocksDB tables (0 means no verification, " + "1 means verify and fail on error, and 2 means verify but continue", + nullptr, nullptr, 1 /* default value */, 0 /* min value */, + 2 /* max value */, 0); + +static MYSQL_SYSVAR_STR(datadir, rocksdb_datadir, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "RocksDB data directory", nullptr, nullptr, + "./.rocksdb"); + +static MYSQL_SYSVAR_UINT( + table_stats_sampling_pct, rocksdb_table_stats_sampling_pct, + PLUGIN_VAR_RQCMDARG, + "Percentage of entries to sample when collecting statistics about table " + "properties. Specify either 0 to sample everything or percentage " + "[" STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".." STRINGIFY_ARG( + RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. 
" + "By default " STRINGIFY_ARG( + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% " + "of" + " e" + "nt" + "ri" + "es" + " a" + "re" + " " + "sa" + "mp" + "le" + "d" + ".", + nullptr, rocksdb_set_table_stats_sampling_pct, /* default */ + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, + /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); + +static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE = 100; + +static struct st_mysql_sys_var *rocksdb_system_variables[] = { + MYSQL_SYSVAR(lock_wait_timeout), + MYSQL_SYSVAR(deadlock_detect), + MYSQL_SYSVAR(max_row_locks), + MYSQL_SYSVAR(lock_scanned_rows), + MYSQL_SYSVAR(bulk_load), + MYSQL_SYSVAR(skip_unique_check_tables), + MYSQL_SYSVAR(trace_sst_api), + MYSQL_SYSVAR(commit_in_the_middle), + MYSQL_SYSVAR(read_free_rpl_tables), + MYSQL_SYSVAR(bulk_load_size), + MYSQL_SYSVAR(merge_buf_size), + MYSQL_SYSVAR(enable_bulk_load_api), + MYSQL_SYSVAR(tmpdir), + MYSQL_SYSVAR(merge_combine_read_size), + MYSQL_SYSVAR(skip_bloom_filter_on_read), + + MYSQL_SYSVAR(create_if_missing), + MYSQL_SYSVAR(create_missing_column_families), + MYSQL_SYSVAR(error_if_exists), + MYSQL_SYSVAR(paranoid_checks), + MYSQL_SYSVAR(rate_limiter_bytes_per_sec), + MYSQL_SYSVAR(info_log_level), + MYSQL_SYSVAR(max_open_files), + MYSQL_SYSVAR(max_total_wal_size), + MYSQL_SYSVAR(disabledatasync), + MYSQL_SYSVAR(use_fsync), + MYSQL_SYSVAR(wal_dir), + MYSQL_SYSVAR(persistent_cache_path), + MYSQL_SYSVAR(persistent_cache_size), + MYSQL_SYSVAR(delete_obsolete_files_period_micros), + MYSQL_SYSVAR(base_background_compactions), + MYSQL_SYSVAR(max_background_compactions), + MYSQL_SYSVAR(max_background_flushes), + MYSQL_SYSVAR(max_log_file_size), + MYSQL_SYSVAR(max_subcompactions), + MYSQL_SYSVAR(log_file_time_to_roll), + MYSQL_SYSVAR(keep_log_file_num), + MYSQL_SYSVAR(max_manifest_file_size), + MYSQL_SYSVAR(table_cache_numshardbits), + MYSQL_SYSVAR(wal_ttl_seconds), + MYSQL_SYSVAR(wal_size_limit_mb), + MYSQL_SYSVAR(manifest_preallocation_size), + MYSQL_SYSVAR(use_direct_reads), + 
MYSQL_SYSVAR(use_direct_writes), + MYSQL_SYSVAR(allow_mmap_reads), + MYSQL_SYSVAR(allow_mmap_writes), + MYSQL_SYSVAR(is_fd_close_on_exec), + MYSQL_SYSVAR(stats_dump_period_sec), + MYSQL_SYSVAR(advise_random_on_open), + MYSQL_SYSVAR(db_write_buffer_size), + MYSQL_SYSVAR(use_adaptive_mutex), + MYSQL_SYSVAR(bytes_per_sync), + MYSQL_SYSVAR(wal_bytes_per_sync), + MYSQL_SYSVAR(enable_thread_tracking), + MYSQL_SYSVAR(perf_context_level), + MYSQL_SYSVAR(wal_recovery_mode), + MYSQL_SYSVAR(access_hint_on_compaction_start), + MYSQL_SYSVAR(new_table_reader_for_compaction_inputs), + MYSQL_SYSVAR(compaction_readahead_size), + MYSQL_SYSVAR(allow_concurrent_memtable_write), + MYSQL_SYSVAR(enable_write_thread_adaptive_yield), + + MYSQL_SYSVAR(block_cache_size), + MYSQL_SYSVAR(cache_index_and_filter_blocks), + MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache), + MYSQL_SYSVAR(index_type), + MYSQL_SYSVAR(hash_index_allow_collision), + MYSQL_SYSVAR(no_block_cache), + MYSQL_SYSVAR(block_size), + MYSQL_SYSVAR(block_size_deviation), + MYSQL_SYSVAR(block_restart_interval), + MYSQL_SYSVAR(whole_key_filtering), + + MYSQL_SYSVAR(default_cf_options), + MYSQL_SYSVAR(override_cf_options), + + MYSQL_SYSVAR(background_sync), + + MYSQL_SYSVAR(write_sync), + MYSQL_SYSVAR(write_disable_wal), + MYSQL_SYSVAR(write_ignore_missing_column_families), + + MYSQL_SYSVAR(skip_fill_cache), + MYSQL_SYSVAR(unsafe_for_binlog), + + MYSQL_SYSVAR(records_in_range), + MYSQL_SYSVAR(force_index_records_in_range), + MYSQL_SYSVAR(debug_optimizer_n_rows), + MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), + + MYSQL_SYSVAR(compact_cf), + MYSQL_SYSVAR(signal_drop_index_thread), + MYSQL_SYSVAR(pause_background_work), + MYSQL_SYSVAR(enable_2pc), + MYSQL_SYSVAR(strict_collation_check), + MYSQL_SYSVAR(strict_collation_exceptions), + MYSQL_SYSVAR(collect_sst_properties), + MYSQL_SYSVAR(force_flush_memtable_now), + MYSQL_SYSVAR(flush_memtable_on_analyze), + MYSQL_SYSVAR(seconds_between_stat_computes), + + 
MYSQL_SYSVAR(compaction_sequential_deletes), + MYSQL_SYSVAR(compaction_sequential_deletes_window), + MYSQL_SYSVAR(compaction_sequential_deletes_file_size), + MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), + MYSQL_SYSVAR(print_snapshot_conflict_queries), + + MYSQL_SYSVAR(datadir), + MYSQL_SYSVAR(create_checkpoint), + + MYSQL_SYSVAR(checksums_pct), + MYSQL_SYSVAR(store_row_debug_checksums), + MYSQL_SYSVAR(verify_row_debug_checksums), + + MYSQL_SYSVAR(validate_tables), + MYSQL_SYSVAR(table_stats_sampling_pct), + nullptr}; + +static rocksdb::WriteOptions +rdb_get_rocksdb_write_options(my_core::THD *const thd) { rocksdb::WriteOptions opt; - opt.sync= THDVAR(thd, write_sync); - opt.disableWAL= THDVAR(thd, write_disable_wal); - opt.ignore_missing_column_families= + opt.sync = THDVAR(thd, write_sync); + opt.disableWAL = THDVAR(thd, write_disable_wal); + opt.ignore_missing_column_families = THDVAR(thd, write_ignore_missing_column_families); return opt; @@ -1312,104 +1283,93 @@ static rocksdb::WriteOptions rdb_get_rocksdb_write_options( Function we use in the creation of our hash to get key. */ -uchar* Rdb_open_tables_map::get_hash_key( - Rdb_table_handler* const table_handler, size_t* const length, - my_bool not_used __attribute__((__unused__))) -{ - *length= table_handler->m_table_name_length; - return reinterpret_cast(table_handler->m_table_name); +uchar * +Rdb_open_tables_map::get_hash_key(Rdb_table_handler *const table_handler, + size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))) { + *length = table_handler->m_table_name_length; + return reinterpret_cast(table_handler->m_table_name); } - /* The following is needed as an argument for mysql_stage_register, irrespectively of whether we're compiling with P_S or not. 
*/ -PSI_stage_info stage_waiting_on_row_lock= { 0, "Waiting for row lock", 0}; +PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock", 0}; #ifdef HAVE_PSI_INTERFACE static PSI_thread_key rdb_background_psi_thread_key; static PSI_thread_key rdb_drop_idx_psi_thread_key; -static PSI_stage_info *all_rocksdb_stages[]= -{ - & stage_waiting_on_row_lock -}; - +static PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock}; static my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, - rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, - rdb_collation_data_mutex_key, - rdb_mem_cmp_space_mutex_key, - key_mutex_tx_list, rdb_sysvars_psi_mutex_key; + rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, + rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, + key_mutex_tx_list, rdb_sysvars_psi_mutex_key; -static PSI_mutex_info all_rocksdb_mutexes[]= -{ - { &rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, - { &rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, - { &rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, - { &rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, - { &rdb_mem_cmp_space_mutex_key, "collation space char data init", - PSI_FLAG_GLOBAL}, - { &key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, - { &rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, +static PSI_mutex_info all_rocksdb_mutexes[] = { + {&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, + {&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, + {&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, + {&rdb_mem_cmp_space_mutex_key, "collation space char data init", + PSI_FLAG_GLOBAL}, + {&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, + {&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, }; static PSI_rwlock_key 
key_rwlock_collation_exception_list; static PSI_rwlock_key key_rwlock_read_free_rpl_tables; static PSI_rwlock_key key_rwlock_skip_unique_check_tables; -static PSI_rwlock_info all_rocksdb_rwlocks[]= -{ - { &key_rwlock_collation_exception_list, "collation_exception_list", - PSI_FLAG_GLOBAL}, - { &key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, - { &key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", - PSI_FLAG_GLOBAL}, +static PSI_rwlock_info all_rocksdb_rwlocks[] = { + {&key_rwlock_collation_exception_list, "collation_exception_list", + PSI_FLAG_GLOBAL}, + {&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, + {&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", + PSI_FLAG_GLOBAL}, }; PSI_cond_key rdb_signal_bg_psi_cond_key, rdb_signal_drop_idx_psi_cond_key; -static PSI_cond_info all_rocksdb_conds[]= -{ - { &rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, - { &rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", - PSI_FLAG_GLOBAL}, +static PSI_cond_info all_rocksdb_conds[] = { + {&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", + PSI_FLAG_GLOBAL}, }; -static PSI_thread_info all_rocksdb_threads[]= -{ - { &rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, - { &rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, +static PSI_thread_info all_rocksdb_threads[] = { + {&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, + {&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, }; -static void init_rocksdb_psi_keys() -{ - const char* const category= "rocksdb"; +static void init_rocksdb_psi_keys() { + const char *const category = "rocksdb"; int count; if (PSI_server == nullptr) return; - count= array_elements(all_rocksdb_mutexes); + count = array_elements(all_rocksdb_mutexes); PSI_server->register_mutex(category, all_rocksdb_mutexes, count); 
- count= array_elements(all_rocksdb_rwlocks); + count = array_elements(all_rocksdb_rwlocks); PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count); - count= array_elements(all_rocksdb_conds); - // TODO Disabling PFS for conditions due to the bug https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 + count = array_elements(all_rocksdb_conds); + // TODO Disabling PFS for conditions due to the bug + // https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 // PSI_server->register_cond(category, all_rocksdb_conds, count); - count= array_elements(all_rocksdb_stages); + count = array_elements(all_rocksdb_stages); mysql_stage_register(category, all_rocksdb_stages, count); - count= array_elements(all_rocksdb_threads); + count = array_elements(all_rocksdb_threads); mysql_thread_register(category, all_rocksdb_threads, count); } #endif - /* Drop index thread's control */ @@ -1417,21 +1377,18 @@ static void init_rocksdb_psi_keys() static Rdb_drop_index_thread rdb_drop_idx_thread; static void rocksdb_drop_index_wakeup_thread( - my_core::THD* const thd __attribute__((__unused__)), - struct st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr __attribute__((__unused__)), - const void* const save) -{ - if (*static_cast(save)) { + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + if (*static_cast(save)) { rdb_drop_idx_thread.signal(); } } -static inline uint32_t rocksdb_perf_context_level(THD* const thd) -{ +static inline uint32_t rocksdb_perf_context_level(THD *const thd) { DBUG_ASSERT(thd != nullptr); - const int session_perf_context_level= THDVAR(thd, perf_context_level); + const int session_perf_context_level = THDVAR(thd, perf_context_level); if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return session_perf_context_level; } @@ -1441,7 +1398,7 @@ static inline uint32_t 
rocksdb_perf_context_level(THD* const thd) value. */ - const int global_perf_context_level= THDVAR(nullptr, perf_context_level); + const int global_perf_context_level = THDVAR(nullptr, perf_context_level); if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return global_perf_context_level; } @@ -1454,10 +1411,9 @@ static inline uint32_t rocksdb_perf_context_level(THD* const thd) Rdb_transaction::walk_tx_list() */ -interface Rdb_tx_list_walker -{ +interface Rdb_tx_list_walker { virtual ~Rdb_tx_list_walker() {} - virtual void process_tran(const Rdb_transaction* const) = 0; + virtual void process_tran(const Rdb_transaction *const) = 0; }; /* @@ -1465,74 +1421,68 @@ interface Rdb_tx_list_walker a snapshot gets created. */ -class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier -{ - Rdb_transaction* m_owning_tx; +class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier { + Rdb_transaction *m_owning_tx; void SnapshotCreated(const rocksdb::Snapshot *snapshot) override; - public: - Rdb_snapshot_notifier(const Rdb_snapshot_notifier&) = delete; - Rdb_snapshot_notifier& operator=(const Rdb_snapshot_notifier&) = delete; +public: + Rdb_snapshot_notifier(const Rdb_snapshot_notifier &) = delete; + Rdb_snapshot_notifier &operator=(const Rdb_snapshot_notifier &) = delete; - explicit Rdb_snapshot_notifier(Rdb_transaction* const owning_tx) - : m_owning_tx(owning_tx) {} + explicit Rdb_snapshot_notifier(Rdb_transaction *const owning_tx) + : m_owning_tx(owning_tx) {} // If the owning Rdb_transaction gets destructed we need to not reference // it anymore. - void detach() - { - m_owning_tx = nullptr; - } + void detach() { m_owning_tx = nullptr; } }; /* This is the base class for transactions when interacting with rocksdb. 
*/ -class Rdb_transaction -{ - protected: - ulonglong m_write_count= 0; - ulonglong m_lock_count= 0; +class Rdb_transaction { +protected: + ulonglong m_write_count = 0; + ulonglong m_lock_count = 0; - bool m_is_delayed_snapshot= false; - bool m_is_two_phase= false; + bool m_is_delayed_snapshot = false; + bool m_is_two_phase = false; - THD* m_thd= nullptr; + THD *m_thd = nullptr; rocksdb::ReadOptions m_read_opts; - static std::multiset s_tx_list; + static std::multiset s_tx_list; static mysql_mutex_t s_tx_list_mutex; - Rdb_io_perf* m_tbl_io_perf; + Rdb_io_perf *m_tbl_io_perf; - bool m_tx_read_only= false; + bool m_tx_read_only = false; int m_timeout_sec; /* Cached value of @@rocksdb_lock_wait_timeout */ /* Maximum number of locks the transaction can have */ ulonglong m_max_row_locks; - bool m_is_tx_failed= false; - bool m_rollback_only= false; + bool m_is_tx_failed = false; + bool m_rollback_only = false; std::shared_ptr m_notifier; // This should be used only when updating binlog information. 
- virtual rocksdb::WriteBatchBase* get_write_batch()= 0; - virtual bool commit_no_binlog()= 0; - virtual rocksdb::Iterator *get_iterator( - const rocksdb::ReadOptions &options, - rocksdb::ColumnFamilyHandle* column_family)= 0; - + virtual rocksdb::WriteBatchBase *get_write_batch() = 0; + virtual bool commit_no_binlog() = 0; + virtual rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *column_family) = 0; public: - const char* m_mysql_log_file_name; + const char *m_mysql_log_file_name; my_off_t m_mysql_log_offset; - const char* m_mysql_gtid; - const char* m_mysql_max_gtid; + const char *m_mysql_gtid; + const char *m_mysql_max_gtid; String m_detailed_error; - int64_t m_snapshot_timestamp= 0; + int64_t m_snapshot_timestamp = 0; bool m_ddl_transaction; /* @@ -1541,19 +1491,16 @@ public: */ virtual bool is_writebatch_trx() const = 0; - static void init_mutex() - { + static void init_mutex() { mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST); } - static void term_mutex() - { + static void term_mutex() { DBUG_ASSERT(s_tx_list.size() == 0); mysql_mutex_destroy(&s_tx_list_mutex); } - static void walk_tx_list(Rdb_tx_list_walker* walker) - { + static void walk_tx_list(Rdb_tx_list_walker *walker) { DBUG_ASSERT(walker != nullptr); mysql_mutex_lock(&s_tx_list_mutex); @@ -1562,15 +1509,12 @@ public: mysql_mutex_unlock(&s_tx_list_mutex); } - int set_status_error(THD* const thd, const rocksdb::Status &s, - const Rdb_key_def& kd, - Rdb_tbl_def* const tbl_def) - { + int set_status_error(THD *const thd, const rocksdb::Status &s, + const Rdb_key_def &kd, Rdb_tbl_def *const tbl_def) { DBUG_ASSERT(!s.ok()); DBUG_ASSERT(tbl_def != nullptr); - if (s.IsTimedOut()) - { + if (s.IsTimedOut()) { /* SQL layer has weird expectations. 
If we return an error when doing a read in DELETE IGNORE, it will ignore the error ("because it's @@ -1580,51 +1524,44 @@ public: rollback before returning HA_ERR_LOCK_WAIT_TIMEOUT: */ my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/); - m_detailed_error.copy(timeout_message("index", - tbl_def->full_tablename().c_str(), - kd.get_name().c_str())); + m_detailed_error.copy(timeout_message( + "index", tbl_def->full_tablename().c_str(), kd.get_name().c_str())); return HA_ERR_LOCK_WAIT_TIMEOUT; } - if (s.IsDeadlock()) - { + if (s.IsDeadlock()) { my_core::thd_mark_transaction_to_rollback(thd, false /* just statement */); return HA_ERR_LOCK_DEADLOCK; - } - else if (s.IsBusy()) - { + } else if (s.IsBusy()) { rocksdb_snapshot_conflict_errors++; - if (rocksdb_print_snapshot_conflict_queries) - { + if (rocksdb_print_snapshot_conflict_queries) { char user_host_buff[MAX_USER_HOST_SIZE + 1]; make_user_name(thd, user_host_buff); // NO_LINT_DEBUG sql_print_warning("Got snapshot conflict errors: User: %s " - "Query: %s", user_host_buff, thd->query()); + "Query: %s", + user_host_buff, thd->query()); } return HA_ERR_LOCK_DEADLOCK; } - if (s.IsLockLimit()) - { + if (s.IsLockLimit()) { return HA_ERR_ROCKSDB_TOO_MANY_LOCKS; } - if (s.IsIOError() || s.IsCorruption()) - { + if (s.IsIOError() || s.IsCorruption()) { rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); } my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str()); return HA_ERR_INTERNAL_ERROR; } - THD* get_thd() const { return m_thd; } + THD *get_thd() const { return m_thd; } /* Used for tracking io_perf counters */ - void io_perf_start(Rdb_io_perf* const io_perf) - { + void io_perf_start(Rdb_io_perf *const io_perf) { /* Since perf_context is tracked per thread, it is difficult and expensive to maintain perf_context on a per table basis. Therefore, roll all @@ -1641,37 +1578,31 @@ public: gather stats during commit/rollback is needed. 
*/ if (m_tbl_io_perf == nullptr && - io_perf->start(rocksdb_perf_context_level(m_thd))) - { - m_tbl_io_perf= io_perf; + io_perf->start(rocksdb_perf_context_level(m_thd))) { + m_tbl_io_perf = io_perf; } } - void io_perf_end_and_record(void) - { - if (m_tbl_io_perf != nullptr) - { + void io_perf_end_and_record(void) { + if (m_tbl_io_perf != nullptr) { m_tbl_io_perf->end_and_record(rocksdb_perf_context_level(m_thd)); - m_tbl_io_perf= nullptr; + m_tbl_io_perf = nullptr; } } - void io_perf_end_and_record(Rdb_io_perf* const io_perf) - { - if (m_tbl_io_perf == io_perf) - { + void io_perf_end_and_record(Rdb_io_perf *const io_perf) { + if (m_tbl_io_perf == io_perf) { io_perf_end_and_record(); } } - void set_params(int timeout_sec_arg, int max_row_locks_arg) - { - m_timeout_sec= timeout_sec_arg; - m_max_row_locks= max_row_locks_arg; + void set_params(int timeout_sec_arg, int max_row_locks_arg) { + m_timeout_sec = timeout_sec_arg; + m_max_row_locks = max_row_locks_arg; set_lock_timeout(timeout_sec_arg); } - virtual void set_lock_timeout(int timeout_sec_arg)= 0; + virtual void set_lock_timeout(int timeout_sec_arg) = 0; ulonglong get_write_count() const { return m_write_count; } @@ -1679,64 +1610,53 @@ public: ulonglong get_lock_count() const { return m_lock_count; } - virtual void set_sync(bool sync)= 0; + virtual void set_sync(bool sync) = 0; - virtual void release_lock(rocksdb::ColumnFamilyHandle* const column_family, - const std::string& rowkey)= 0; + virtual void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) = 0; - virtual bool prepare(const rocksdb::TransactionName& name)= 0; + virtual bool prepare(const rocksdb::TransactionName &name) = 0; - bool commit_or_rollback() - { + bool commit_or_rollback() { bool res; - if (m_is_tx_failed) - { + if (m_is_tx_failed) { rollback(); - res= false; - } - else - res= commit(); + res = false; + } else + res = commit(); return res; } - bool commit() - { - if (get_write_count() == 0) - { + 
bool commit() { + if (get_write_count() == 0) { rollback(); return false; - } - else if (m_rollback_only) - { - /* - Transactions marked as rollback_only are expected to be rolled back at - prepare(). But there are some exceptions like below that prepare() is - never called and commit() is called instead. - 1. Binlog is disabled - 2. No modification exists in binlog cache for the transaction (#195) - In both cases, rolling back transaction is safe. Nothing is written to - binlog. - */ + } else if (m_rollback_only) { + /* + Transactions marked as rollback_only are expected to be rolled back at + prepare(). But there are some exceptions like below that prepare() is + never called and commit() is called instead. + 1. Binlog is disabled + 2. No modification exists in binlog cache for the transaction (#195) + In both cases, rolling back transaction is safe. Nothing is written to + binlog. + */ my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); rollback(); return true; - } - else - { + } else { my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name, &m_mysql_log_offset, &m_mysql_gtid, &m_mysql_max_gtid); - binlog_manager.update(m_mysql_log_file_name, - m_mysql_log_offset, + binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset, m_mysql_max_gtid, get_write_batch()); return commit_no_binlog(); } } - virtual void rollback()= 0; + virtual void rollback() = 0; - void snapshot_created(const rocksdb::Snapshot* const snapshot) - { + void snapshot_created(const rocksdb::Snapshot *const snapshot) { DBUG_ASSERT(snapshot != nullptr); m_read_opts.snapshot = snapshot; @@ -1744,31 +1664,25 @@ public: m_is_delayed_snapshot = false; } - virtual void acquire_snapshot(bool acquire_now)= 0; - virtual void release_snapshot()= 0; + virtual void acquire_snapshot(bool acquire_now) = 0; + virtual void release_snapshot() = 0; - bool has_snapshot() const - { - return m_read_opts.snapshot != nullptr; - } + bool has_snapshot() const { return m_read_opts.snapshot != nullptr; } 
- private: +private: // The tables we are currently loading. In a partitioned table this can // have more than one entry - std::vector m_curr_bulk_load; + std::vector m_curr_bulk_load; - public: - int finish_bulk_load() - { - int rc= 0; +public: + int finish_bulk_load() { + int rc = 0; - std::vector::iterator it; - while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) - { - int rc2= (*it)->finalize_bulk_load(); - if (rc2 != 0 && rc == 0) - { - rc= rc2; + std::vector::iterator it; + while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) { + int rc2 = (*it)->finalize_bulk_load(); + if (rc2 != 0 && rc == 0) { + rc = rc2; } } @@ -1777,8 +1691,7 @@ public: return rc; } - void start_bulk_load(ha_rocksdb* const bulk_load) - { + void start_bulk_load(ha_rocksdb *const bulk_load) { /* If we already have an open bulk load of a table and the name doesn't match the current one, close out the currently running one. This allows @@ -1788,22 +1701,18 @@ public: DBUG_ASSERT(bulk_load != nullptr); if (!m_curr_bulk_load.empty() && - !bulk_load->same_table(*m_curr_bulk_load[0])) - { - const auto res= finish_bulk_load(); + !bulk_load->same_table(*m_curr_bulk_load[0])) { + const auto res = finish_bulk_load(); SHIP_ASSERT(res == 0); } m_curr_bulk_load.push_back(bulk_load); } - void end_bulk_load(ha_rocksdb* const bulk_load) - { + void end_bulk_load(ha_rocksdb *const bulk_load) { for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); - it++) - { - if (*it == bulk_load) - { + it++) { + if (*it == bulk_load) { m_curr_bulk_load.erase(it); return; } @@ -1813,10 +1722,7 @@ public: SHIP_ASSERT(0); } - int num_ongoing_bulk_load() const - { - return m_curr_bulk_load.size(); - } + int num_ongoing_bulk_load() const { return m_curr_bulk_load.size(); } /* Flush the data accumulated so far. This assumes we're doing a bulk insert. 
@@ -1831,8 +1737,7 @@ public: Add test coverage for what happens when somebody attempts to do bulk inserts while inside a multi-statement transaction. */ - bool flush_batch() - { + bool flush_batch() { if (get_write_count() == 0) return false; @@ -1845,42 +1750,39 @@ public: return false; } - virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - const rocksdb::Slice& value)= 0; - virtual rocksdb::Status delete_key( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key)= 0; - virtual rocksdb::Status single_delete( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key)= 0; + virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) = 0; + virtual rocksdb::Status + delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) = 0; + virtual rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) = 0; - virtual bool has_modifications() const= 0; + virtual bool has_modifications() const = 0; - virtual rocksdb::WriteBatchBase* get_indexed_write_batch()= 0; + virtual rocksdb::WriteBatchBase *get_indexed_write_batch() = 0; /* Return a WriteBatch that one can write to. The writes will skip any transaction locking. The writes will NOT be visible to the transaction. 
*/ - rocksdb::WriteBatchBase* get_blind_write_batch() - { + rocksdb::WriteBatchBase *get_blind_write_batch() { return get_indexed_write_batch()->GetWriteBatch(); } - virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* value) const= 0; - virtual rocksdb::Status get_for_update( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, std::string* const value, bool exclusive)= 0; + virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *value) const = 0; + virtual rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value, + bool exclusive) = 0; - rocksdb::Iterator *get_iterator( - rocksdb::ColumnFamilyHandle* const column_family, - bool skip_bloom_filter, - bool fill_cache, - bool read_current= false, - bool create_snapshot= true) - { + rocksdb::Iterator * + get_iterator(rocksdb::ColumnFamilyHandle *const column_family, + bool skip_bloom_filter, bool fill_cache, + bool read_current = false, bool create_snapshot = true) { // Make sure we are not doing both read_current (which implies we don't // want a snapshot) and create_snapshot which makes sure we create // a snapshot @@ -1890,56 +1792,48 @@ public: if (create_snapshot) acquire_snapshot(true); - rocksdb::ReadOptions options= m_read_opts; + rocksdb::ReadOptions options = m_read_opts; - if (skip_bloom_filter) - { - options.total_order_seek= true; - } - else - { + if (skip_bloom_filter) { + options.total_order_seek = true; + } else { // With this option, Iterator::Valid() returns false if key // is outside of the prefix bloom filter range set at Seek(). // Must not be set to true if not using bloom filter. 
- options.prefix_same_as_start= true; + options.prefix_same_as_start = true; } - options.fill_cache= fill_cache; - if (read_current) - { - options.snapshot= nullptr; + options.fill_cache = fill_cache; + if (read_current) { + options.snapshot = nullptr; } return get_iterator(options, column_family); } - virtual bool is_tx_started() const= 0; - virtual void start_tx()= 0; - virtual void start_stmt()= 0; - virtual void rollback_stmt()= 0; + virtual bool is_tx_started() const = 0; + virtual void start_tx() = 0; + virtual void start_stmt() = 0; + virtual void rollback_stmt() = 0; - void set_tx_failed(bool failed_arg) { m_is_tx_failed= failed_arg; } + void set_tx_failed(bool failed_arg) { m_is_tx_failed = failed_arg; } - bool can_prepare() const - { - if (m_rollback_only) - { + bool can_prepare() const { + if (m_rollback_only) { my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); return false; } return true; } - int rollback_to_savepoint(void* const savepoint) - { - if (has_modifications()) - { + int rollback_to_savepoint(void *const savepoint) { + if (has_modifications()) { my_printf_error(ER_UNKNOWN_ERROR, "MyRocks currently does not support ROLLBACK TO " "SAVEPOINT if modifying rows.", MYF(0)); - m_rollback_only= true; - return 1; + m_rollback_only = true; + return HA_EXIT_FAILURE; } - return 0; + return HA_EXIT_SUCCESS; } /* @@ -1948,23 +1842,14 @@ public: snapshot has to be created via DB::GetSnapshot(), not via Transaction API. 
*/ - bool is_tx_read_only() const - { - return m_tx_read_only; - } + bool is_tx_read_only() const { return m_tx_read_only; } - bool is_two_phase() const - { - return m_is_two_phase; - } + bool is_two_phase() const { return m_is_two_phase; } - void set_tx_read_only(bool val) - { - m_tx_read_only= val; - } + void set_tx_read_only(bool val) { m_tx_read_only = val; } - explicit Rdb_transaction(THD* const thd): m_thd(thd), m_tbl_io_perf(nullptr) - { + explicit Rdb_transaction(THD *const thd) + : m_thd(thd), m_tbl_io_perf(nullptr) { mysql_mutex_lock(&s_tx_list_mutex); s_tx_list.insert(this); mysql_mutex_unlock(&s_tx_list_mutex); @@ -1986,92 +1871,79 @@ public: The changes are made to individual tables, which store them here and then this object commits them on commit. */ -class Rdb_transaction_impl : public Rdb_transaction -{ - rocksdb::Transaction *m_rocksdb_tx= nullptr; - rocksdb::Transaction *m_rocksdb_reuse_tx= nullptr; +class Rdb_transaction_impl : public Rdb_transaction { + rocksdb::Transaction *m_rocksdb_tx = nullptr; + rocksdb::Transaction *m_rocksdb_reuse_tx = nullptr; - public: - void set_lock_timeout(int timeout_sec_arg) override - { +public: + void set_lock_timeout(int timeout_sec_arg) override { if (m_rocksdb_tx) m_rocksdb_tx->SetLockTimeout(rdb_convert_sec_to_ms(m_timeout_sec)); } - void set_sync(bool sync) override - { - m_rocksdb_tx->GetWriteOptions()->sync= sync; + void set_sync(bool sync) override { + m_rocksdb_tx->GetWriteOptions()->sync = sync; } - void release_lock(rocksdb::ColumnFamilyHandle* const column_family, - const std::string &rowkey) override - { - if (!THDVAR(m_thd, lock_scanned_rows)) - { + void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) override { + if (!THDVAR(m_thd, lock_scanned_rows)) { m_rocksdb_tx->UndoGetForUpdate(column_family, rocksdb::Slice(rowkey)); } } virtual bool is_writebatch_trx() const override { return false; } - private: - void release_tx(void) - { +private: + void 
release_tx(void) { // We are done with the current active transaction object. Preserve it // for later reuse. DBUG_ASSERT(m_rocksdb_reuse_tx == nullptr); - m_rocksdb_reuse_tx= m_rocksdb_tx; - m_rocksdb_tx= nullptr; + m_rocksdb_reuse_tx = m_rocksdb_tx; + m_rocksdb_tx = nullptr; } - bool prepare(const rocksdb::TransactionName& name) override - { + bool prepare(const rocksdb::TransactionName &name) override { rocksdb::Status s; - s= m_rocksdb_tx->SetName(name); - if (!s.ok()) - { + s = m_rocksdb_tx->SetName(name); + if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); return false; } - s= m_rocksdb_tx->Prepare(); - if (!s.ok()) - { + s = m_rocksdb_tx->Prepare(); + if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); return false; } return true; } - bool commit_no_binlog() override - { - bool res= false; + bool commit_no_binlog() override { + bool res = false; release_snapshot(); - const rocksdb::Status s= m_rocksdb_tx->Commit(); - if (!s.ok()) - { + const rocksdb::Status s = m_rocksdb_tx->Commit(); + if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); - res= true; + res = true; } /* Save the transaction object to be reused */ release_tx(); - m_write_count= 0; - m_lock_count= 0; + m_write_count = 0; + m_lock_count = 0; set_tx_read_only(false); - m_rollback_only= false; + m_rollback_only = false; return res; } - public: - void rollback() override - { - m_write_count= 0; - m_lock_count= 0; - m_ddl_transaction= false; - if (m_rocksdb_tx) - { +public: + void rollback() override { + m_write_count = 0; + m_lock_count = 0; + m_ddl_transaction = false; + if (m_rocksdb_tx) { release_snapshot(); /* This will also release all of the locks: */ m_rocksdb_tx->Rollback(); @@ -2080,41 +1952,33 @@ class Rdb_transaction_impl : public Rdb_transaction release_tx(); set_tx_read_only(false); - m_rollback_only= false; + m_rollback_only = false; } } - void acquire_snapshot(bool acquire_now) override - { + void acquire_snapshot(bool acquire_now) override { if 
(m_read_opts.snapshot == nullptr) { if (is_tx_read_only()) { snapshot_created(rdb->GetSnapshot()); - } - else if (acquire_now) { + } else if (acquire_now) { m_rocksdb_tx->SetSnapshot(); snapshot_created(m_rocksdb_tx->GetSnapshot()); - } - else if (!m_is_delayed_snapshot) { + } else if (!m_is_delayed_snapshot) { m_rocksdb_tx->SetSnapshotOnNextOperation(m_notifier); m_is_delayed_snapshot = true; } } } - void release_snapshot() override - { + void release_snapshot() override { bool need_clear = m_is_delayed_snapshot; - if (m_read_opts.snapshot != nullptr) - { + if (m_read_opts.snapshot != nullptr) { m_snapshot_timestamp = 0; - if (is_tx_read_only()) - { + if (is_tx_read_only()) { rdb->ReleaseSnapshot(m_read_opts.snapshot); need_clear = false; - } - else - { + } else { need_clear = true; } m_read_opts.snapshot = nullptr; @@ -2124,15 +1988,11 @@ class Rdb_transaction_impl : public Rdb_transaction m_rocksdb_tx->ClearSnapshot(); } - bool has_snapshot() - { - return m_read_opts.snapshot != nullptr; - } + bool has_snapshot() { return m_read_opts.snapshot != nullptr; } - rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - const rocksdb::Slice& value) override - { + rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) @@ -2140,9 +2000,8 @@ class Rdb_transaction_impl : public Rdb_transaction return m_rocksdb_tx->Put(column_family, key, value); } - rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key) override - { + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) @@ -2150,10 +2009,9 @@ class Rdb_transaction_impl : public 
Rdb_transaction return m_rocksdb_tx->Delete(column_family, key); } - rocksdb::Status single_delete( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key) override - { + rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { ++m_write_count; ++m_lock_count; if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) @@ -2161,17 +2019,14 @@ class Rdb_transaction_impl : public Rdb_transaction return m_rocksdb_tx->SingleDelete(column_family, key); } - bool has_modifications() const override - { + bool has_modifications() const override { return m_rocksdb_tx->GetWriteBatch() && m_rocksdb_tx->GetWriteBatch()->GetWriteBatch() && m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()->Count() > 0; } - rocksdb::WriteBatchBase* get_write_batch() override - { - if (is_two_phase()) - { + rocksdb::WriteBatchBase *get_write_batch() override { + if (is_two_phase()) { return m_rocksdb_tx->GetCommitTimeWriteBatch(); } return m_rocksdb_tx->GetWriteBatch()->GetWriteBatch(); @@ -2181,24 +2036,21 @@ class Rdb_transaction_impl : public Rdb_transaction Return a WriteBatch that one can write to. The writes will skip any transaction locking. The writes WILL be visible to the transaction. 
*/ - rocksdb::WriteBatchBase* get_indexed_write_batch() override - { + rocksdb::WriteBatchBase *get_indexed_write_batch() override { ++m_write_count; return m_rocksdb_tx->GetWriteBatch(); } - rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* value) const override - { + rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *value) const override { return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); } - rocksdb::Status get_for_update( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* const value, bool exclusive) override - { + rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value, + bool exclusive) override { if (++m_lock_count > m_max_row_locks) return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); @@ -2206,45 +2058,40 @@ class Rdb_transaction_impl : public Rdb_transaction exclusive); } - rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options, - rocksdb::ColumnFamilyHandle* const column_family) - override - { + rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *const column_family) override { return m_rocksdb_tx->GetIterator(options, column_family); } - const rocksdb::Transaction* get_rdb_trx() const { return m_rocksdb_tx; } + const rocksdb::Transaction *get_rdb_trx() const { return m_rocksdb_tx; } - bool is_tx_started() const override - { - return (m_rocksdb_tx != nullptr); - } + bool is_tx_started() const override { return (m_rocksdb_tx != nullptr); } - void start_tx() override - { + void start_tx() override { rocksdb::TransactionOptions tx_opts; rocksdb::WriteOptions write_opts; - tx_opts.set_snapshot= false; - tx_opts.lock_timeout= rdb_convert_sec_to_ms(m_timeout_sec); - tx_opts.deadlock_detect= THDVAR(m_thd, deadlock_detect); + 
tx_opts.set_snapshot = false; + tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec); + tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect); - write_opts.sync= THDVAR(m_thd, write_sync); - write_opts.disableWAL= THDVAR(m_thd, write_disable_wal); - write_opts.ignore_missing_column_families= - THDVAR(m_thd, write_ignore_missing_column_families); - m_is_two_phase= !rocksdb_disable_2pc; + write_opts.sync = THDVAR(m_thd, write_sync); + write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families = + THDVAR(m_thd, write_ignore_missing_column_families); + m_is_two_phase = rocksdb_enable_2pc; /* If m_rocksdb_reuse_tx is null this will create a new transaction object. Otherwise it will reuse the existing one. */ - m_rocksdb_tx= rdb->BeginTransaction(write_opts, tx_opts, - m_rocksdb_reuse_tx); - m_rocksdb_reuse_tx= nullptr; + m_rocksdb_tx = + rdb->BeginTransaction(write_opts, tx_opts, m_rocksdb_reuse_tx); + m_rocksdb_reuse_tx = nullptr; - m_read_opts= rocksdb::ReadOptions(); + m_read_opts = rocksdb::ReadOptions(); - m_ddl_transaction= false; + m_ddl_transaction = false; } /* @@ -2256,8 +2103,7 @@ class Rdb_transaction_impl : public Rdb_transaction For hooking to start of statement that is its own transaction, see ha_rocksdb::external_lock(). 
*/ - void start_stmt() override - { + void start_stmt() override { // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation) acquire_snapshot(false); m_rocksdb_tx->SetSavePoint(); @@ -2267,17 +2113,14 @@ class Rdb_transaction_impl : public Rdb_transaction This must be called when last statement is rolled back, but the transaction continues */ - void rollback_stmt() override - { + void rollback_stmt() override { /* TODO: here we must release the locks taken since the start_stmt() call */ - if (m_rocksdb_tx) - { - const rocksdb::Snapshot* const org_snapshot = m_rocksdb_tx->GetSnapshot(); + if (m_rocksdb_tx) { + const rocksdb::Snapshot *const org_snapshot = m_rocksdb_tx->GetSnapshot(); m_rocksdb_tx->RollbackToSavePoint(); - const rocksdb::Snapshot* const cur_snapshot = m_rocksdb_tx->GetSnapshot(); - if (org_snapshot != cur_snapshot) - { + const rocksdb::Snapshot *const cur_snapshot = m_rocksdb_tx->GetSnapshot(); + if (org_snapshot != cur_snapshot) { if (org_snapshot != nullptr) m_snapshot_timestamp = 0; @@ -2290,15 +2133,13 @@ class Rdb_transaction_impl : public Rdb_transaction } } - explicit Rdb_transaction_impl(THD* const thd) : - Rdb_transaction(thd), m_rocksdb_tx(nullptr) - { + explicit Rdb_transaction_impl(THD *const thd) + : Rdb_transaction(thd), m_rocksdb_tx(nullptr) { // Create a notifier that can be called when a snapshot gets generated. m_notifier = std::make_shared(this); } - virtual ~Rdb_transaction_impl() - { + virtual ~Rdb_transaction_impl() { rollback(); // Theoretically the notifier could outlive the Rdb_transaction_impl @@ -2320,90 +2161,75 @@ class Rdb_transaction_impl : public Rdb_transaction to be non-conflicting. Any further usage of this class should completely be thought thoroughly. 
*/ -class Rdb_writebatch_impl : public Rdb_transaction -{ - rocksdb::WriteBatchWithIndex* m_batch; +class Rdb_writebatch_impl : public Rdb_transaction { + rocksdb::WriteBatchWithIndex *m_batch; rocksdb::WriteOptions write_opts; // Called after commit/rollback. - void reset() - { + void reset() { m_batch->Clear(); m_read_opts = rocksdb::ReadOptions(); - m_ddl_transaction= false; - } - private: - bool prepare(const rocksdb::TransactionName& name) override - { - return true; + m_ddl_transaction = false; } - bool commit_no_binlog() override - { - bool res= false; +private: + bool prepare(const rocksdb::TransactionName &name) override { return true; } + + bool commit_no_binlog() override { + bool res = false; release_snapshot(); - const rocksdb::Status s= rdb->GetBaseDB()->Write(write_opts, - m_batch->GetWriteBatch()); - if (!s.ok()) - { + const rocksdb::Status s = + rdb->GetBaseDB()->Write(write_opts, m_batch->GetWriteBatch()); + if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); - res= true; + res = true; } reset(); - m_write_count= 0; + m_write_count = 0; set_tx_read_only(false); - m_rollback_only= false; + m_rollback_only = false; return res; } - public: + +public: bool is_writebatch_trx() const override { return true; } - void set_lock_timeout(int timeout_sec_arg) override - { + void set_lock_timeout(int timeout_sec_arg) override { // Nothing to do here. } - void set_sync(bool sync) override - { - write_opts.sync= sync; - } + void set_sync(bool sync) override { write_opts.sync = sync; } - void release_lock(rocksdb::ColumnFamilyHandle* const column_family, - const std::string &rowkey) override - { + void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) override { // Nothing to do here since we don't hold any row locks. 
} - void rollback() override - { - m_write_count= 0; - m_lock_count= 0; + void rollback() override { + m_write_count = 0; + m_lock_count = 0; release_snapshot(); reset(); set_tx_read_only(false); - m_rollback_only= false; + m_rollback_only = false; } - void acquire_snapshot(bool acquire_now) override - { + void acquire_snapshot(bool acquire_now) override { if (m_read_opts.snapshot == nullptr) snapshot_created(rdb->GetSnapshot()); } - void release_snapshot() override - { - if (m_read_opts.snapshot != nullptr) - { + void release_snapshot() override { + if (m_read_opts.snapshot != nullptr) { rdb->ReleaseSnapshot(m_read_opts.snapshot); m_read_opts.snapshot = nullptr; } } - rocksdb::Status put(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - const rocksdb::Slice& value) override - { + rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) override { ++m_write_count; m_batch->Put(column_family, key, value); // Note Put/Delete in write batch doesn't return any error code. 
We simply @@ -2411,180 +2237,145 @@ class Rdb_writebatch_impl : public Rdb_transaction return rocksdb::Status::OK(); } - rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key) override - { + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { ++m_write_count; m_batch->Delete(column_family, key); return rocksdb::Status::OK(); } - rocksdb::Status single_delete( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key) override - { + rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { ++m_write_count; m_batch->SingleDelete(column_family, key); return rocksdb::Status::OK(); } - bool has_modifications() const override - { + bool has_modifications() const override { return m_batch->GetWriteBatch()->Count() > 0; } - rocksdb::WriteBatchBase* get_write_batch() override - { - return m_batch; - } + rocksdb::WriteBatchBase *get_write_batch() override { return m_batch; } - rocksdb::WriteBatchBase* get_indexed_write_batch() override - { + rocksdb::WriteBatchBase *get_indexed_write_batch() override { ++m_write_count; return m_batch; } - rocksdb::Status get(rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* const value) const override - { - return m_batch->GetFromBatchAndDB( - rdb, m_read_opts, column_family, key, value); + rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *const value) const override { + return m_batch->GetFromBatchAndDB(rdb, m_read_opts, column_family, key, + value); } - rocksdb::Status get_for_update( - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* const value, bool exclusive) override - { + rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, 
std::string *const value, + bool exclusive) override { return get(column_family, key, value); } - rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options, - rocksdb::ColumnFamilyHandle* const column_family) - override - { + rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *const column_family) override { const auto it = rdb->NewIterator(options); return m_batch->NewIteratorWithBase(it); } - bool is_tx_started() const override - { - return (m_batch != nullptr); - } + bool is_tx_started() const override { return (m_batch != nullptr); } - void start_tx() override - { + void start_tx() override { reset(); - write_opts.sync= THDVAR(m_thd, write_sync); - write_opts.disableWAL= THDVAR(m_thd, write_disable_wal); - write_opts.ignore_missing_column_families= - THDVAR(m_thd, write_ignore_missing_column_families); + write_opts.sync = THDVAR(m_thd, write_sync); + write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families = + THDVAR(m_thd, write_ignore_missing_column_families); } - void start_stmt() override - { - m_batch->SetSavePoint(); - } + void start_stmt() override { m_batch->SetSavePoint(); } - void rollback_stmt() override - { + void rollback_stmt() override { if (m_batch) m_batch->RollbackToSavePoint(); } - explicit Rdb_writebatch_impl(THD* const thd) : - Rdb_transaction(thd), m_batch(nullptr) - { - m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), - 0 , true); + explicit Rdb_writebatch_impl(THD *const thd) + : Rdb_transaction(thd), m_batch(nullptr) { + m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, + true); } - virtual ~Rdb_writebatch_impl() - { + virtual ~Rdb_writebatch_impl() { rollback(); delete m_batch; } }; void Rdb_snapshot_notifier::SnapshotCreated( - const rocksdb::Snapshot* const snapshot) -{ - if (m_owning_tx != nullptr) - { + const rocksdb::Snapshot *const snapshot) { + if (m_owning_tx != nullptr) { 
m_owning_tx->snapshot_created(snapshot); } } -std::multiset Rdb_transaction::s_tx_list; +std::multiset Rdb_transaction::s_tx_list; mysql_mutex_t Rdb_transaction::s_tx_list_mutex; -static Rdb_transaction* &get_tx_from_thd(THD* const thd) -{ - return *reinterpret_cast( - my_core::thd_ha_data(thd, rocksdb_hton)); +static Rdb_transaction *&get_tx_from_thd(THD *const thd) { + return *reinterpret_cast( + my_core::thd_ha_data(thd, rocksdb_hton)); } namespace { -class Rdb_perf_context_guard -{ +class Rdb_perf_context_guard { Rdb_io_perf m_io_perf; THD *m_thd; - public: - Rdb_perf_context_guard(const Rdb_perf_context_guard&) = delete; - Rdb_perf_context_guard& operator=(const Rdb_perf_context_guard&) = delete; +public: + Rdb_perf_context_guard(const Rdb_perf_context_guard &) = delete; + Rdb_perf_context_guard &operator=(const Rdb_perf_context_guard &) = delete; - explicit Rdb_perf_context_guard(THD* const thd) : m_thd(thd) - { - Rdb_transaction*& tx= get_tx_from_thd(m_thd); + explicit Rdb_perf_context_guard(THD *const thd) : m_thd(thd) { + Rdb_transaction *&tx = get_tx_from_thd(m_thd); /* if perf_context information is already being recorded, this becomes a no-op */ - if (tx != nullptr) - { + if (tx != nullptr) { tx->io_perf_start(&m_io_perf); } } - ~Rdb_perf_context_guard() - { - Rdb_transaction*& tx= get_tx_from_thd(m_thd); - if (tx != nullptr) - { + ~Rdb_perf_context_guard() { + Rdb_transaction *&tx = get_tx_from_thd(m_thd); + if (tx != nullptr) { tx->io_perf_end_and_record(); } } }; -} // anonymous namespace +} // anonymous namespace /* TODO: maybe, call this in external_lock() and store in ha_rocksdb.. */ -static Rdb_transaction *get_or_create_tx(THD* const thd) -{ - Rdb_transaction*& tx= get_tx_from_thd(thd); +static Rdb_transaction *get_or_create_tx(THD *const thd) { + Rdb_transaction *&tx = get_tx_from_thd(thd); // TODO: this is called too many times.. 
O(#rows) - if (tx == nullptr) - { - if (rpl_skip_tx_api_var && thd->rli_slave) - tx= new Rdb_writebatch_impl(thd); + if (tx == nullptr) { + if (rpl_skip_tx_api && thd->rli_slave) + tx = new Rdb_writebatch_impl(thd); else - tx= new Rdb_transaction_impl(thd); - tx->set_params(THDVAR(thd, lock_wait_timeout), - THDVAR(thd, max_row_locks)); + tx = new Rdb_transaction_impl(thd); + tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); tx->start_tx(); - } - else - { - tx->set_params(THDVAR(thd, lock_wait_timeout), - THDVAR(thd, max_row_locks)); - if (!tx->is_tx_started()) - { + } else { + tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); + if (!tx->is_tx_started()) { tx->start_tx(); } } @@ -2592,33 +2383,29 @@ static Rdb_transaction *get_or_create_tx(THD* const thd) return tx; } - -static int rocksdb_close_connection(handlerton* const hton, THD* const thd) -{ - Rdb_transaction*& tx= get_tx_from_thd(thd); - if (tx != nullptr) - { - int rc= tx->finish_bulk_load(); - if (rc != 0) - { +static int rocksdb_close_connection(handlerton *const hton, THD *const thd) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + if (tx != nullptr) { + int rc = tx->finish_bulk_load(); + if (rc != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: Error %d finalizing last SST file while " - "disconnecting", rc); + "disconnecting", + rc); abort_with_stack_traces(); } delete tx; - tx= nullptr; + tx = nullptr; } - return 0; + return HA_EXIT_SUCCESS; } /* * Serializes an xid to a string so that it can * be used as a rocksdb transaction name */ -static std::string rdb_xid_to_string(const XID& src) -{ +static std::string rdb_xid_to_string(const XID &src) { DBUG_ASSERT(src.gtrid_length >= 0 && src.gtrid_length <= MAXGTRIDSIZE); DBUG_ASSERT(src.bqual_length >= 0 && src.bqual_length <= MAXBQUALSIZE); @@ -2630,10 +2417,10 @@ static std::string rdb_xid_to_string(const XID& src) * then reinterpret bit pattern as unsigned and store in network order */ uchar 
fidbuf[RDB_FORMATID_SZ]; - int64 signed_fid8= src.formatID; - const uint64 raw_fid8= *reinterpret_cast(&signed_fid8); + int64 signed_fid8 = src.formatID; + const uint64 raw_fid8 = *reinterpret_cast(&signed_fid8); rdb_netbuf_store_uint64(fidbuf, raw_fid8); - buf.append(reinterpret_cast(fidbuf), RDB_FORMATID_SZ); + buf.append(reinterpret_cast(fidbuf), RDB_FORMATID_SZ); buf.push_back(src.gtrid_length); buf.push_back(src.bqual_length); @@ -2641,35 +2428,30 @@ static std::string rdb_xid_to_string(const XID& src) return buf; } - /** Called by hton->flush_logs after MySQL group commit prepares a set of transactions. */ -static bool rocksdb_flush_wal( - handlerton* const hton __attribute__((__unused__)), - ulonglong target_lsn __attribute__((__unused__))) -{ +static bool rocksdb_flush_wal(handlerton *const hton MY_ATTRIBUTE((__unused__)), + ulonglong target_lsn MY_ATTRIBUTE((__unused__))) { DBUG_ASSERT(rdb != nullptr); rocksdb_wal_group_syncs++; - const rocksdb::Status s= rdb->SyncWAL(); + const rocksdb::Status s = rdb->SyncWAL(); if (!s.ok()) { - return 1; + return HA_EXIT_FAILURE; } - return 0; + return HA_EXIT_SUCCESS; } /** For a slave, prepare() updates the slave_gtid_info table which tracks the replication progress. 
*/ -static int rocksdb_prepare(handlerton* const hton, THD* const thd, - bool prepare_tx, bool async) -{ - Rdb_transaction*& tx= get_tx_from_thd(thd); - if (!tx->can_prepare()) - { - return 1; +static int rocksdb_prepare(handlerton *const hton, THD *const thd, + bool prepare_tx, bool async) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + if (!tx->can_prepare()) { + return HA_EXIT_FAILURE; } if (prepare_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { @@ -2678,7 +2460,7 @@ static int rocksdb_prepare(handlerton* const hton, THD* const thd, std::vector slave_gtid_info; my_core::thd_slave_gtid_info(thd, &slave_gtid_info); for (const auto &it : slave_gtid_info) { - rocksdb::WriteBatchBase* const write_batch = tx->get_blind_write_batch(); + rocksdb::WriteBatchBase *const write_batch = tx->get_blind_write_batch(); binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch); } @@ -2687,12 +2469,12 @@ static int rocksdb_prepare(handlerton* const hton, THD* const thd, tx->set_sync(false); } XID xid; - thd_get_xid(thd, reinterpret_cast(&xid)); + thd_get_xid(thd, reinterpret_cast(&xid)); if (!tx->prepare(rdb_xid_to_string(xid))) { - return 1; + return HA_EXIT_FAILURE; } - if (thd->durability_property == HA_IGNORE_DURABILITY - && THDVAR(thd, write_sync)) { + if (thd->durability_property == HA_IGNORE_DURABILITY && + THDVAR(thd, write_sync)) { /** we set the log sequence as '1' just to trigger hton->flush_logs */ @@ -2703,92 +2485,86 @@ static int rocksdb_prepare(handlerton* const hton, THD* const thd, DEBUG_SYNC(thd, "rocksdb.prepared"); } - return 0; + return HA_EXIT_SUCCESS; } /** do nothing for prepare/commit by xid this is needed to avoid crashes in XA scenarios */ -static int rocksdb_commit_by_xid(handlerton* const hton, XID* const xid) -{ - const auto name= rdb_xid_to_string(*xid); - rocksdb::Transaction* const trx= rdb->GetTransactionByName(name); +static int rocksdb_commit_by_xid(handlerton *const hton, XID *const xid) { + 
const auto name = rdb_xid_to_string(*xid); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); if (trx == nullptr) { - return 1; + return HA_EXIT_FAILURE; } - const rocksdb::Status s= trx->Commit(); + const rocksdb::Status s = trx->Commit(); if (!s.ok()) { - return 1; + return HA_EXIT_FAILURE; } delete trx; - return 0; + return HA_EXIT_SUCCESS; } -static int rocksdb_rollback_by_xid( - handlerton* const hton __attribute__((__unused__)), - XID* const xid) -{ - const auto name= rdb_xid_to_string(*xid); - rocksdb::Transaction* const trx= rdb->GetTransactionByName(name); +static int +rocksdb_rollback_by_xid(handlerton *const hton MY_ATTRIBUTE((__unused__)), + XID *const xid) { + const auto name = rdb_xid_to_string(*xid); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); if (trx == nullptr) { - return 1; + return HA_EXIT_FAILURE; } - const rocksdb::Status s= trx->Rollback(); + const rocksdb::Status s = trx->Rollback(); if (!s.ok()) { - return 1; + return HA_EXIT_FAILURE; } delete trx; - return 0; + return HA_EXIT_SUCCESS; } /** Rebuilds an XID from a serialized version stored in a string. 
*/ -static void rdb_xid_from_string(const std::string& src, XID* const dst) -{ +static void rdb_xid_from_string(const std::string &src, XID *const dst) { DBUG_ASSERT(dst != nullptr); - uint offset= 0; - uint64 raw_fid8= - rdb_netbuf_to_uint64(reinterpret_cast(src.data())); - const int64 signed_fid8= *reinterpret_cast(&raw_fid8); - dst->formatID= signed_fid8; + uint offset = 0; + uint64 raw_fid8 = + rdb_netbuf_to_uint64(reinterpret_cast(src.data())); + const int64 signed_fid8 = *reinterpret_cast(&raw_fid8); + dst->formatID = signed_fid8; offset += RDB_FORMATID_SZ; - dst->gtrid_length= src.at(offset); + dst->gtrid_length = src.at(offset); offset += RDB_GTRID_SZ; - dst->bqual_length= src.at(offset); + dst->bqual_length = src.at(offset); offset += RDB_BQUAL_SZ; DBUG_ASSERT(dst->gtrid_length >= 0 && dst->gtrid_length <= MAXGTRIDSIZE); DBUG_ASSERT(dst->bqual_length >= 0 && dst->bqual_length <= MAXBQUALSIZE); - src.copy(dst->data, (dst->gtrid_length)+(dst->bqual_length), RDB_XIDHDR_LEN); + src.copy(dst->data, (dst->gtrid_length) + (dst->bqual_length), + RDB_XIDHDR_LEN); } /** Reading last committed binary log info from RocksDB system row. The info is needed for crash safe slave/master to work. 
*/ -static int rocksdb_recover(handlerton* const hton, XID* const xid_list, - uint len, char* const binlog_file, - my_off_t* const binlog_pos, - Gtid* const binlog_max_gtid) -{ - if (binlog_file && binlog_pos) - { - char file_buf[FN_REFLEN+1]= {0}; +static int rocksdb_recover(handlerton *const hton, XID *const xid_list, + uint len, char *const binlog_file, + my_off_t *const binlog_pos, + Gtid *const binlog_max_gtid) { + if (binlog_file && binlog_pos) { + char file_buf[FN_REFLEN + 1] = {0}; my_off_t pos; - char gtid_buf[FN_REFLEN+1]= {0}; - if (binlog_manager.read(file_buf, &pos, gtid_buf)) - { - if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos)) - { + char gtid_buf[FN_REFLEN + 1] = {0}; + if (binlog_manager.read(file_buf, &pos, gtid_buf)) { + if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos)) { memcpy(binlog_file, file_buf, FN_REFLEN + 1); - *binlog_pos= pos; + *binlog_pos = pos; fprintf(stderr, "RocksDB: Last binlog file position %llu," - " file name %s\n", pos, file_buf); - if (*gtid_buf) - { + " file name %s\n", + pos, file_buf); + if (*gtid_buf) { global_sid_lock->rdlock(); binlog_max_gtid->parse(global_sid_map, gtid_buf); global_sid_lock->unlock(); @@ -2798,44 +2574,41 @@ static int rocksdb_recover(handlerton* const hton, XID* const xid_list, } } - if (len == 0 || xid_list == nullptr) - { - return 0; + if (len == 0 || xid_list == nullptr) { + return HA_EXIT_SUCCESS; } - std::vector trans_list; + std::vector trans_list; rdb->GetAllPreparedTransactions(&trans_list); - uint count= 0; - for (auto& trans : trans_list) - { - if (count >= len) - { + uint count = 0; + for (auto &trans : trans_list) { + if (count >= len) { break; } - auto name= trans->GetName(); + auto name = trans->GetName(); rdb_xid_from_string(name, &xid_list[count]); count++; } return count; } -static int rocksdb_commit(handlerton* const hton, THD* const thd, - bool commit_tx, bool) -{ - DBUG_ENTER("rocksdb_commit"); +static int rocksdb_commit(handlerton *const hton, THD 
*const thd, + bool commit_tx, bool) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(hton != nullptr); + DBUG_ASSERT(thd != nullptr); /* this will trigger saving of perf_context information */ Rdb_perf_context_guard guard(thd); /* note: h->external_lock(F_UNLCK) is called after this function is called) */ - Rdb_transaction*& tx= get_tx_from_thd(thd); + Rdb_transaction *&tx = get_tx_from_thd(thd); - if (tx != nullptr) - { - if (commit_tx || - (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) - { + if (tx != nullptr) { + if (commit_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN))) { /* We get here - For a COMMIT statement that finishes a multi-statement transaction @@ -2843,9 +2616,7 @@ static int rocksdb_commit(handlerton* const hton, THD* const thd, */ if (tx->commit()) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } - else - { + } else { /* We get here when committing a statement within a transaction. @@ -2855,27 +2626,23 @@ static int rocksdb_commit(handlerton* const hton, THD* const thd, tx->set_tx_failed(false); } - if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) - { + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { // For READ_COMMITTED, we release any existing snapshot so that we will // see any changes that occurred since the last statement. tx->release_snapshot(); } } - DBUG_RETURN(0); + + DBUG_RETURN(HA_EXIT_SUCCESS); } - -static int rocksdb_rollback(handlerton* const hton, THD* const thd, - bool rollback_tx) -{ +static int rocksdb_rollback(handlerton *const hton, THD *const thd, + bool rollback_tx) { Rdb_perf_context_guard guard(thd); - Rdb_transaction*& tx= get_tx_from_thd(thd); + Rdb_transaction *&tx = get_tx_from_thd(thd); - if (tx != nullptr) - { - if (rollback_tx) - { + if (tx != nullptr) { + if (rollback_tx) { /* We get here, when - ROLLBACK statement is issued. 
@@ -2883,9 +2650,7 @@ static int rocksdb_rollback(handlerton* const hton, THD* const thd, Discard the changes made by the transaction */ tx->rollback(); - } - else - { + } else { /* We get here when - a statement with AUTOCOMMIT=1 is being rolled back (because of some @@ -2897,34 +2662,27 @@ static int rocksdb_rollback(handlerton* const hton, THD* const thd, tx->set_tx_failed(true); } - if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) - { + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { // For READ_COMMITTED, we release any existing snapshot so that we will // see any changes that occurred since the last statement. tx->release_snapshot(); } } - return 0; + return HA_EXIT_SUCCESS; } -static bool print_stats(THD* const thd, - std::string const& type, - std::string const& name, - std::string const& status, - stat_print_fn *stat_print) -{ +static bool print_stats(THD *const thd, std::string const &type, + std::string const &name, std::string const &status, + stat_print_fn *stat_print) { return stat_print(thd, type.c_str(), type.size(), name.c_str(), name.size(), status.c_str(), status.size()); } -static std::string format_string( - const char* const format, - ...) -{ +static std::string format_string(const char *const format, ...) 
{ std::string res; - va_list args; - va_list args_copy; - char static_buff[256]; + va_list args; + va_list args_copy; + char static_buff[256]; DBUG_ASSERT(format != nullptr); @@ -2935,32 +2693,26 @@ static std::string format_string( int len = vsnprintf(nullptr, 0, format, args); va_end(args); - if (len < 0) - { + if (len < 0) { res = std::string(""); - } - else if (len == 0) - { + } else if (len == 0) { // Shortcut for an empty string res = std::string(""); - } - else - { + } else { // For short enough output use a static buffer - char* buff= static_buff; - std::unique_ptr dynamic_buff= nullptr; + char *buff = static_buff; + std::unique_ptr dynamic_buff = nullptr; - len++; // Add one for null terminator + len++; // Add one for null terminator // for longer output use an allocated buffer - if (static_cast(len) > sizeof(static_buff)) - { + if (static_cast(len) > sizeof(static_buff)) { dynamic_buff.reset(new char[len]); - buff= dynamic_buff.get(); + buff = dynamic_buff.get(); } // Now re-do the vsnprintf with the buffer which is now large enough - (void) vsnprintf(buff, len, format, args_copy); + (void)vsnprintf(buff, len, format, args_copy); // Convert to a std::string. 
Note we could have created a std::string // large enough and then converted the buffer to a 'char*' and created @@ -2975,13 +2727,11 @@ static std::string format_string( return res; } -class Rdb_snapshot_status : public Rdb_tx_list_walker -{ - private: +class Rdb_snapshot_status : public Rdb_tx_list_walker { +private: std::string m_data; - static std::string current_timestamp(void) - { + static std::string current_timestamp(void) { static const char *const format = "%d-%02d-%02d %02d:%02d:%02d"; time_t currtime; struct tm currtm; @@ -2995,53 +2745,46 @@ class Rdb_snapshot_status : public Rdb_tx_list_walker currtm.tm_sec); } - static std::string get_header(void) - { - return - "\n============================================================\n" + - current_timestamp() + - " ROCKSDB TRANSACTION MONITOR OUTPUT\n" - "============================================================\n" - "---------\n" - "SNAPSHOTS\n" - "---------\n" - "LIST OF SNAPSHOTS FOR EACH SESSION:\n"; + static std::string get_header(void) { + return "\n============================================================\n" + + current_timestamp() + + " ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "============================================================\n" + "---------\n" + "SNAPSHOTS\n" + "---------\n" + "LIST OF SNAPSHOTS FOR EACH SESSION:\n"; } - static std::string get_footer(void) - { - return - "-----------------------------------------\n" - "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n" - "=========================================\n"; + static std::string get_footer(void) { + return "-----------------------------------------\n" + "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "=========================================\n"; } - public: +public: Rdb_snapshot_status() : m_data(get_header()) {} std::string getResult() { return m_data + get_footer(); } /* Implement Rdb_transaction interface */ /* Create one row in the snapshot status table */ - void process_tran(const Rdb_transaction* const tx) override - { + 
void process_tran(const Rdb_transaction *const tx) override { DBUG_ASSERT(tx != nullptr); /* Calculate the duration the snapshot has existed */ int64_t snapshot_timestamp = tx->m_snapshot_timestamp; - if (snapshot_timestamp != 0) - { + if (snapshot_timestamp != 0) { int64_t curr_time; rdb->GetEnv()->GetCurrentTime(&curr_time); - THD* thd = tx->get_thd(); - char buffer[1024]; + THD *thd = tx->get_thd(); + char buffer[1024]; thd_security_context(thd, buffer, sizeof buffer, 0); m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" "%s\n" "lock count %llu, write count %llu\n", - curr_time - snapshot_timestamp, - buffer, + curr_time - snapshot_timestamp, buffer, tx->get_lock_count(), tx->get_write_count()); } } @@ -3052,52 +2795,47 @@ class Rdb_snapshot_status : public Rdb_tx_list_walker * walks through all non-replication transactions and copies * out relevant information for information_schema.rocksdb_trx */ -class Rdb_trx_info_aggregator : public Rdb_tx_list_walker -{ - private: +class Rdb_trx_info_aggregator : public Rdb_tx_list_walker { +private: std::vector *m_trx_info; - public: - explicit Rdb_trx_info_aggregator(std::vector* const trx_info) : - m_trx_info(trx_info) {} +public: + explicit Rdb_trx_info_aggregator(std::vector *const trx_info) + : m_trx_info(trx_info) {} - void process_tran(const Rdb_transaction* const tx) override - { + void process_tran(const Rdb_transaction *const tx) override { static const std::map state_map = { - {rocksdb::Transaction::STARTED, "STARTED"}, - {rocksdb::Transaction::AWAITING_PREPARE, "AWAITING_PREPARE"}, - {rocksdb::Transaction::PREPARED, "PREPARED"}, - {rocksdb::Transaction::AWAITING_COMMIT, "AWAITING_COMMIT"}, - {rocksdb::Transaction::COMMITED, "COMMITED"}, - {rocksdb::Transaction::AWAITING_ROLLBACK, "AWAITING_ROLLBACK"}, - {rocksdb::Transaction::ROLLEDBACK, "ROLLEDBACK"}, + {rocksdb::Transaction::STARTED, "STARTED"}, + {rocksdb::Transaction::AWAITING_PREPARE, "AWAITING_PREPARE"}, + {rocksdb::Transaction::PREPARED, 
"PREPARED"}, + {rocksdb::Transaction::AWAITING_COMMIT, "AWAITING_COMMIT"}, + {rocksdb::Transaction::COMMITED, "COMMITED"}, + {rocksdb::Transaction::AWAITING_ROLLBACK, "AWAITING_ROLLBACK"}, + {rocksdb::Transaction::ROLLEDBACK, "ROLLEDBACK"}, }; DBUG_ASSERT(tx != nullptr); - THD* const thd = tx->get_thd(); + THD *const thd = tx->get_thd(); ulong thread_id = thd_thread_id(thd); if (tx->is_writebatch_trx()) { - const auto wb_impl = static_cast(tx); + const auto wb_impl = static_cast(tx); DBUG_ASSERT(wb_impl); - m_trx_info->push_back({"", /* name */ - 0, /* trx_id */ - wb_impl->get_write_count(), - 0, /* lock_count */ - 0, /* timeout_sec */ - "", /* state */ - "", /* waiting_key */ - 0, /* waiting_cf_id */ - 1, /*is_replication */ - 1, /* skip_trx_api */ - wb_impl->is_tx_read_only(), - 0, /* deadlock detection */ - wb_impl->num_ongoing_bulk_load(), - thread_id, - "" /* query string */ }); + m_trx_info->push_back( + {"", /* name */ + 0, /* trx_id */ + wb_impl->get_write_count(), 0, /* lock_count */ + 0, /* timeout_sec */ + "", /* state */ + "", /* waiting_key */ + 0, /* waiting_cf_id */ + 1, /*is_replication */ + 1, /* skip_trx_api */ + wb_impl->is_tx_read_only(), 0, /* deadlock detection */ + wb_impl->num_ongoing_bulk_load(), thread_id, "" /* query string */}); } else { - const auto tx_impl= static_cast(tx); + const auto tx_impl = static_cast(tx); DBUG_ASSERT(tx_impl); const rocksdb::Transaction *rdb_trx = tx_impl->get_rdb_trx(); @@ -3106,9 +2844,9 @@ class Rdb_trx_info_aggregator : public Rdb_tx_list_walker } std::string query_str; - LEX_STRING* const lex_str = thd_query_string(thd); + LEX_STRING *const lex_str = thd_query_string(thd); if (lex_str != nullptr && lex_str->str != nullptr) { - query_str = std::string(lex_str->str); + query_str = std::string(lex_str->str); } const auto state_it = state_map.find(rdb_trx->GetState()); @@ -3118,22 +2856,14 @@ class Rdb_trx_info_aggregator : public Rdb_tx_list_walker std::string waiting_key; 
rdb_trx->GetWaitingTxns(&waiting_cf_id, &waiting_key), - m_trx_info->push_back({rdb_trx->GetName(), - rdb_trx->GetID(), - tx_impl->get_write_count(), - tx_impl->get_lock_count(), - tx_impl->get_timeout_sec(), - state_it->second, - waiting_key, - waiting_cf_id, - is_replication, - 0, /* skip_trx_api */ - tx_impl->is_tx_read_only(), - rdb_trx->IsDeadlockDetect(), - tx_impl->num_ongoing_bulk_load(), - thread_id, - query_str}); - } + m_trx_info->push_back( + {rdb_trx->GetName(), rdb_trx->GetID(), tx_impl->get_write_count(), + tx_impl->get_lock_count(), tx_impl->get_timeout_sec(), + state_it->second, waiting_key, waiting_cf_id, is_replication, + 0, /* skip_trx_api */ + tx_impl->is_tx_read_only(), rdb_trx->IsDeadlockDetect(), + tx_impl->num_ongoing_bulk_load(), thread_id, query_str}); + } } }; @@ -3149,17 +2879,15 @@ std::vector rdb_get_all_trx_info() { } /* Generate the snapshot status table */ -static bool rocksdb_show_snapshot_status(handlerton* const hton, - THD* const thd, - stat_print_fn* const stat_print) -{ +static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, + stat_print_fn *const stat_print) { Rdb_snapshot_status showStatus; Rdb_transaction::walk_tx_list(&showStatus); /* Send the result data back to MySQL */ return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(), - stat_print); + stat_print); } /* @@ -3169,14 +2897,11 @@ static bool rocksdb_show_snapshot_status(handlerton* const hton, what column families are there) */ -static bool rocksdb_show_status(handlerton* const hton, - THD* const thd, - stat_print_fn* const stat_print, - enum ha_stat_type stat_type) -{ - bool res= false; - if (stat_type == HA_ENGINE_STATUS) - { +static bool rocksdb_show_status(handlerton *const hton, THD *const thd, + stat_print_fn *const stat_print, + enum ha_stat_type stat_type) { + bool res = false; + if (stat_type == HA_ENGINE_STATUS) { std::string str; /* Per DB stats */ @@ -3185,16 +2910,15 @@ static bool rocksdb_show_status(handlerton* 
const hton, } /* Per column family stats */ - for (const auto &cf_name : cf_manager.get_cf_names()) - { - rocksdb::ColumnFamilyHandle* cfh; + for (const auto &cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; bool is_automatic; /* Only the cf name is important. Whether it was generated automatically does not matter, so is_automatic is ignored. */ - cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); if (cfh == nullptr) continue; @@ -3205,35 +2929,28 @@ static bool rocksdb_show_status(handlerton* const hton, } /* Memory Statistics */ - std::vector dbs; - std::unordered_set cache_set; + std::vector dbs; + std::unordered_set cache_set; size_t internal_cache_count = 0; size_t kDefaultInternalCacheSize = 8 * 1024 * 1024; char buf[100]; dbs.push_back(rdb); cache_set.insert(rocksdb_tbl_options.block_cache.get()); - for (const auto& cf_handle : cf_manager.get_all_cf()) - { + for (const auto &cf_handle : cf_manager.get_all_cf()) { rocksdb::ColumnFamilyDescriptor cf_desc; cf_handle->GetDescriptor(&cf_desc); - auto* const table_factory = cf_desc.options.table_factory.get(); - if (table_factory != nullptr) - { + auto *const table_factory = cf_desc.options.table_factory.get(); + if (table_factory != nullptr) { std::string tf_name = table_factory->Name(); - if (tf_name.find("BlockBasedTable") != std::string::npos) - { - const rocksdb::BlockBasedTableOptions* const bbt_opt = - reinterpret_cast( - table_factory->GetOptions()); - if (bbt_opt != nullptr) - { - if (bbt_opt->block_cache.get() != nullptr) - { + if (tf_name.find("BlockBasedTable") != std::string::npos) { + const rocksdb::BlockBasedTableOptions *const bbt_opt = + reinterpret_cast( + table_factory->GetOptions()); + if (bbt_opt != nullptr) { + if (bbt_opt->block_cache.get() != nullptr) { cache_set.insert(bbt_opt->block_cache.get()); - } - else - { + } else { internal_cache_count++; } 
cache_set.insert(bbt_opt->block_cache_compressed.get()); @@ -3244,8 +2961,8 @@ static bool rocksdb_show_status(handlerton* const hton, std::map temp_usage_by_type; str.clear(); - rocksdb::MemoryUtil::GetApproximateMemoryUsageByType( - dbs, cache_set, &temp_usage_by_type); + rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set, + &temp_usage_by_type); snprintf(buf, sizeof(buf), "\nMemTable Total: %lu", temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); str.append(buf); @@ -3262,9 +2979,7 @@ static bool rocksdb_show_status(handlerton* const hton, internal_cache_count * kDefaultInternalCacheSize); str.append(buf); res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); - } - else if (stat_type == HA_ENGINE_TRX) - { + } else if (stat_type == HA_ENGINE_TRX) { /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */ res |= rocksdb_show_snapshot_status(hton, thd, stat_print); } @@ -3272,14 +2987,12 @@ static bool rocksdb_show_status(handlerton* const hton, return res; } -static inline void rocksdb_register_tx(handlerton* const hton, THD* const thd, - Rdb_transaction* const tx) -{ +static inline void rocksdb_register_tx(handlerton *const hton, THD *const thd, + Rdb_transaction *const tx) { DBUG_ASSERT(tx != nullptr); trans_register_ha(thd, FALSE, rocksdb_hton); - if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - { + if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { tx->start_stmt(); trans_register_ha(thd, TRUE, rocksdb_hton); } @@ -3307,37 +3020,36 @@ static inline void rocksdb_register_tx(handlerton* const hton, THD* const thd, InnoDB and RocksDB transactions. 
*/ static int rocksdb_start_tx_and_assign_read_view( - handlerton* const hton, /*!< in: RocksDB handlerton */ - THD* const thd, /*!< in: MySQL thread handle of the - user for whom the transaction should - be committed */ - char* const binlog_file, /* out: binlog file for last commit */ - ulonglong* const binlog_pos, /* out: binlog pos for last commit */ - char** gtid_executed, /* out: Gtids logged until last commit */ - int* const gtid_executed_length)/*out: Length of gtid_executed string */ + handlerton *const hton, /*!< in: RocksDB handlerton */ + THD *const thd, /*!< in: MySQL thread handle of the + user for whom the transaction should + be committed */ + char *const binlog_file, /* out: binlog file for last commit */ + ulonglong *const binlog_pos, /* out: binlog pos for last commit */ + char **gtid_executed, /* out: Gtids logged until last commit */ + int *const gtid_executed_length) /*out: Length of gtid_executed string */ { Rdb_perf_context_guard guard(thd); ulong const tx_isolation = my_core::thd_tx_isolation(thd); - if (tx_isolation != ISO_REPEATABLE_READ) - { + if (tx_isolation != ISO_REPEATABLE_READ) { my_printf_error(ER_UNKNOWN_ERROR, "Only REPEATABLE READ isolation level is supported " "for START TRANSACTION WITH CONSISTENT SNAPSHOT " - "in RocksDB Storage Engine.", MYF(0)); - return 1; + "in RocksDB Storage Engine.", + MYF(0)); + return HA_EXIT_FAILURE; } - if (binlog_file) - { + if (binlog_file) { if (binlog_pos && mysql_bin_log_is_open()) mysql_bin_log_lock_commits(); else - return 1; + return HA_EXIT_FAILURE; } - Rdb_transaction* const tx= get_or_create_tx(thd); + Rdb_transaction *const tx = get_or_create_tx(thd); DBUG_ASSERT(!tx->has_snapshot()); tx->set_tx_read_only(true); rocksdb_register_tx(hton, thd, tx); @@ -3347,7 +3059,7 @@ static int rocksdb_start_tx_and_assign_read_view( mysql_bin_log_unlock_commits(binlog_file, binlog_pos, gtid_executed, gtid_executed_length); - return 0; + return HA_EXIT_SUCCESS; } /* Dummy SAVEPOINT support. 
This is needed for long running transactions @@ -3355,23 +3067,20 @@ static int rocksdb_start_tx_and_assign_read_view( * Current SAVEPOINT does not correctly handle ROLLBACK and does not return * errors. This needs to be addressed in future versions (Issue#96). */ -static int rocksdb_savepoint(handlerton* const hton, THD* const thd, - void* const savepoint) -{ - return 0; +static int rocksdb_savepoint(handlerton *const hton, THD *const thd, + void *const savepoint) { + return HA_EXIT_SUCCESS; } -static int rocksdb_rollback_to_savepoint(handlerton* const hton, THD* const thd, - void* const savepoint) -{ - Rdb_transaction*& tx= get_tx_from_thd(thd); +static int rocksdb_rollback_to_savepoint(handlerton *const hton, THD *const thd, + void *const savepoint) { + Rdb_transaction *&tx = get_tx_from_thd(thd); return tx->rollback_to_savepoint(savepoint); } -static bool rocksdb_rollback_to_savepoint_can_release_mdl( - handlerton* const hton, - THD* const thd) -{ +static bool +rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *const hton, + THD *const thd) { return true; } @@ -3379,14 +3088,12 @@ static bool rocksdb_rollback_to_savepoint_can_release_mdl( This is called for INFORMATION_SCHEMA */ static void rocksdb_update_table_stats( - /* per-table stats callback */ - void (*cb)(const char* db, const char* tbl, bool is_partition, - my_io_perf_t* r, my_io_perf_t* w, my_io_perf_t* r_blob, - my_io_perf_t* r_primary, my_io_perf_t* r_secondary, - page_stats_t *page_stats, comp_stats_t *comp_stats, - int n_lock_wait, int n_lock_wait_timeout, - const char* engine)) -{ + /* per-table stats callback */ + void (*cb)(const char *db, const char *tbl, bool is_partition, + my_io_perf_t *r, my_io_perf_t *w, my_io_perf_t *r_blob, + my_io_perf_t *r_primary, my_io_perf_t *r_secondary, + page_stats_t *page_stats, comp_stats_t *comp_stats, + int n_lock_wait, int n_lock_wait_timeout, const char *engine)) { my_io_perf_t io_perf_read; my_io_perf_t io_perf; page_stats_t page_stats; @@ -3401,10 
+3108,9 @@ static void rocksdb_update_table_stats( memset(&page_stats, 0, sizeof(page_stats)); memset(&comp_stats, 0, sizeof(comp_stats)); - tablenames= rdb_open_tables.get_table_names(); + tablenames = rdb_open_tables.get_table_names(); - for (const auto& it : tablenames) - { + for (const auto &it : tablenames) { Rdb_table_handler *table_handler; std::string str, dbname, tablename, partname; char dbname_sys[NAME_LEN + 1]; @@ -3420,37 +3126,35 @@ static void rocksdb_update_table_stats( return; } - if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) - { + if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) { continue; } - is_partition= (partname.size() != 0); + is_partition = (partname.size() != 0); - table_handler= rdb_open_tables.get_table_handler(it.c_str()); - if (table_handler == nullptr) - { + table_handler = rdb_open_tables.get_table_handler(it.c_str()); + if (table_handler == nullptr) { continue; } - io_perf_read.bytes= table_handler->m_io_perf_read.bytes.load(); - io_perf_read.requests= table_handler->m_io_perf_read.requests.load(); + io_perf_read.bytes = table_handler->m_io_perf_read.bytes.load(); + io_perf_read.requests = table_handler->m_io_perf_read.requests.load(); /* Convert from rocksdb timer to mysql timer. RocksDB values are in nanoseconds, but table statistics expect the value to be in my_timer format. 
*/ - io_perf_read.svc_time= my_core::microseconds_to_my_timer( - table_handler->m_io_perf_read.svc_time.load() / 1000); - io_perf_read.svc_time_max= my_core::microseconds_to_my_timer( - table_handler->m_io_perf_read.svc_time_max.load() / 1000); - io_perf_read.wait_time= my_core::microseconds_to_my_timer( - table_handler->m_io_perf_read.wait_time.load() / 1000); - io_perf_read.wait_time_max= my_core::microseconds_to_my_timer( - table_handler->m_io_perf_read.wait_time_max.load() / 1000); - io_perf_read.slow_ios= table_handler->m_io_perf_read.slow_ios.load(); - rdb_open_tables.release_table_handler(table_handler); + io_perf_read.svc_time = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time.load() / 1000); + io_perf_read.svc_time_max = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time_max.load() / 1000); + io_perf_read.wait_time = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time.load() / 1000); + io_perf_read.wait_time_max = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time_max.load() / 1000); + io_perf_read.slow_ios = table_handler->m_io_perf_read.slow_ios.load(); + rdb_open_tables.release_table_handler(table_handler); /* Table stats expects our database and table name to be in system encoding, @@ -3466,19 +3170,15 @@ static void rocksdb_update_table_stats( } } - static rocksdb::Status check_rocksdb_options_compatibility( - const char* const dbpath, - const rocksdb::Options& main_opts, - const std::vector& cf_descr) -{ + const char *const dbpath, const rocksdb::Options &main_opts, + const std::vector &cf_descr) { DBUG_ASSERT(rocksdb_datadir != nullptr); rocksdb::DBOptions loaded_db_opt; std::vector loaded_cf_descs; - rocksdb::Status status = LoadLatestOptions(dbpath, - rocksdb::Env::Default(), &loaded_db_opt, - &loaded_cf_descs); + rocksdb::Status status = LoadLatestOptions(dbpath, rocksdb::Env::Default(), + &loaded_db_opt, &loaded_cf_descs); // If we're starting 
from scratch and there are no options saved yet then this // is a valid case. Therefore we can't compare the current set of options to @@ -3492,32 +3192,32 @@ static rocksdb::Status check_rocksdb_options_compatibility( } if (loaded_cf_descs.size() != cf_descr.size()) { - return rocksdb::Status::NotSupported("Mismatched size of column family " \ - "descriptors."); + return rocksdb::Status::NotSupported("Mismatched size of column family " + "descriptors."); } // Please see RocksDB documentation for more context about why we need to set // user-defined functions and pointer-typed options manually. for (size_t i = 0; i < loaded_cf_descs.size(); i++) { loaded_cf_descs[i].options.compaction_filter = - cf_descr[i].options.compaction_filter; - loaded_cf_descs[i].options.compaction_filter_factory = - cf_descr[i].options.compaction_filter_factory; + cf_descr[i].options.compaction_filter; + loaded_cf_descs[i].options.compaction_filter_factory = + cf_descr[i].options.compaction_filter_factory; loaded_cf_descs[i].options.comparator = cf_descr[i].options.comparator; loaded_cf_descs[i].options.memtable_factory = - cf_descr[i].options.memtable_factory; + cf_descr[i].options.memtable_factory; loaded_cf_descs[i].options.merge_operator = - cf_descr[i].options.merge_operator; + cf_descr[i].options.merge_operator; loaded_cf_descs[i].options.prefix_extractor = - cf_descr[i].options.prefix_extractor; + cf_descr[i].options.prefix_extractor; loaded_cf_descs[i].options.table_factory = - cf_descr[i].options.table_factory; + cf_descr[i].options.table_factory; } // This is the essence of the function - determine if it's safe to open the // database or not. 
- status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(), - main_opts, loaded_cf_descs); + status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(), main_opts, + loaded_cf_descs); return status; } @@ -3526,9 +3226,8 @@ static rocksdb::Status check_rocksdb_options_compatibility( Storage Engine initialization function, invoked when plugin is loaded. */ -static int rocksdb_init_func(void* const p) -{ - DBUG_ENTER("rocksdb_init_func"); +static int rocksdb_init_func(void *const p) { + DBUG_ENTER_FUNC(); // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN. static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes."); @@ -3537,12 +3236,11 @@ static int rocksdb_init_func(void* const p) init_rocksdb_psi_keys(); #endif - rocksdb_hton= (handlerton *)p; + rocksdb_hton = (handlerton *)p; mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex, MY_MUTEX_INIT_FAST); #ifdef HAVE_PSI_INTERFACE - rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key, - rdb_signal_bg_psi_cond_key); + rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key, rdb_signal_bg_psi_cond_key); rdb_drop_idx_thread.init(rdb_signal_drop_idx_psi_mutex_key, rdb_signal_drop_idx_psi_cond_key); #else @@ -3555,8 +3253,8 @@ static int rocksdb_init_func(void* const p) MY_MUTEX_INIT_FAST); #if defined(HAVE_PSI_INTERFACE) - rdb_collation_exceptions = new Regex_list_handler( - key_rwlock_collation_exception_list); + rdb_collation_exceptions = + new Regex_list_handler(key_rwlock_collation_exception_list); #else rdb_collation_exceptions = new Regex_list_handler(); #endif @@ -3566,151 +3264,153 @@ static int rocksdb_init_func(void* const p) rdb_open_tables.init_hash(); Rdb_transaction::init_mutex(); - rocksdb_hton->state= SHOW_OPTION_YES; - rocksdb_hton->create= rocksdb_create_handler; - rocksdb_hton->close_connection= rocksdb_close_connection; - rocksdb_hton->prepare= rocksdb_prepare; - rocksdb_hton->commit_by_xid= rocksdb_commit_by_xid; - 
rocksdb_hton->rollback_by_xid= rocksdb_rollback_by_xid; - rocksdb_hton->recover= rocksdb_recover; - rocksdb_hton->commit= rocksdb_commit; - rocksdb_hton->rollback= rocksdb_rollback; - rocksdb_hton->db_type= DB_TYPE_ROCKSDB; - rocksdb_hton->show_status= rocksdb_show_status; - rocksdb_hton->start_consistent_snapshot= - rocksdb_start_tx_and_assign_read_view; - rocksdb_hton->savepoint_set= rocksdb_savepoint; - rocksdb_hton->savepoint_rollback= rocksdb_rollback_to_savepoint; - rocksdb_hton->savepoint_rollback_can_release_mdl= - rocksdb_rollback_to_savepoint_can_release_mdl; + rocksdb_hton->state = SHOW_OPTION_YES; + rocksdb_hton->create = rocksdb_create_handler; + rocksdb_hton->close_connection = rocksdb_close_connection; + rocksdb_hton->prepare = rocksdb_prepare; + rocksdb_hton->commit_by_xid = rocksdb_commit_by_xid; + rocksdb_hton->rollback_by_xid = rocksdb_rollback_by_xid; + rocksdb_hton->recover = rocksdb_recover; + rocksdb_hton->commit = rocksdb_commit; + rocksdb_hton->rollback = rocksdb_rollback; + rocksdb_hton->db_type = DB_TYPE_ROCKSDB; + rocksdb_hton->show_status = rocksdb_show_status; + rocksdb_hton->start_consistent_snapshot = + rocksdb_start_tx_and_assign_read_view; + rocksdb_hton->savepoint_set = rocksdb_savepoint; + rocksdb_hton->savepoint_rollback = rocksdb_rollback_to_savepoint; + rocksdb_hton->savepoint_rollback_can_release_mdl = + rocksdb_rollback_to_savepoint_can_release_mdl; rocksdb_hton->update_table_stats = rocksdb_update_table_stats; - rocksdb_hton->flush_logs= rocksdb_flush_wal; + rocksdb_hton->flush_logs = rocksdb_flush_wal; - rocksdb_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | - HTON_SUPPORTS_EXTENDED_KEYS | - HTON_CAN_RECREATE; + rocksdb_hton->flags = HTON_TEMPORARY_NOT_SUPPORTED | + HTON_SUPPORTS_EXTENDED_KEYS | HTON_CAN_RECREATE; DBUG_ASSERT(!mysqld_embedded); - rocksdb_stats= rocksdb::CreateDBStatistics(); - rocksdb_db_options.statistics= rocksdb_stats; + rocksdb_stats = rocksdb::CreateDBStatistics(); + rocksdb_db_options.statistics = 
rocksdb_stats; if (rocksdb_rate_limiter_bytes_per_sec != 0) { - rocksdb_rate_limiter.reset(rocksdb::NewGenericRateLimiter( - rocksdb_rate_limiter_bytes_per_sec)); - rocksdb_db_options.rate_limiter= rocksdb_rate_limiter; + rocksdb_rate_limiter.reset( + rocksdb::NewGenericRateLimiter(rocksdb_rate_limiter_bytes_per_sec)); + rocksdb_db_options.rate_limiter = rocksdb_rate_limiter; } - std::shared_ptr myrocks_logger= std::make_shared(); - rocksdb::Status s= rocksdb::CreateLoggerFromOptions( + std::shared_ptr myrocks_logger = std::make_shared(); + rocksdb::Status s = rocksdb::CreateLoggerFromOptions( rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log); if (s.ok()) { myrocks_logger->SetRocksDBLogger(rocksdb_db_options.info_log); } - rocksdb_db_options.info_log= myrocks_logger; + rocksdb_db_options.info_log = myrocks_logger; myrocks_logger->SetInfoLogLevel( - static_cast(rocksdb_info_log_level)); - rocksdb_db_options.wal_dir= rocksdb_wal_dir; + static_cast(rocksdb_info_log_level)); + rocksdb_db_options.wal_dir = rocksdb_wal_dir; - rocksdb_db_options.wal_recovery_mode= - static_cast(rocksdb_wal_recovery_mode); + rocksdb_db_options.wal_recovery_mode = + static_cast(rocksdb_wal_recovery_mode); - rocksdb_db_options.access_hint_on_compaction_start= - static_cast - (rocksdb_access_hint_on_compaction_start); + rocksdb_db_options.access_hint_on_compaction_start = + static_cast( + rocksdb_access_hint_on_compaction_start); if (rocksdb_db_options.allow_mmap_reads && - rocksdb_db_options.use_direct_reads) - { + rocksdb_db_options.use_direct_reads) { // allow_mmap_reads implies !use_direct_reads and RocksDB will not open if // mmap_reads and direct_reads are both on. 
(NO_LINT_DEBUG) sql_print_error("RocksDB: Can't enable both use_direct_reads " "and allow_mmap_reads\n"); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } if (rocksdb_db_options.allow_mmap_writes && - rocksdb_db_options.use_direct_writes) - { + rocksdb_db_options.use_direct_writes) { // See above comment for allow_mmap_reads. (NO_LINT_DEBUG) sql_print_error("RocksDB: Can't enable both use_direct_writes " "and allow_mmap_writes\n"); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } std::vector cf_names; rocksdb::Status status; - status= rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir, - &cf_names); - if (!status.ok()) - { + status = rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir, + &cf_names); + if (!status.ok()) { /* When we start on an empty datadir, ListColumnFamilies returns IOError, and RocksDB doesn't provide any way to check what kind of error it was. Checking system errno happens to work right now. 
*/ - if (status.IsIOError() && errno == ENOENT) - { + if (status.IsIOError() && errno == ENOENT) { sql_print_information("RocksDB: Got ENOENT when listing column families"); - sql_print_information("RocksDB: assuming that we're creating a new database"); - } - else - { - std::string err_text= status.ToString(); - sql_print_error("RocksDB: Error listing column families: %s", err_text.c_str()); + sql_print_information( + "RocksDB: assuming that we're creating a new database"); + } else { + std::string err_text = status.ToString(); + sql_print_error("RocksDB: Error listing column families: %s", + err_text.c_str()); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - } - else - sql_print_information("RocksDB: %ld column families found", cf_names.size()); + } else + sql_print_information("RocksDB: %ld column families found", + cf_names.size()); std::vector cf_descr; - std::vector cf_handles; + std::vector cf_handles; - rocksdb_tbl_options.index_type= - (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type; + rocksdb_tbl_options.index_type = + (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type; if (!rocksdb_tbl_options.no_block_cache) { - rocksdb_tbl_options.block_cache= + rocksdb_tbl_options.block_cache = rocksdb::NewLRUCache(rocksdb_block_cache_size); } // Using newer BlockBasedTable format version for better compression // and better memory allocation. 
- // See: https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd - rocksdb_tbl_options.format_version= 2; + // See: + // https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd + rocksdb_tbl_options.format_version = 2; if (rocksdb_collect_sst_properties) { - properties_collector_factory = std::make_shared - ( - &ddl_manager - ); + properties_collector_factory = + std::make_shared(&ddl_manager); rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr); mysql_mutex_lock(&rdb_sysvars_mutex); - DBUG_ASSERT(rocksdb_table_stats_sampling_pct - <= RDB_TBL_STATS_SAMPLE_PCT_MAX); + DBUG_ASSERT(rocksdb_table_stats_sampling_pct <= + RDB_TBL_STATS_SAMPLE_PCT_MAX); properties_collector_factory->SetTableStatsSamplingPct( - rocksdb_table_stats_sampling_pct); + rocksdb_table_stats_sampling_pct); mysql_mutex_unlock(&rdb_sysvars_mutex); } + if (rocksdb_persistent_cache_size > 0) { + std::shared_ptr pcache; + rocksdb::NewPersistentCache( + rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path), + rocksdb_persistent_cache_size, myrocks_logger, true, &pcache); + rocksdb_tbl_options.persistent_cache = pcache; + } else if (strlen(rocksdb_persistent_cache_path)) { + sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size"); + DBUG_RETURN(1); + } + if (!rocksdb_cf_options_map.init( - rocksdb_tbl_options, - properties_collector_factory, - rocksdb_default_cf_options, - rocksdb_override_cf_options)) - { + rocksdb_tbl_options, properties_collector_factory, + rocksdb_default_cf_options, rocksdb_override_cf_options)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize CF options map."); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } /* @@ -3722,8 +3422,7 @@ static int rocksdb_init_func(void* const p) std::vector compaction_enabled_cf_indices; sql_print_information("RocksDB: Column Families at start:"); - for (size_t i = 0; i < cf_names.size(); ++i) - { + for 
(size_t i = 0; i < cf_names.size(); ++i) { rocksdb::ColumnFamilyOptions opts; rocksdb_cf_options_map.get_cf_options(cf_names[i], &opts); @@ -3736,8 +3435,7 @@ static int rocksdb_init_func(void* const p) Temporarily disable compactions to prevent a race condition where compaction starts before compaction filter is ready. */ - if (!opts.disable_auto_compactions) - { + if (!opts.disable_auto_compactions) { compaction_enabled_cf_indices.push_back(i); opts.disable_auto_compactions = true; } @@ -3752,57 +3450,53 @@ static int rocksdb_init_func(void* const p) main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions, rocksdb::Env::Priority::LOW); rocksdb::TransactionDBOptions tx_db_options; - tx_db_options.transaction_lock_timeout= 2; // 2 seconds - tx_db_options.custom_mutex_factory= std::make_shared(); + tx_db_options.transaction_lock_timeout = 2; // 2 seconds + tx_db_options.custom_mutex_factory = std::make_shared(); - status= check_rocksdb_options_compatibility(rocksdb_datadir, main_opts, - cf_descr); + status = + check_rocksdb_options_compatibility(rocksdb_datadir, main_opts, cf_descr); // We won't start if we'll determine that there's a chance of data corruption // because of incompatible options. if (!status.ok()) { // NO_LINT_DEBUG - sql_print_error("RocksDB: compatibility check against existing database " \ - "options failed. %s", status.ToString().c_str()); + sql_print_error("RocksDB: compatibility check against existing database " + "options failed. 
%s", + status.ToString().c_str()); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - status= rocksdb::TransactionDB::Open(main_opts, tx_db_options, - rocksdb_datadir, cf_descr, - &cf_handles, &rdb); + status = rocksdb::TransactionDB::Open( + main_opts, tx_db_options, rocksdb_datadir, cf_descr, &cf_handles, &rdb); - if (!status.ok()) - { - std::string err_text= status.ToString(); + if (!status.ok()) { + std::string err_text = status.ToString(); sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str()); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } cf_manager.init(&rocksdb_cf_options_map, &cf_handles); - if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) - { + if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize data dictionary."); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - if (binlog_manager.init(&dict_manager)) - { + if (binlog_manager.init(&dict_manager)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize binlog manager."); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) - { + if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize DDL manager."); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } Rdb_sst_info::init(rdb); @@ -3811,46 +3505,46 @@ static int rocksdb_init_func(void* const p) Enable auto compaction, things needed for compaction filter are finished initializing */ - std::vector compaction_enabled_cf_handles; + std::vector compaction_enabled_cf_handles; compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size()); - for (const auto &index : compaction_enabled_cf_indices) - { + for (const auto &index : 
compaction_enabled_cf_indices) { compaction_enabled_cf_handles.push_back(cf_handles[index]); } - status= rdb->EnableAutoCompaction(compaction_enabled_cf_handles); + status = rdb->EnableAutoCompaction(compaction_enabled_cf_handles); - if (!status.ok()) - { - const std::string err_text= status.ToString(); + if (!status.ok()) { + const std::string err_text = status.ToString(); // NO_LINT_DEBUG sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - auto err= rdb_bg_thread.create_thread( + auto err = rdb_bg_thread.create_thread(BG_THREAD_NAME #ifdef HAVE_PSI_INTERFACE - rdb_background_psi_thread_key + , + rdb_background_psi_thread_key #endif - ); + ); if (err != 0) { sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)", err); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - err= rdb_drop_idx_thread.create_thread( + err = rdb_drop_idx_thread.create_thread(INDEX_THREAD_NAME #ifdef HAVE_PSI_INTERFACE - rdb_drop_idx_psi_thread_key + , + rdb_drop_idx_psi_thread_key #endif - ); + ); if (err != 0) { sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)", err); rdb_open_tables.free_hash(); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } rdb_set_collation_exception_list(rocksdb_strict_collation_exceptions); @@ -3861,28 +3555,28 @@ static int rocksdb_init_func(void* const p) // NO_LINT_DEBUG sql_print_information("RocksDB: global statistics using %s indexer", - STRINGIFY_ARG(RDB_INDEXER)); + STRINGIFY_ARG(RDB_INDEXER)); #if defined(HAVE_SCHED_GETCPU) - if (sched_getcpu() == -1) - { + if (sched_getcpu() == -1) { // NO_LINT_DEBUG - sql_print_information("RocksDB: sched_getcpu() failed - " + sql_print_information( + "RocksDB: sched_getcpu() failed - " "global statistics will use thread_id_indexer_t instead"); } #endif sql_print_information("RocksDB instance opened"); - DBUG_RETURN(0); + 
DBUG_RETURN(HA_EXIT_SUCCESS); } /* Storage Engine deinitialization function, invoked when plugin is unloaded. */ -static int rocksdb_done_func(void* const p) -{ - int error= 0; - DBUG_ENTER("rocksdb_done_func"); +static int rocksdb_done_func(void *const p) { + DBUG_ENTER_FUNC(); + + int error = 0; // signal the drop index thread to stop rdb_drop_idx_thread.signal(true); @@ -3901,7 +3595,7 @@ static int rocksdb_done_func(void* const p) rdb_bg_thread.signal(true); // Wait for the background thread to finish. - auto err= rdb_bg_thread.join(); + auto err = rdb_bg_thread.join(); if (err != 0) { // We'll log the message and continue because we're shutting down and // continuation is the optimal strategy. @@ -3911,18 +3605,16 @@ static int rocksdb_done_func(void* const p) } // Wait for the drop index thread to finish. - err= rdb_drop_idx_thread.join(); + err = rdb_drop_idx_thread.join(); if (err != 0) { // NO_LINT_DEBUG - sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", - err); + sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", err); } - if (rdb_open_tables.m_hash.records) - { + if (rdb_open_tables.m_hash.records) { // Looks like we are getting unloaded and yet we have some open tables // left behind. - error= 1; + error = 1; } rdb_open_tables.free_hash(); @@ -3935,10 +3627,9 @@ static int rocksdb_done_func(void* const p) Rdb_transaction::term_mutex(); - for (auto& it : rdb_collation_data) - { + for (auto &it : rdb_collation_data) { delete it; - it= nullptr; + it = nullptr; } ddl_manager.cleanup(); @@ -3947,14 +3638,13 @@ static int rocksdb_done_func(void* const p) cf_manager.cleanup(); delete rdb; - rdb= nullptr; + rdb = nullptr; - // Disown the cache data since we're shutting down. - // This results in memory leaks but it improved the shutdown time. - // Don't disown when running under valgrind +// Disown the cache data since we're shutting down. +// This results in memory leaks but it improved the shutdown time. 
+// Don't disown when running under valgrind #ifndef HAVE_purify - if (rocksdb_tbl_options.block_cache) - { + if (rocksdb_tbl_options.block_cache) { rocksdb_tbl_options.block_cache->DisownData(); } #endif /* HAVE_purify */ @@ -3962,7 +3652,6 @@ static int rocksdb_done_func(void* const p) DBUG_RETURN(error); } - /** @brief Example of simple lock controls. The "table_handler" it creates is a @@ -3971,42 +3660,35 @@ static int rocksdb_done_func(void* const p) they are needed to function. */ -Rdb_table_handler* Rdb_open_tables_map::get_table_handler( - const char* const table_name) -{ +Rdb_table_handler * +Rdb_open_tables_map::get_table_handler(const char *const table_name) { Rdb_table_handler *table_handler; uint length; char *tmp_name; DBUG_ASSERT(table_name != nullptr); - length=(uint) strlen(table_name); + length = (uint)strlen(table_name); // First, look up the table in the hash map. mysql_mutex_lock(&m_mutex); - if (!(table_handler= reinterpret_cast( - my_hash_search(&m_hash, reinterpret_cast(table_name), - length)))) - { + if (!(table_handler = reinterpret_cast(my_hash_search( + &m_hash, reinterpret_cast(table_name), length)))) { // Since we did not find it in the hash map, attempt to create and add it // to the hash map. - if (!(table_handler= reinterpret_cast( - my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), - &table_handler, sizeof(*table_handler), - &tmp_name, length+1, - NullS)))) - { + if (!(table_handler = reinterpret_cast(my_multi_malloc( + MYF(MY_WME | MY_ZEROFILL), &table_handler, sizeof(*table_handler), + &tmp_name, length + 1, NullS)))) { // Allocating a new Rdb_table_handler and a new table name failed. 
mysql_mutex_unlock(&m_mutex); return nullptr; } - table_handler->m_ref_count= 0; - table_handler->m_table_name_length= length; - table_handler->m_table_name= tmp_name; + table_handler->m_ref_count = 0; + table_handler->m_table_name_length = length; + table_handler->m_table_name = tmp_name; strmov(table_handler->m_table_name, table_name); - if (my_hash_insert(&m_hash, reinterpret_cast(table_handler))) - { + if (my_hash_insert(&m_hash, reinterpret_cast(table_handler))) { // Inserting into the hash map failed. mysql_mutex_unlock(&m_mutex); my_free(table_handler); @@ -4024,25 +3706,19 @@ Rdb_table_handler* Rdb_open_tables_map::get_table_handler( return table_handler; } - -std::vector rdb_get_open_table_names(void) -{ +std::vector rdb_get_open_table_names(void) { return rdb_open_tables.get_table_names(); } - -std::vector Rdb_open_tables_map::get_table_names(void) const -{ +std::vector Rdb_open_tables_map::get_table_names(void) const { ulong i; const Rdb_table_handler *table_handler; std::vector names; mysql_mutex_lock(&m_mutex); - for (i = 0; - (table_handler= reinterpret_cast( - my_hash_const_element(&m_hash, i))); - i++) - { + for (i = 0; (table_handler = reinterpret_cast( + my_hash_const_element(&m_hash, i))); + i++) { DBUG_ASSERT(table_handler != nullptr); names.push_back(table_handler->m_table_name); } @@ -4052,12 +3728,10 @@ std::vector Rdb_open_tables_map::get_table_names(void) const return names; } - -void ha_rocksdb::load_auto_incr_value() -{ - const int save_active_index= active_index; - active_index= table->s->next_number_index; - const uint8 save_table_status= table->status; +void ha_rocksdb::load_auto_incr_value() { + const int save_active_index = active_index; + active_index = table->s->next_number_index; + const uint8 save_table_status = table->status; /* load_auto_incr_value() may be called by statements that @@ -4065,26 +3739,25 @@ void ha_rocksdb::load_auto_incr_value() index_last() creates a snapshot. 
When a snapshot is created here, it has to be released as well. (GitHub issue#189) */ - Rdb_transaction* const tx= get_or_create_tx(table->in_use); - const bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); // Do a lookup. We only need index column, so it should be index-only. // (another reason to make it index-only is that table->read_set is // not set appropriately and non-index-only lookup will not read the value) - const bool save_keyread_only= m_keyread_only; - m_keyread_only= true; + const bool save_keyread_only = m_keyread_only; + m_keyread_only = true; if (!index_last(table->record[0])) update_auto_incr_val(); - m_keyread_only= save_keyread_only; - if (is_new_snapshot) - { + m_keyread_only = save_keyread_only; + if (is_new_snapshot) { tx->release_snapshot(); } - table->status= save_table_status; - active_index= save_active_index; + table->status = save_table_status; + active_index = save_active_index; /* Do what ha_rocksdb::index_end() does. @@ -4100,47 +3773,41 @@ void ha_rocksdb::load_auto_incr_value() end of ha_rocksdb::create. 
Also see opened issue here: https://github.com/facebook/mysql-5.6/issues/153 */ -void ha_rocksdb::update_auto_incr_val() -{ +void ha_rocksdb::update_auto_incr_val() { Field *field; longlong new_val; - field= table->key_info[table->s->next_number_index].key_part[0].field; + field = table->key_info[table->s->next_number_index].key_part[0].field; - my_bitmap_map* const old_map= - dbug_tmp_use_all_columns(table, table->read_set); - new_val= field->val_int() + 1; + my_bitmap_map *const old_map = + dbug_tmp_use_all_columns(table, table->read_set); + new_val = field->val_int() + 1; dbug_tmp_restore_column_map(table->read_set, old_map); - longlong auto_incr_val= m_tbl_def->m_auto_incr_val; + longlong auto_incr_val = m_tbl_def->m_auto_incr_val; while (auto_incr_val < new_val && - !m_tbl_def->m_auto_incr_val.compare_exchange_weak( - auto_incr_val, new_val)) - { + !m_tbl_def->m_auto_incr_val.compare_exchange_weak(auto_incr_val, + new_val)) { // Do nothing - just loop until auto_incr_val is >= new_val or // we successfully set it } } -int ha_rocksdb::load_hidden_pk_value() -{ - const int save_active_index= active_index; - active_index= m_tbl_def->m_key_count - 1; - const uint8 save_table_status= table->status; +int ha_rocksdb::load_hidden_pk_value() { + const int save_active_index = active_index; + active_index = m_tbl_def->m_key_count - 1; + const uint8 save_table_status = table->status; - Rdb_transaction* const tx= get_or_create_tx(table->in_use); - const bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); // Do a lookup. 
- if (!index_last(table->record[0])) - { + if (!index_last(table->record[0])) { /* Decode PK field from the key */ - longlong hidden_pk_id= 0; - if (read_hidden_pk_id_from_rowkey(&hidden_pk_id)) - { - if (is_new_snapshot) - { + longlong hidden_pk_id = 0; + if (read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { + if (is_new_snapshot) { tx->release_snapshot(); } return HA_ERR_INTERNAL_ERROR; @@ -4148,35 +3815,33 @@ int ha_rocksdb::load_hidden_pk_value() hidden_pk_id++; longlong old = m_tbl_def->m_hidden_pk_val; - while (old < hidden_pk_id && - !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) - {} + while ( + old < hidden_pk_id && + !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) { + } } - if (is_new_snapshot) - { + if (is_new_snapshot) { tx->release_snapshot(); } - table->status= save_table_status; - active_index= save_active_index; + table->status = save_table_status; + active_index = save_active_index; release_scan_iterator(); - return 0; + return HA_EXIT_SUCCESS; } /* Get PK value from m_tbl_def->m_hidden_pk_info. 
*/ -longlong ha_rocksdb::update_hidden_pk_val() -{ +longlong ha_rocksdb::update_hidden_pk_val() { DBUG_ASSERT(has_hidden_pk(table)); - const longlong new_val= m_tbl_def->m_hidden_pk_val++; + const longlong new_val = m_tbl_def->m_hidden_pk_val++; return new_val; } /* Get the id of the hidden pk id from m_last_rowkey */ -int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) -{ +int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) { DBUG_ASSERT(hidden_pk_id != nullptr); DBUG_ASSERT(table != nullptr); DBUG_ASSERT(has_hidden_pk(table)); @@ -4186,20 +3851,18 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) // Get hidden primary key from old key slice Rdb_string_reader reader(&rowkey_slice); if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE))) - return 1; + return HA_EXIT_FAILURE; - const int length= Field_longlong::PACK_LENGTH; - const uchar* from= reinterpret_cast(reader.read(length)); - if (from == nullptr) - { - return 1; /* Mem-comparable image doesn't have enough bytes */ + const int length = Field_longlong::PACK_LENGTH; + const uchar *from = reinterpret_cast(reader.read(length)); + if (from == nullptr) { + return HA_EXIT_FAILURE; /* Mem-comparable image doesn't have enough bytes */ } - *hidden_pk_id= rdb_netbuf_read_uint64(&from); - return 0; + *hidden_pk_id = rdb_netbuf_read_uint64(&from); + return HA_EXIT_SUCCESS; } - /** @brief Free lock controls. We call this whenever we close a table. If the table had @@ -4208,18 +3871,16 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) */ void Rdb_open_tables_map::release_table_handler( - Rdb_table_handler* const table_handler) -{ + Rdb_table_handler *const table_handler) { mysql_mutex_lock(&m_mutex); DBUG_ASSERT(table_handler != nullptr); DBUG_ASSERT(table_handler->m_ref_count > 0); - if (!--table_handler->m_ref_count) - { + if (!--table_handler->m_ref_count) { // Last rereference was released. Tear down the hash entry. 
- const auto ret __attribute__((__unused__)) = - my_hash_delete(&m_hash, reinterpret_cast(table_handler)); - DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted + const auto ret MY_ATTRIBUTE((__unused__)) = + my_hash_delete(&m_hash, reinterpret_cast(table_handler)); + DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted my_core::thr_lock_delete(&table_handler->m_thr_lock); my_free(table_handler); } @@ -4227,68 +3888,58 @@ void Rdb_open_tables_map::release_table_handler( mysql_mutex_unlock(&m_mutex); } - -static handler* rocksdb_create_handler(my_core::handlerton* const hton, - my_core::TABLE_SHARE* const table_arg, - my_core::MEM_ROOT* const mem_root) -{ +static handler *rocksdb_create_handler(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg, + my_core::MEM_ROOT *const mem_root) { return new (mem_root) ha_rocksdb(hton, table_arg); } - -ha_rocksdb::ha_rocksdb(my_core::handlerton* const hton, - my_core::TABLE_SHARE* const table_arg) - : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr), - m_scan_it_skips_bloom(false), m_scan_it_snapshot(nullptr), - m_tbl_def(nullptr), m_pk_descr(nullptr), m_key_descr_arr(nullptr), - m_pk_can_be_decoded(false), - m_maybe_unpack_info(false), - m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr), - m_sk_packed_tuple(nullptr), - m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), - m_sk_packed_tuple_old(nullptr), - m_pack_buffer(nullptr), m_lock_rows(RDB_LOCK_NONE), - m_keyread_only(FALSE), m_bulk_load_tx(nullptr), m_encoder_arr(nullptr), - m_row_checksums_checked(0), - m_in_rpl_delete_rows(false), m_in_rpl_update_rows(false), - m_force_skip_unique_check(false) -{ +ha_rocksdb::ha_rocksdb(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg) + : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr), + m_scan_it_skips_bloom(false), m_scan_it_snapshot(nullptr), + m_tbl_def(nullptr), m_pk_descr(nullptr), 
m_key_descr_arr(nullptr), + m_pk_can_be_decoded(false), m_maybe_unpack_info(false), + m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr), + m_sk_packed_tuple(nullptr), m_end_key_packed_tuple(nullptr), + m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), + m_sk_packed_tuple_old(nullptr), m_dup_sk_packed_tuple(nullptr), + m_dup_sk_packed_tuple_old(nullptr), m_pack_buffer(nullptr), + m_lock_rows(RDB_LOCK_NONE), m_keyread_only(FALSE), + m_bulk_load_tx(nullptr), m_encoder_arr(nullptr), + m_row_checksums_checked(0), m_in_rpl_delete_rows(false), + m_in_rpl_update_rows(false), m_force_skip_unique_check(false) { // TODO(alexyang): create a valid PSI_mutex_key for this mutex mysql_mutex_init(0, &m_bulk_load_mutex, MY_MUTEX_INIT_FAST); } +static const char *ha_rocksdb_exts[] = {NullS}; -static const char *ha_rocksdb_exts[] = { - NullS -}; +const char **ha_rocksdb::bas_ext() const { + DBUG_ENTER_FUNC(); - -const char **ha_rocksdb::bas_ext() const -{ - return ha_rocksdb_exts; + DBUG_RETURN(ha_rocksdb_exts); } -bool ha_rocksdb::same_table(const ha_rocksdb& other) const -{ +bool ha_rocksdb::same_table(const ha_rocksdb &other) const { return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); } -bool ha_rocksdb::init_with_fields() -{ - const uint pk= table_share->primary_key; - if (pk != MAX_KEY) - { - const uint key_parts= table_share->key_info[pk].user_defined_key_parts; - check_keyread_allowed(pk /*PK*/, key_parts-1, true); - } - else - m_pk_can_be_decoded= false; +bool ha_rocksdb::init_with_fields() { + DBUG_ENTER_FUNC(); - cached_table_flags= table_flags(); - return false; /* Ok */ + const uint pk = table_share->primary_key; + if (pk != MAX_KEY) { + const uint key_parts = table_share->key_info[pk].user_defined_key_parts; + check_keyread_allowed(pk /*PK*/, key_parts - 1, true); + } else + m_pk_can_be_decoded = false; + + cached_table_flags = table_flags(); + + DBUG_RETURN(false); /* Ok */ } - /** Convert record from table->record[0] form into a form that 
can be written into rocksdb. @@ -4299,10 +3950,8 @@ bool ha_rocksdb::init_with_fields() */ void ha_rocksdb::convert_record_to_storage_format( - const rocksdb::Slice& pk_packed_slice, - Rdb_string_writer* const pk_unpack_info, - rocksdb::Slice* const packed_rec) -{ + const rocksdb::Slice &pk_packed_slice, + Rdb_string_writer *const pk_unpack_info, rocksdb::Slice *const packed_rec) { DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info); m_storage_record.length(0); @@ -4312,93 +3961,78 @@ void ha_rocksdb::convert_record_to_storage_format( // If a primary key may have non-empty unpack_info for certain values, // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block // itself was prepared in Rdb_key_def::pack_record. - if (m_maybe_unpack_info) - { - m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), + if (m_maybe_unpack_info) { + m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), pk_unpack_info->get_current_pos()); } - for (uint i=0; i < table->s->fields; i++) - { + for (uint i = 0; i < table->s->fields; i++) { /* Don't pack decodable PK key parts */ - if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) - { + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) { continue; } - Field* const field= table->field[i]; - if (m_encoder_arr[i].maybe_null()) - { - char* const data= (char*)m_storage_record.ptr(); - if (field->is_null()) - { - data[m_encoder_arr[i].m_null_offset]|= m_encoder_arr[i].m_null_mask; + Field *const field = table->field[i]; + if (m_encoder_arr[i].maybe_null()) { + char *const data = (char *)m_storage_record.ptr(); + if (field->is_null()) { + data[m_encoder_arr[i].m_null_offset] |= m_encoder_arr[i].m_null_mask; /* Don't write anything for NULL values */ continue; } } - if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB) - { - my_core::Field_blob *blob= (my_core::Field_blob*)field; + if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB) { + my_core::Field_blob *blob = 
(my_core::Field_blob *)field; /* Get the number of bytes needed to store length*/ - const uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; /* Store the length of the value */ - m_storage_record.append(reinterpret_cast(blob->ptr), length_bytes); + m_storage_record.append(reinterpret_cast(blob->ptr), + length_bytes); /* Store the blob value itself */ char *data_ptr; - memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar**)); + memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar **)); m_storage_record.append(data_ptr, blob->get_length()); - } - else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) - { - Field_varstring* const field_var= (Field_varstring*)field; + } else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) { + Field_varstring *const field_var = (Field_varstring *)field; uint data_len; /* field_var->length_bytes is 1 or 2 */ - if (field_var->length_bytes == 1) - { - data_len= field_var->ptr[0]; + if (field_var->length_bytes == 1) { + data_len = field_var->ptr[0]; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + data_len = uint2korr(field_var->ptr); } - else - { - DBUG_ASSERT(field_var->length_bytes==2); - data_len= uint2korr(field_var->ptr); - } - m_storage_record.append(reinterpret_cast(field_var->ptr), + m_storage_record.append(reinterpret_cast(field_var->ptr), field_var->length_bytes + data_len); - } - else - { + } else { /* Copy the field data */ - const uint len= field->pack_length_in_rec(); - m_storage_record.append(reinterpret_cast(field->ptr), len); + const uint len = field->pack_length_in_rec(); + m_storage_record.append(reinterpret_cast(field->ptr), len); } } - if (should_store_row_debug_checksums()) - { - const uint32_t key_crc32= my_core::crc32(0, - rdb_slice_to_uchar_ptr(&pk_packed_slice), - pk_packed_slice.size()); - const uint32_t val_crc32= my_core::crc32( - 0, rdb_mysql_str_to_uchar_str(&m_storage_record), - 
m_storage_record.length()); + if (should_store_row_debug_checksums()) { + const uint32_t key_crc32 = my_core::crc32( + 0, rdb_slice_to_uchar_ptr(&pk_packed_slice), pk_packed_slice.size()); + const uint32_t val_crc32 = + my_core::crc32(0, rdb_mysql_str_to_uchar_str(&m_storage_record), + m_storage_record.length()); uchar key_crc_buf[RDB_CHECKSUM_SIZE]; uchar val_crc_buf[RDB_CHECKSUM_SIZE]; rdb_netbuf_store_uint32(key_crc_buf, key_crc32); rdb_netbuf_store_uint32(val_crc_buf, val_crc32); - m_storage_record.append((const char*)&RDB_CHECKSUM_DATA_TAG, 1); - m_storage_record.append((const char*)key_crc_buf, RDB_CHECKSUM_SIZE); - m_storage_record.append((const char*)val_crc_buf, RDB_CHECKSUM_SIZE); + m_storage_record.append((const char *)&RDB_CHECKSUM_DATA_TAG, 1); + m_storage_record.append((const char *)key_crc_buf, RDB_CHECKSUM_SIZE); + m_storage_record.append((const char *)val_crc_buf, RDB_CHECKSUM_SIZE); } - *packed_rec= rocksdb::Slice(m_storage_record.ptr(), - m_storage_record.length()); + *packed_rec = + rocksdb::Slice(m_storage_record.ptr(), m_storage_record.length()); } - /* @brief Setup which fields will be unpacked when reading rows @@ -4406,7 +4040,8 @@ void ha_rocksdb::convert_record_to_storage_format( @detail Two special cases when we still unpack all fields: - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE). - - When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we need to read all + - When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we need to + read all fields to find whether there is a row checksum at the end. We could skip the fields instead of decoding them, but currently we do decoding.) 
@@ -4414,40 +4049,31 @@ void ha_rocksdb::convert_record_to_storage_format( ha_rocksdb::setup_field_converters() ha_rocksdb::convert_record_from_storage_format() */ -void ha_rocksdb::setup_read_decoders() -{ +void ha_rocksdb::setup_read_decoders() { m_decoders_vect.clear(); - int last_useful= 0; - int skip_size= 0; + int last_useful = 0; + int skip_size = 0; - for (uint i= 0; i < table->s->fields; i++) - { + for (uint i = 0; i < table->s->fields; i++) { // We only need the decoder if the whole record is stored. - if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) - { + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) { continue; } if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || - bitmap_is_set(table->read_set, table->field[i]->field_index)) - { + bitmap_is_set(table->read_set, table->field[i]->field_index)) { // We will need to decode this field m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size}); - last_useful= m_decoders_vect.size(); - skip_size= 0; - } - else - { + last_useful = m_decoders_vect.size(); + skip_size = 0; + } else { if (m_encoder_arr[i].uses_variable_len_encoding() || - m_encoder_arr[i].maybe_null()) - { + m_encoder_arr[i].maybe_null()) { // For variable-length field, we need to read the data and skip it m_decoders_vect.push_back({&m_encoder_arr[i], false, skip_size}); - skip_size= 0; - } - else - { + skip_size = 0; + } else { // Fixed-width field can be skipped without looking at it. // Add appropriate skip_size to the next field. 
skip_size += m_encoder_arr[i].m_pack_length_in_rec; @@ -4461,20 +4087,14 @@ void ha_rocksdb::setup_read_decoders() m_decoders_vect.end()); } - #ifndef NDEBUG -void dbug_append_garbage_at_end(std::string &on_disk_rec) -{ +void dbug_append_garbage_at_end(std::string &on_disk_rec) { on_disk_rec.append("abc"); } -void dbug_truncate_record(std::string &on_disk_rec) -{ - on_disk_rec.resize(0); -} +void dbug_truncate_record(std::string &on_disk_rec) { on_disk_rec.resize(0); } -void dbug_modify_rec_varchar12(std::string &on_disk_rec) -{ +void dbug_modify_rec_varchar12(std::string &on_disk_rec) { std::string res; // The record is NULL-byte followed by VARCHAR(10). // Put the NULL-byte @@ -4486,9 +4106,7 @@ void dbug_modify_rec_varchar12(std::string &on_disk_rec) on_disk_rec.assign(res); } - -void dbug_modify_key_varchar8(String &on_disk_rec) -{ +void dbug_modify_key_varchar8(String &on_disk_rec) { std::string res; // The key starts with index number res.append(on_disk_rec.ptr(), Rdb_key_def::INDEX_NUMBER_SIZE); @@ -4499,13 +4117,14 @@ void dbug_modify_key_varchar8(String &on_disk_rec) on_disk_rec.append(res.data(), res.size()); } +void dbug_create_err_inplace_alter() { + my_printf_error(ER_UNKNOWN_ERROR, + "Intentional failure in inplace alter occurred.", MYF(0)); +} #endif - int ha_rocksdb::convert_record_from_storage_format( - const rocksdb::Slice* const key, - uchar* const buf) -{ + const rocksdb::Slice *const key, uchar *const buf) { DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", dbug_append_garbage_at_end(m_retrieved_record);); DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2", @@ -4518,7 +4137,6 @@ int ha_rocksdb::convert_record_from_storage_format( return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); } - /* @brief Unpack the record in this->m_retrieved_record and this->m_last_rowkey from @@ -4544,15 +4162,13 @@ int ha_rocksdb::convert_record_from_storage_format( */ int ha_rocksdb::convert_record_from_storage_format( - const rocksdb::Slice* 
const key, - const rocksdb::Slice* const value, - uchar* const buf) -{ + const rocksdb::Slice *const key, const rocksdb::Slice *const value, + uchar *const buf) { DBUG_ASSERT(key != nullptr); DBUG_ASSERT(buf != nullptr); Rdb_string_reader reader(value); - const my_ptrdiff_t ptr_diff= buf - table->record[0]; + const my_ptrdiff_t ptr_diff = buf - table->record[0]; /* Decode PK fields from the key @@ -4562,152 +4178,127 @@ int ha_rocksdb::convert_record_from_storage_format( const rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); - const char *unpack_info= nullptr; - uint16 unpack_info_len= 0; + const char *unpack_info = nullptr; + uint16 unpack_info_len = 0; rocksdb::Slice unpack_slice; /* Other fields are decoded from the value */ - const char *null_bytes= nullptr; - if (m_null_bytes_in_rec && !(null_bytes= reader.read(m_null_bytes_in_rec))) - { + const char *null_bytes = nullptr; + if (m_null_bytes_in_rec && !(null_bytes = reader.read(m_null_bytes_in_rec))) { return HA_ERR_INTERNAL_ERROR; } - if (m_maybe_unpack_info) - { - unpack_info= reader.read(RDB_UNPACK_HEADER_SIZE); + if (m_maybe_unpack_info) { + unpack_info = reader.read(RDB_UNPACK_HEADER_SIZE); - if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG) - { + if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG) { return HA_ERR_INTERNAL_ERROR; } - unpack_info_len= rdb_netbuf_to_uint16( - reinterpret_cast(unpack_info + 1)); - unpack_slice= rocksdb::Slice(unpack_info, unpack_info_len); + unpack_info_len = + rdb_netbuf_to_uint16(reinterpret_cast(unpack_info + 1)); + unpack_slice = rocksdb::Slice(unpack_info, unpack_info_len); reader.read(unpack_info_len - RDB_UNPACK_HEADER_SIZE); } if (m_pk_descr->unpack_record(table, buf, &rowkey_slice, unpack_info ? 
&unpack_slice : nullptr, - false /* verify_checksum */)) - { + false /* verify_checksum */)) { return HA_ERR_INTERNAL_ERROR; } - for (auto it= m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) - { - const Rdb_field_encoder* const field_dec= it->m_field_enc; - const bool decode= it->m_decode; - const bool isNull = field_dec->maybe_null() && - ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0); + for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) { + const Rdb_field_encoder *const field_dec = it->m_field_enc; + const bool decode = it->m_decode; + const bool isNull = + field_dec->maybe_null() && + ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0); - Field* const field= table->field[field_dec->m_field_index]; + Field *const field = table->field[field_dec->m_field_index]; /* Skip the bytes we need to skip */ if (it->m_skip && !reader.read(it->m_skip)) return HA_ERR_INTERNAL_ERROR; - if (isNull) - { - if (decode) - { + if (isNull) { + if (decode) { /* This sets the NULL-bit of this record */ field->set_null(ptr_diff); /* Besides that, set the field value to default value. CHECKSUM TABLE depends on this. 
*/ - uint field_offset= field->ptr - table->record[0]; - memcpy(buf + field_offset, - table->s->default_values + field_offset, + uint field_offset = field->ptr - table->record[0]; + memcpy(buf + field_offset, table->s->default_values + field_offset, field->pack_length()); } continue; - } - else - { + } else { if (decode) field->set_notnull(ptr_diff); } - if (field_dec->m_field_type == MYSQL_TYPE_BLOB) - { - my_core::Field_blob* const blob= (my_core::Field_blob*)field; + if (field_dec->m_field_type == MYSQL_TYPE_BLOB) { + my_core::Field_blob *const blob = (my_core::Field_blob *)field; /* Get the number of bytes needed to store length*/ - const uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr; + const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; blob->move_field_offset(ptr_diff); const char *data_len_str; - if (!(data_len_str= reader.read(length_bytes))) - { + if (!(data_len_str = reader.read(length_bytes))) { blob->move_field_offset(-ptr_diff); return HA_ERR_INTERNAL_ERROR; } memcpy(blob->ptr, data_len_str, length_bytes); - const uint32 data_len= blob->get_length((uchar*)data_len_str, - length_bytes, - table->s->db_low_byte_first); + const uint32 data_len = blob->get_length( + (uchar *)data_len_str, length_bytes, table->s->db_low_byte_first); const char *blob_ptr; - if (!(blob_ptr= reader.read(data_len))) - { + if (!(blob_ptr = reader.read(data_len))) { blob->move_field_offset(-ptr_diff); return HA_ERR_INTERNAL_ERROR; } - if (decode) - { + if (decode) { // set 8-byte pointer to 0, like innodb does (relevant for 32-bit // platforms) memset(blob->ptr + length_bytes, 0, 8); - memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar**)); + memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **)); blob->move_field_offset(-ptr_diff); } - } - else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) - { - Field_varstring* const field_var= (Field_varstring*)field; + } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) { + 
Field_varstring *const field_var = (Field_varstring *)field; const char *data_len_str; - if (!(data_len_str= reader.read(field_var->length_bytes))) + if (!(data_len_str = reader.read(field_var->length_bytes))) return HA_ERR_INTERNAL_ERROR; uint data_len; /* field_var->length_bytes is 1 or 2 */ - if (field_var->length_bytes == 1) - { - data_len= (uchar)data_len_str[0]; - } - else - { + if (field_var->length_bytes == 1) { + data_len = (uchar)data_len_str[0]; + } else { DBUG_ASSERT(field_var->length_bytes == 2); - data_len= uint2korr(data_len_str); + data_len = uint2korr(data_len_str); } - if (data_len > field->field_length) - { + if (data_len > field->field_length) { /* The data on disk is longer than table DDL allows? */ return HA_ERR_INTERNAL_ERROR; } if (!reader.read(data_len)) return HA_ERR_INTERNAL_ERROR; - if (decode) - { + if (decode) { memcpy(field_var->ptr + ptr_diff, data_len_str, field_var->length_bytes + data_len); } - } - else - { + } else { const char *data_bytes; - const uint len= field_dec->m_pack_length_in_rec; - if (len > 0) - { - if ((data_bytes= reader.read(len)) == nullptr) - { + const uint len = field_dec->m_pack_length_in_rec; + if (len > 0) { + if ((data_bytes = reader.read(len)) == nullptr) { return HA_ERR_INTERNAL_ERROR; } if (decode) @@ -4716,35 +4307,31 @@ int ha_rocksdb::convert_record_from_storage_format( } } - if (m_verify_row_debug_checksums) - { + if (m_verify_row_debug_checksums) { if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE && - reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) - { - uint32_t stored_key_chksum= - rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE)); - uint32_t stored_val_chksum= - rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE)); + reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) { + uint32_t stored_key_chksum = + rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE)); + uint32_t stored_val_chksum = + rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE)); - 
const uint32_t computed_key_chksum= + const uint32_t computed_key_chksum = my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size()); - const uint32_t computed_val_chksum= + const uint32_t computed_val_chksum = my_core::crc32(0, rdb_slice_to_uchar_ptr(value), value->size() - RDB_CHECKSUM_CHUNK_SIZE); DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum1", stored_key_chksum++;); - if (stored_key_chksum != computed_key_chksum) - { + if (stored_key_chksum != computed_key_chksum) { m_pk_descr->report_checksum_mismatch(true, key->data(), key->size()); return HA_ERR_INTERNAL_ERROR; } DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2", stored_val_chksum++;); - if (stored_val_chksum != computed_val_chksum) - { + if (stored_val_chksum != computed_val_chksum) { m_pk_descr->report_checksum_mismatch(false, value->data(), value->size()); return HA_ERR_INTERNAL_ERROR; @@ -4756,52 +4343,45 @@ int ha_rocksdb::convert_record_from_storage_format( return HA_ERR_INTERNAL_ERROR; } - return 0; + return HA_EXIT_SUCCESS; } - -void ha_rocksdb::get_storage_type(Rdb_field_encoder* const encoder, - const uint &kp) -{ +void ha_rocksdb::get_storage_type(Rdb_field_encoder *const encoder, + const uint &kp) { // STORE_SOME uses unpack_info. - if (m_pk_descr->has_unpack_info(kp)) - { + if (m_pk_descr->has_unpack_info(kp)) { DBUG_ASSERT(m_pk_descr->can_unpack(kp)); - encoder->m_storage_type= Rdb_field_encoder::STORE_SOME; - m_maybe_unpack_info= true; - } - else if (m_pk_descr->can_unpack(kp)) - { - encoder->m_storage_type= Rdb_field_encoder::STORE_NONE; + encoder->m_storage_type = Rdb_field_encoder::STORE_SOME; + m_maybe_unpack_info = true; + } else if (m_pk_descr->can_unpack(kp)) { + encoder->m_storage_type = Rdb_field_encoder::STORE_NONE; } } /* - Setup data needed to convert table->record[] to and from record storage format. + Setup data needed to convert table->record[] to and from record storage + format. 
@seealso ha_rocksdb::convert_record_to_storage_format, ha_rocksdb::convert_record_from_storage_format */ -void ha_rocksdb::setup_field_converters() -{ +void ha_rocksdb::setup_field_converters() { uint i; - uint null_bytes= 0; - uchar cur_null_mask= 0x1; + uint null_bytes = 0; + uchar cur_null_mask = 0x1; DBUG_ASSERT(m_encoder_arr == nullptr); - m_encoder_arr= static_cast( + m_encoder_arr = static_cast( my_malloc(table->s->fields * sizeof(Rdb_field_encoder), MYF(0))); - if (m_encoder_arr == nullptr) - { + if (m_encoder_arr == nullptr) { return; } - for (i= 0; i < table->s->fields; i++) - { - Field* const field= table->field[i]; - m_encoder_arr[i].m_storage_type= Rdb_field_encoder::STORE_ALL; + for (i = 0; i < table->s->fields; i++) { + Field *const field = table->field[i]; + m_encoder_arr[i].m_storage_type = Rdb_field_encoder::STORE_ALL; /* Check if this field is @@ -4814,39 +4394,31 @@ void ha_rocksdb::setup_field_converters() part of the hidden pk. */ if (!has_hidden_pk(table) && - field->part_of_key.is_set(table->s->primary_key)) - { - KEY* const pk_info= &table->key_info[table->s->primary_key]; - for (uint kp= 0; kp < pk_info->user_defined_key_parts; kp++) - { + field->part_of_key.is_set(table->s->primary_key)) { + KEY *const pk_info = &table->key_info[table->s->primary_key]; + for (uint kp = 0; kp < pk_info->user_defined_key_parts; kp++) { /* key_part->fieldnr is counted from 1 */ - if (field->field_index + 1 == pk_info->key_part[kp].fieldnr) - { + if (field->field_index + 1 == pk_info->key_part[kp].fieldnr) { get_storage_type(&m_encoder_arr[i], kp); break; } } } - m_encoder_arr[i].m_field_type= field->real_type(); - m_encoder_arr[i].m_field_index= i; - m_encoder_arr[i].m_pack_length_in_rec= field->pack_length_in_rec(); + m_encoder_arr[i].m_field_type = field->real_type(); + m_encoder_arr[i].m_field_index = i; + m_encoder_arr[i].m_pack_length_in_rec = field->pack_length_in_rec(); - if (field->real_maybe_null()) - { - m_encoder_arr[i].m_null_mask= cur_null_mask; 
- m_encoder_arr[i].m_null_offset= null_bytes; - if (cur_null_mask == 0x80) - { - cur_null_mask= 0x1; + if (field->real_maybe_null()) { + m_encoder_arr[i].m_null_mask = cur_null_mask; + m_encoder_arr[i].m_null_offset = null_bytes; + if (cur_null_mask == 0x80) { + cur_null_mask = 0x1; null_bytes++; - } - else - cur_null_mask= cur_null_mask << 1; - } - else - { - m_encoder_arr[i].m_null_mask= 0; + } else + cur_null_mask = cur_null_mask << 1; + } else { + m_encoder_arr[i].m_null_mask = 0; } } @@ -4854,107 +4426,128 @@ void ha_rocksdb::setup_field_converters() if (cur_null_mask != 0x1) null_bytes++; - m_null_bytes_in_rec= null_bytes; + m_null_bytes_in_rec = null_bytes; } -int ha_rocksdb::alloc_key_buffers(const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ - DBUG_ENTER("ha_rocksdb::alloc_key_buffers"); +int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool alloc_alter_buffers) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(m_pk_tuple == nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); - std::shared_ptr* const kd_arr= tbl_def_arg->m_key_descr_arr; + std::shared_ptr *const kd_arr = tbl_def_arg->m_key_descr_arr; - uint key_len= 0; - m_pk_descr= kd_arr[pk_index(table_arg, tbl_def_arg)]; - if (has_hidden_pk(table_arg)) - { - m_pk_key_parts= 1; - } - else - { - m_pk_key_parts= + uint key_len = 0; + uint max_packed_sk_len = 0; + uint pack_key_len = 0; + + m_pk_descr = kd_arr[pk_index(table_arg, tbl_def_arg)]; + if (has_hidden_pk(table_arg)) { + m_pk_key_parts = 1; + } else { + m_pk_key_parts = table->key_info[table->s->primary_key].user_defined_key_parts; - key_len= table->key_info[table->s->primary_key].key_length; + key_len = table->key_info[table->s->primary_key].key_length; } // move this into get_table_handler() ?? 
m_pk_descr->setup(table_arg, tbl_def_arg); - m_pk_tuple= reinterpret_cast(my_malloc(key_len, MYF(0))); - if (m_pk_tuple == nullptr) - { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + m_pk_tuple = reinterpret_cast(my_malloc(key_len, MYF(0))); + if (m_pk_tuple == nullptr) { + goto error; } - const uint pack_key_len= m_pk_descr->max_storage_fmt_length(); - m_pk_packed_tuple= reinterpret_cast(my_malloc(pack_key_len, MYF(0))); - if (m_pk_packed_tuple == nullptr) - { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + pack_key_len = m_pk_descr->max_storage_fmt_length(); + m_pk_packed_tuple = + reinterpret_cast(my_malloc(pack_key_len, MYF(0))); + if (m_pk_packed_tuple == nullptr) { + goto error; } /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */ - uint max_packed_sk_len= pack_key_len; - for (uint i= 0; i < table_arg->s->keys; i++) - { + max_packed_sk_len = pack_key_len; + for (uint i = 0; i < table_arg->s->keys; i++) { if (i == table_arg->s->primary_key) /* Primary key was processed above */ continue; // TODO: move this into get_table_handler() ?? 
kd_arr[i]->setup(table_arg, tbl_def_arg); - const uint packed_len= kd_arr[i]->max_storage_fmt_length(); - if (packed_len > max_packed_sk_len) - { - max_packed_sk_len= packed_len; + const uint packed_len = kd_arr[i]->max_storage_fmt_length(); + if (packed_len > max_packed_sk_len) { + max_packed_sk_len = packed_len; } } - if (!(m_sk_packed_tuple= - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_sk_match_prefix_buf= - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_sk_packed_tuple_old= - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_end_key_packed_tuple= - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !((m_pack_buffer= - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))))) - { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + if (!(m_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_match_prefix_buf = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_packed_tuple_old = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_end_key_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !((m_pack_buffer = reinterpret_cast( + my_malloc(max_packed_sk_len, MYF(0)))))) { + goto error; } - DBUG_RETURN(0); + /* + If inplace alter is happening, allocate special buffers for unique + secondary index duplicate checking. + */ + if (alloc_alter_buffers && + (!(m_dup_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_dup_sk_packed_tuple_old = reinterpret_cast( + my_malloc(max_packed_sk_len, MYF(0)))))) { + goto error; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); + +error: + // If we're here then this means that at some point above an allocation may + // have failed. To avoid any resource leaks and maintain a clear contract + // we'll clean up before returning the error code. 
+ free_key_buffers(); + + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } -void ha_rocksdb::free_key_buffers() -{ +void ha_rocksdb::free_key_buffers() { my_free(m_pk_tuple); - m_pk_tuple= nullptr; + m_pk_tuple = nullptr; my_free(m_pk_packed_tuple); - m_pk_packed_tuple= nullptr; + m_pk_packed_tuple = nullptr; my_free(m_sk_packed_tuple); - m_sk_packed_tuple= nullptr; + m_sk_packed_tuple = nullptr; my_free(m_sk_match_prefix_buf); - m_sk_match_prefix_buf= nullptr; + m_sk_match_prefix_buf = nullptr; my_free(m_sk_packed_tuple_old); - m_sk_packed_tuple_old= nullptr; + m_sk_packed_tuple_old = nullptr; my_free(m_end_key_packed_tuple); - m_end_key_packed_tuple= nullptr; + m_end_key_packed_tuple = nullptr; my_free(m_pack_buffer); - m_pack_buffer= nullptr; + m_pack_buffer = nullptr; + + my_free(m_dup_sk_packed_tuple); + m_dup_sk_packed_tuple = nullptr; + + my_free(m_dup_sk_packed_tuple_old); + m_dup_sk_packed_tuple_old = nullptr; } -void ha_rocksdb::set_use_read_free_rpl(const char* const whitelist) -{ +void ha_rocksdb::set_use_read_free_rpl(const char *const whitelist) { DBUG_ASSERT(whitelist != nullptr); #if defined(HAVE_PSI_INTERFACE) @@ -4963,16 +4556,14 @@ void ha_rocksdb::set_use_read_free_rpl(const char* const whitelist) Regex_list_handler regex_handler; #endif - if (!regex_handler.set_patterns(whitelist)) - { + if (!regex_handler.set_patterns(whitelist)) { warn_about_bad_patterns(®ex_handler, "read_free_rpl_tables"); } m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename()); } -void ha_rocksdb::set_skip_unique_check_tables(const char* const whitelist) -{ +void ha_rocksdb::set_skip_unique_check_tables(const char *const whitelist) { DBUG_ASSERT(whitelist != nullptr); #if defined(HAVE_PSI_INTERFACE) @@ -4981,25 +4572,23 @@ void ha_rocksdb::set_skip_unique_check_tables(const char* const whitelist) Regex_list_handler regex_handler; #endif - if (!regex_handler.set_patterns(whitelist)) - { + if (!regex_handler.set_patterns(whitelist)) { 
warn_about_bad_patterns(®ex_handler, "skip_unique_check_tables"); } - m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename()); + m_skip_unique_check = regex_handler.matches(m_tbl_def->base_tablename()); } -int ha_rocksdb::open(const char* const name, int mode, uint test_if_locked) -{ - DBUG_ENTER("ha_rocksdb::open"); +int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { + DBUG_ENTER_FUNC(); - if (m_table_handler != nullptr) - { - rdb_open_tables.release_table_handler(m_table_handler); + if (close()) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - m_table_handler= rdb_open_tables.get_table_handler(name); - if (m_table_handler == nullptr) - { + + m_table_handler = rdb_open_tables.get_table_handler(name); + + if (m_table_handler == nullptr) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -5008,38 +4597,33 @@ int ha_rocksdb::open(const char* const name, int mode, uint test_if_locked) m_io_perf.init(&m_table_handler->m_table_perf_context, &m_table_handler->m_io_perf_read, &stats); - /* - note: m_pk_descr may be non-NULL here, when using partitions. 
It seems, - it's ok to discard it - */ std::string fullname; - if (rdb_normalize_tablename(name, &fullname)) - { - return HA_ERR_INTERNAL_ERROR; - } - - m_tbl_def= ddl_manager.find(fullname); - if (m_tbl_def == nullptr) - { - my_error(ER_INTERNAL_ERROR, MYF(0), - "Attempt to open a table that is not present in RocksDB-SE data " - "dictionary"); + if (rdb_normalize_tablename(name, &fullname)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - m_lock_rows= RDB_LOCK_NONE; - m_key_descr_arr= m_tbl_def->m_key_descr_arr; + m_tbl_def = ddl_manager.find(fullname); + if (m_tbl_def == nullptr) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "Attempt to open a table that is not present in RocksDB-SE data " + "dictionary"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + m_lock_rows = RDB_LOCK_NONE; + + m_key_descr_arr = m_tbl_def->m_key_descr_arr; /* Full table scan actually uses primary key (UPDATE needs to know this, otherwise it will go into infinite loop on queries like "UPDATE tbl SET pk=pk+100") */ - key_used_on_scan= table->s->primary_key; + key_used_on_scan = table->s->primary_key; - int err; - if ((err = alloc_key_buffers(table, m_tbl_def))) - { + // close() above has already called free_key_buffers(). No need to do it here. 
+ int err = alloc_key_buffers(table, m_tbl_def); + + if (err) { DBUG_RETURN(err); } @@ -5067,14 +4651,16 @@ int ha_rocksdb::open(const char* const name, int mode, uint test_if_locked) Initialize the necessary variables for them: */ - m_verify_row_debug_checksums= false; + m_verify_row_debug_checksums = false; /* TODO: move the following to where TABLE_SHARE is opened: */ if (table->found_next_number_field) load_auto_incr_value(); - if (has_hidden_pk(table) && load_hidden_pk_value()) + if (has_hidden_pk(table) && load_hidden_pk_value()) { + free_key_buffers(); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } /* Index block size in MyRocks: used by MySQL in query optimization */ stats.block_size = rocksdb_tbl_options.block_size; @@ -5085,78 +4671,78 @@ int ha_rocksdb::open(const char* const name, int mode, uint test_if_locked) /* Determine at open whether we should skip unique checks for this table */ set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables)); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } +int ha_rocksdb::close(void) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::close(void) -{ - DBUG_ENTER("ha_rocksdb::close"); - - m_pk_descr= nullptr; - m_key_descr_arr= nullptr; + m_pk_descr = nullptr; + m_key_descr_arr = nullptr; free_key_buffers(); my_free(m_encoder_arr); - m_encoder_arr= nullptr; + m_encoder_arr = nullptr; - rdb_open_tables.release_table_handler(m_table_handler); - m_table_handler= nullptr; + if (m_table_handler != nullptr) { + rdb_open_tables.release_table_handler(m_table_handler); + m_table_handler = nullptr; + } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } -bool ha_rocksdb::get_error_message(const int error, String* const buf) -{ +bool ha_rocksdb::get_error_message(const int error, String *const buf) { + DBUG_ENTER_FUNC(); + static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST, - "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST"); + "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST"); static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST, - "HA_ERR_ROCKSDB_LAST > 
HA_ERR_LAST"); + "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST"); DBUG_ASSERT(error > 0); DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST); DBUG_ASSERT(buf != nullptr); - Rdb_transaction* const tx = get_tx_from_thd(ha_thd()); + Rdb_transaction *const tx = get_tx_from_thd(ha_thd()); bool temp_error = false; switch (error) { - case HA_ERR_ROCKSDB_PK_REQUIRED: - buf->append("Table must have a PRIMARY KEY."); - break; - case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED: - buf->append("Unique indexes are not supported."); - break; - case HA_ERR_ROCKSDB_TOO_MANY_LOCKS: - buf->append("Number of locks held reached @@rocksdb_max_row_locks."); - break; - case HA_ERR_LOCK_WAIT_TIMEOUT: - DBUG_ASSERT(tx != nullptr); - buf->append(tx->m_detailed_error); - temp_error = true; - break; - default: - // We can be called with the values which are < HA_ERR_FIRST because most - // MySQL internal functions will just return 1 in case of an error. - break; + case HA_ERR_ROCKSDB_PK_REQUIRED: + buf->append("Table must have a PRIMARY KEY."); + break; + case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED: + buf->append("Unique indexes are not supported."); + break; + case HA_ERR_ROCKSDB_TOO_MANY_LOCKS: + buf->append("Number of locks held reached @@rocksdb_max_row_locks."); + break; + case HA_ERR_LOCK_WAIT_TIMEOUT: + DBUG_ASSERT(tx != nullptr); + buf->append(tx->m_detailed_error); + temp_error = true; + break; + default: + // We can be called with the values which are < HA_ERR_FIRST because most + // MySQL internal functions will just return HA_EXIT_FAILURE in case of + // an error. 
+ break; } - return temp_error; + DBUG_RETURN(temp_error); } /* MyRocks supports only the following collations for indexed columns */ -static const std::set RDB_INDEX_COLLATIONS= - {&my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin}; +static const std::set RDB_INDEX_COLLATIONS = { + &my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin}; - -static bool rdb_is_index_collation_supported(const my_core::Field* const field) -{ - const my_core::enum_field_types type= field->real_type(); +static bool +rdb_is_index_collation_supported(const my_core::Field *const field) { + const my_core::enum_field_types type = field->real_type(); /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING || - type == MYSQL_TYPE_BLOB) - { + type == MYSQL_TYPE_BLOB) { return RDB_INDEX_COLLATIONS.find(field->charset()) != RDB_INDEX_COLLATIONS.end(); } @@ -5179,17 +4765,16 @@ static bool rdb_is_index_collation_supported(const my_core::Field* const field) 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. */ -int ha_rocksdb::create_key_defs(const TABLE* const table_arg, - Rdb_tbl_def* const tbl_def_arg, - const TABLE* const old_table_arg /* = nullptr */, - const Rdb_tbl_def* const old_tbl_def_arg - /* = nullptr */) const -{ +int ha_rocksdb::create_key_defs( + const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg /* = nullptr */, + const Rdb_tbl_def *const old_tbl_def_arg + /* = nullptr */) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); - DBUG_ENTER("ha_rocksdb::create_key_defs"); - uint i; /* @@ -5204,13 +4789,11 @@ int ha_rocksdb::create_key_defs(const TABLE* const table_arg, allocated to each key definition. See below for more details. 
http://github.com/MySQLOnRocksDB/mysql-5.6/issues/86#issuecomment-138515501 */ - if (create_cfs(table_arg, tbl_def_arg, &cfs)) - { - DBUG_RETURN(1); + if (create_cfs(table_arg, tbl_def_arg, &cfs)) { + DBUG_RETURN(HA_EXIT_FAILURE); }; - if (!old_tbl_def_arg) - { + if (!old_tbl_def_arg) { /* old_tbl_def doesn't exist. this means we are in the process of creating a new table. @@ -5218,30 +4801,25 @@ int ha_rocksdb::create_key_defs(const TABLE* const table_arg, Get the index numbers (this will update the next_index_number) and create Rdb_key_def structures. */ - for (i= 0; i < tbl_def_arg->m_key_count; i++) - { - if (create_key_def(table_arg, i, tbl_def_arg, - &m_key_descr_arr[i], cfs[i])) - { - DBUG_RETURN(1); + for (i = 0; i < tbl_def_arg->m_key_count; i++) { + if (create_key_def(table_arg, i, tbl_def_arg, &m_key_descr_arr[i], + cfs[i])) { + DBUG_RETURN(HA_EXIT_FAILURE); } } - } - else - { + } else { /* old_tbl_def exists. This means we are creating a new tbl_def as part of in-place alter table. Copy over existing keys from the old_tbl_def and generate the necessary new key definitions if any. 
*/ if (create_inplace_key_defs(table_arg, tbl_def_arg, old_table_arg, - old_tbl_def_arg, cfs)) - { - DBUG_RETURN(1); + old_tbl_def_arg, cfs)) { + DBUG_RETURN(HA_EXIT_FAILURE); } } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } /* @@ -5260,15 +4838,14 @@ int ha_rocksdb::create_key_defs(const TABLE* const table_arg, 0 - Ok other - error */ -int ha_rocksdb::create_cfs(const TABLE* const table_arg, - Rdb_tbl_def* const tbl_def_arg, - std::array* const cfs) const -{ +int ha_rocksdb::create_cfs( + const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + std::array *const cfs) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); - DBUG_ENTER("ha_rocksdb::create_cfs"); - char tablename_sys[NAME_LEN + 1]; my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(), @@ -5278,36 +4855,31 @@ int ha_rocksdb::create_cfs(const TABLE* const table_arg, The first loop checks the index parameters and creates column families if necessary. 
*/ - for (uint i= 0; i < tbl_def_arg->m_key_count; i++) - { - rocksdb::ColumnFamilyHandle* cf_handle; + for (uint i = 0; i < tbl_def_arg->m_key_count; i++) { + rocksdb::ColumnFamilyHandle *cf_handle; if (rocksdb_strict_collation_check && !is_hidden_pk(i, table_arg, tbl_def_arg) && - tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) - { - for (uint part= 0; part < table_arg->key_info[i].actual_key_parts; part++) - { + tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) { + for (uint part = 0; part < table_arg->key_info[i].actual_key_parts; + part++) { if (!rdb_is_index_collation_supported( - table_arg->key_info[i].key_part[part].field) && - !rdb_collation_exceptions->matches(tablename_sys)) - { + table_arg->key_info[i].key_part[part].field) && + !rdb_collation_exceptions->matches(tablename_sys)) { std::string collation_err; - for (const auto &coll : RDB_INDEX_COLLATIONS) - { - if (collation_err != "") - { + for (const auto &coll : RDB_INDEX_COLLATIONS) { + if (collation_err != "") { collation_err += ", "; } collation_err += coll->name; } - my_printf_error(ER_UNKNOWN_ERROR, - "Unsupported collation on string indexed " - "column %s.%s Use binary collation (%s).", MYF(0), - tbl_def_arg->full_tablename().c_str(), - table_arg->key_info[i].key_part[part].field->field_name, - collation_err.c_str()); - DBUG_RETURN(1); + my_printf_error( + ER_UNKNOWN_ERROR, "Unsupported collation on string indexed " + "column %s.%s Use binary collation (%s).", + MYF(0), tbl_def_arg->full_tablename().c_str(), + table_arg->key_info[i].key_part[part].field->field_name, + collation_err.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); } } } @@ -5316,36 +4888,34 @@ int ha_rocksdb::create_cfs(const TABLE* const table_arg, index comment has Column Family name. If there was no comment, we get NULL, and it means use the default column family. 
*/ - const char* const comment = get_key_comment(i, table_arg, tbl_def_arg); - const char* const key_name = get_key_name(i, table_arg, tbl_def_arg); + const char *const comment = get_key_comment(i, table_arg, tbl_def_arg); + const char *const key_name = get_key_name(i, table_arg, tbl_def_arg); - if (looks_like_per_index_cf_typo(comment)) - { + if (looks_like_per_index_cf_typo(comment)) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "column family name looks like a typo of $per_index_cf"); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } /* Prevent create from using the system column family */ - if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0) - { + if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "column family not valid for storing index data"); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } bool is_auto_cf_flag; - cf_handle = cf_manager.get_or_create_cf(rdb, comment, - tbl_def_arg->full_tablename(), - key_name, &is_auto_cf_flag); + cf_handle = + cf_manager.get_or_create_cf(rdb, comment, tbl_def_arg->full_tablename(), + key_name, &is_auto_cf_flag); if (!cf_handle) - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); - auto& cf = (*cfs)[i]; + auto &cf = (*cfs)[i]; cf.cf_handle = cf_handle; cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(comment); cf.is_auto_cf = is_auto_cf_flag; } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } /* @@ -5362,51 +4932,46 @@ int ha_rocksdb::create_cfs(const TABLE* const table_arg, 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. 
*/ -int ha_rocksdb::create_inplace_key_defs(const TABLE* const table_arg, - Rdb_tbl_def* const tbl_def_arg, - const TABLE* const old_table_arg, - const Rdb_tbl_def* const old_tbl_def_arg, - const std::array& cfs) const -{ +int ha_rocksdb::create_inplace_key_defs( + const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg, const Rdb_tbl_def *const old_tbl_def_arg, + const std::array &cfs) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); DBUG_ASSERT(old_tbl_def_arg != nullptr); - DBUG_ENTER("create_key_def"); - - std::shared_ptr* const old_key_descr= + std::shared_ptr *const old_key_descr = old_tbl_def_arg->m_key_descr_arr; - std::shared_ptr* const new_key_descr= + std::shared_ptr *const new_key_descr = tbl_def_arg->m_key_descr_arr; const std::unordered_map old_key_pos = - get_old_key_positions(table_arg, tbl_def_arg, old_table_arg, - old_tbl_def_arg); + get_old_key_positions(table_arg, tbl_def_arg, old_table_arg, + old_tbl_def_arg); uint i; - for (i= 0; i < tbl_def_arg->m_key_count; i++) - { + for (i = 0; i < tbl_def_arg->m_key_count; i++) { const auto &it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg)); - if (it != old_key_pos.end()) - { + if (it != old_key_pos.end()) { /* Found matching index in old table definition, so copy it over to the new one created. 
*/ - const Rdb_key_def& okd= *old_key_descr[it->second]; + const Rdb_key_def &okd = *old_key_descr[it->second]; - uint16 index_dict_version= 0; - uchar index_type= 0; - uint16 kv_version= 0; - const GL_INDEX_ID gl_index_id= okd.get_gl_index_id(); + uint16 index_dict_version = 0; + uchar index_type = 0; + uint16 kv_version = 0; + const GL_INDEX_ID gl_index_id = okd.get_gl_index_id(); if (!dict_manager.get_index_info(gl_index_id, &index_dict_version, - &index_type, &kv_version)) - { + &index_type, &kv_version)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Could not get index information " "for Index Number (%u,%u), table %s", gl_index_id.cf_id, gl_index_id.index_id, old_tbl_def_arg->full_tablename().c_str()); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } /* @@ -5414,59 +4979,45 @@ int ha_rocksdb::create_inplace_key_defs(const TABLE* const table_arg, keynr within the pack_info for each field and the keyno of the keydef itself. */ - new_key_descr[i]= std::make_shared( - okd.get_index_number(), - i, - okd.get_cf(), - index_dict_version, - index_type, - kv_version, - okd.m_is_reverse_cf, - okd.m_is_auto_cf, - okd.m_name.c_str(), - dict_manager.get_stats(gl_index_id)); - } - else if (create_key_def(table_arg, i, tbl_def_arg, - &new_key_descr[i], cfs[i])) - { - DBUG_RETURN(1); + new_key_descr[i] = std::make_shared( + okd.get_index_number(), i, okd.get_cf(), index_dict_version, + index_type, kv_version, okd.m_is_reverse_cf, okd.m_is_auto_cf, + okd.m_name.c_str(), dict_manager.get_stats(gl_index_id)); + } else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i], + cfs[i])) { + DBUG_RETURN(HA_EXIT_FAILURE); } DBUG_ASSERT(new_key_descr[i] != nullptr); new_key_descr[i]->setup(table_arg, tbl_def_arg); } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } std::unordered_map ha_rocksdb::get_old_key_positions( - const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg, - const TABLE* const old_table_arg, - const Rdb_tbl_def* const old_tbl_def_arg) 
const -{ + const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg, + const Rdb_tbl_def *const old_tbl_def_arg) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(old_table_arg != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); DBUG_ASSERT(old_tbl_def_arg != nullptr); - DBUG_ENTER("get_old_key_positions"); - - std::shared_ptr* const old_key_descr= + std::shared_ptr *const old_key_descr = old_tbl_def_arg->m_key_descr_arr; std::unordered_map old_key_pos; std::unordered_map new_key_pos; uint i; - for (i= 0; i < tbl_def_arg->m_key_count; i++) - { + for (i = 0; i < tbl_def_arg->m_key_count; i++) { new_key_pos[get_key_name(i, table_arg, tbl_def_arg)] = i; } - for (i= 0; i < old_tbl_def_arg->m_key_count; i++) - { - if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg)) - { + for (i = 0; i < old_tbl_def_arg->m_key_count; i++) { + if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg)) { old_key_pos[old_key_descr[i]->m_name] = i; continue; } @@ -5480,17 +5031,15 @@ std::unordered_map ha_rocksdb::get_old_key_positions( CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB; ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; */ - const KEY* const old_key = &old_table_arg->key_info[i]; + const KEY *const old_key = &old_table_arg->key_info[i]; const auto &it = new_key_pos.find(old_key->name); - if (it == new_key_pos.end()) - { + if (it == new_key_pos.end()) { continue; } - KEY* const new_key = &table_arg->key_info[it->second]; + KEY *const new_key = &table_arg->key_info[it->second]; - if (!compare_key_parts(old_key, new_key)) - { + if (!compare_key_parts(old_key, new_key)) { old_key_pos[old_key->name] = i; } } @@ -5499,31 +5048,27 @@ std::unordered_map ha_rocksdb::get_old_key_positions( } /* Check two keys to ensure that key parts within keys match */ -int ha_rocksdb::compare_key_parts(const KEY* const old_key, - const KEY* const new_key) const -{ +int ha_rocksdb::compare_key_parts(const KEY 
*const old_key, + const KEY *const new_key) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(old_key != nullptr); DBUG_ASSERT(new_key != nullptr); - DBUG_ENTER("compare_key_parts"); - /* Skip if key parts do not match, as it is a different key */ - if (new_key->user_defined_key_parts != old_key->user_defined_key_parts) - { - DBUG_RETURN(1); + if (new_key->user_defined_key_parts != old_key->user_defined_key_parts) { + DBUG_RETURN(HA_EXIT_FAILURE); } /* Check to see that key parts themselves match */ - for (uint i= 0; i < old_key->user_defined_key_parts; i++) - { + for (uint i = 0; i < old_key->user_defined_key_parts; i++) { if (strcmp(old_key->key_part[i].field->field_name, - new_key->key_part[i].field->field_name) != 0) - { - DBUG_RETURN(1); + new_key->key_part[i].field->field_name) != 0) { + DBUG_RETURN(HA_EXIT_FAILURE); } } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } /* @@ -5543,86 +5088,75 @@ int ha_rocksdb::compare_key_parts(const KEY* const old_key, 0 - Ok other - error, either given table ddl is not supported by rocksdb or OOM. 
*/ -int ha_rocksdb::create_key_def(const TABLE* const table_arg, const uint &i, - const Rdb_tbl_def* const tbl_def_arg, - std::shared_ptr* const new_key_def, - const struct key_def_cf_info& cf_info) const -{ - DBUG_ENTER("create_key_def"); +int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i, + const Rdb_tbl_def *const tbl_def_arg, + std::shared_ptr *const new_key_def, + const struct key_def_cf_info &cf_info) const { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(new_key_def != nullptr); DBUG_ASSERT(*new_key_def == nullptr); - const uint index_id= ddl_manager.get_and_update_next_number(&dict_manager); - const uint16_t index_dict_version= Rdb_key_def::INDEX_INFO_VERSION_LATEST; + const uint index_id = ddl_manager.get_and_update_next_number(&dict_manager); + const uint16_t index_dict_version = Rdb_key_def::INDEX_INFO_VERSION_LATEST; uchar index_type; uint16_t kv_version; - if (is_hidden_pk(i, table_arg, tbl_def_arg)) - { - index_type= Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY; - kv_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; - } - else if (i == table_arg->s->primary_key) - { - index_type= Rdb_key_def::INDEX_TYPE_PRIMARY; - uint16 pk_latest_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; - kv_version= pk_latest_version; - } - else - { - index_type= Rdb_key_def::INDEX_TYPE_SECONDARY; - uint16 sk_latest_version= Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; - kv_version= sk_latest_version; + if (is_hidden_pk(i, table_arg, tbl_def_arg)) { + index_type = Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY; + kv_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + } else if (i == table_arg->s->primary_key) { + index_type = Rdb_key_def::INDEX_TYPE_PRIMARY; + uint16 pk_latest_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + kv_version = pk_latest_version; + } else { + index_type = Rdb_key_def::INDEX_TYPE_SECONDARY; + uint16 sk_latest_version = Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + kv_version = sk_latest_version; } - const char* const key_name = 
get_key_name(i, table_arg, m_tbl_def); - *new_key_def= std::make_shared( + const char *const key_name = get_key_name(i, table_arg, m_tbl_def); + *new_key_def = std::make_shared( index_id, i, cf_info.cf_handle, index_dict_version, index_type, kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } -int rdb_normalize_tablename(const std::string& tablename, - std::string* const strbuf) -{ +int rdb_normalize_tablename(const std::string &tablename, + std::string *const strbuf) { DBUG_ASSERT(strbuf != nullptr); - if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/') - { - DBUG_ASSERT(0); // We were not passed table name? + if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/') { + DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_INTERNAL_ERROR; } size_t pos = tablename.find_first_of('/', 2); - if (pos == std::string::npos) - { - DBUG_ASSERT(0); // We were not passed table name? + if (pos == std::string::npos) { + DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_INTERNAL_ERROR; } *strbuf = tablename.substr(2, pos - 2) + "." 
+ tablename.substr(pos + 1); - return 0; + return HA_EXIT_SUCCESS; } /* Check to see if the user's original statement includes foreign key references */ -bool ha_rocksdb::contains_foreign_key(THD* const thd) -{ +bool ha_rocksdb::contains_foreign_key(THD *const thd) { bool success; - const char* str = thd_query_string(thd)->str; + const char *str = thd_query_string(thd)->str; DBUG_ASSERT(str != nullptr); - while (*str != '\0') - { + while (*str != '\0') { // Scan from our current pos looking for 'FOREIGN' str = rdb_find_in_string(str, "FOREIGN", &success); - if (!success) - { + if (!success) { return false; } @@ -5630,22 +5164,19 @@ bool ha_rocksdb::contains_foreign_key(THD* const thd) str = rdb_check_next_token(&my_charset_bin, str, "FOREIGN", &success); DBUG_ASSERT(success); - if (!my_isspace(&my_charset_bin, *str)) - { + if (!my_isspace(&my_charset_bin, *str)) { return false; } // See if the next token is 'KEY' str = rdb_check_next_token(&my_charset_bin, str, "KEY", &success); - if (!success) - { + if (!success) { continue; } // See if the next token is '(' str = rdb_check_next_token(&my_charset_bin, str, "(", &success); - if (!success) - { + if (!success) { // There is an optional index id after 'FOREIGN KEY', skip it str = rdb_skip_id(&my_charset_bin, str); @@ -5670,13 +5201,12 @@ bool ha_rocksdb::contains_foreign_key(THD* const thd) @param dbbuf returns database name/table_schema @param tablebuf returns tablename @param partitionbuf returns partition suffix if there is one - @return 0 on success, non-zero on failure to split + @return HA_EXIT_SUCCESS on success, non-zero on failure to split */ -int rdb_split_normalized_tablename(const std::string& fullname, - std::string* const db, - std::string* const table, - std::string* const partition) -{ +int rdb_split_normalized_tablename(const std::string &fullname, + std::string *const db, + std::string *const table, + std::string *const partition) { DBUG_ASSERT(!fullname.empty()); #define RDB_PARTITION_STR "#P#" @@ 
-5685,47 +5215,39 @@ int rdb_split_normalized_tablename(const std::string& fullname, size_t dotpos = fullname.find('.'); /* Invalid table name? */ - if (dotpos == std::string::npos) - { + if (dotpos == std::string::npos) { return HA_ERR_INTERNAL_ERROR; } // Table must have a database name associated with it. DBUG_ASSERT(dotpos > 0); - if (db != nullptr) - { + if (db != nullptr) { *db = fullname.substr(0, dotpos); } dotpos++; - const size_t partpos = fullname.find(RDB_PARTITION_STR, dotpos, - strlen(RDB_PARTITION_STR)); + const size_t partpos = + fullname.find(RDB_PARTITION_STR, dotpos, strlen(RDB_PARTITION_STR)); - if (partpos != std::string::npos) - { + if (partpos != std::string::npos) { DBUG_ASSERT(partpos >= dotpos); - if (table != nullptr) - { + if (table != nullptr) { *table = fullname.substr(dotpos, partpos - dotpos); } - if (partition != nullptr) - { + if (partition != nullptr) { *partition = fullname.substr(partpos + strlen(RDB_PARTITION_STR)); } - } - else if (table != nullptr) - { + } else if (table != nullptr) { *table = fullname.substr(dotpos); } - return 0; + return HA_EXIT_SUCCESS; } - /** @brief create() is called to create a database. 
The variable name will have the name @@ -5745,14 +5267,14 @@ int rdb_split_normalized_tablename(const std::string& fullname, ha_create_table() in handle.cc */ -int ha_rocksdb::create(const char* const name, TABLE* const table_arg, - HA_CREATE_INFO* const create_info) -{ +int ha_rocksdb::create(const char *const name, TABLE *const table_arg, + HA_CREATE_INFO *const create_info) { + DBUG_ENTER_FUNC(); + int res; std::string str; - DBUG_ENTER("ha_rocksdb::create"); - THD* const thd = my_core::thd_get_current_thd(); + THD *const thd = my_core::thd_get_current_thd(); if (get_table_if_exists(name)) { if (thd->lex->sql_command == SQLCOM_TRUNCATE) { @@ -5763,11 +5285,12 @@ int ha_rocksdb::create(const char* const name, TABLE* const table_arg, if (rdb_normalize_tablename(name, &str)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - my_printf_error(ER_UNKNOWN_ERROR, - "Table '%s' does not exist, but metadata information exists inside " - "MyRocks. This is a sign of data inconsistency. Please check if " - "'%s.frm' exists, and try to restore it if it does not exist.", - MYF(0), str.c_str(), name); + my_printf_error( + ER_UNKNOWN_ERROR, + "Table '%s' does not exist, but metadata information exists inside " + "MyRocks. This is a sign of data inconsistency. Please check if " + "'%s.frm' exists, and try to restore it if it does not exist.", + MYF(0), str.c_str(), name); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } } @@ -5777,15 +5300,14 @@ int ha_rocksdb::create(const char* const name, TABLE* const table_arg, passes strings like "./test/t14#P#p0" for individual partitions, while table_arg->s->table_name has none of that. 
*/ - if (rdb_normalize_tablename(name, &str)) - { + if (rdb_normalize_tablename(name, &str)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - if (contains_foreign_key(thd)) - { + if (contains_foreign_key(thd)) { my_printf_error(ER_NOT_SUPPORTED_YET, "MyRocks does not currently support " - "foreign key constraints", MYF(0)); + "foreign key constraints", + MYF(0)); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -5793,65 +5315,60 @@ int ha_rocksdb::create(const char* const name, TABLE* const table_arg, TODO(alexyang): Temporarily disable unique indexes support when there is no Primary Key */ - for (uint keyno= 0; keyno < table_arg->s->keys; keyno++) - { + for (uint keyno = 0; keyno < table_arg->s->keys; keyno++) { if ((table_arg->key_info[keyno].flags & HA_NOSAME) && - has_hidden_pk(table_arg)) - { - my_printf_error(ER_UNKNOWN_ERROR, - "Unique index support is disabled when the table has no primary key. ", - MYF(0)); + has_hidden_pk(table_arg)) { + my_printf_error(ER_UNKNOWN_ERROR, "Unique index support is disabled when " + "the table has no primary key. 
", + MYF(0)); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } } - bool write_err= false; - const std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch* const batch= wb.get(); + bool write_err = false; + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); /* Create table/key descriptions and put them into the data dictionary */ - m_tbl_def= new Rdb_tbl_def(str); + m_tbl_def = new Rdb_tbl_def(str); - uint n_keys= table_arg->s->keys; + uint n_keys = table_arg->s->keys; /* If no primary key found, create a hidden PK and place it inside table definition */ - if (has_hidden_pk(table_arg)) - { + if (has_hidden_pk(table_arg)) { n_keys += 1; } - m_key_descr_arr= new std::shared_ptr[n_keys]; - m_tbl_def->m_key_count= n_keys; - m_tbl_def->m_key_descr_arr= m_key_descr_arr; + m_key_descr_arr = new std::shared_ptr[n_keys]; + m_tbl_def->m_key_count = n_keys; + m_tbl_def->m_key_descr_arr = m_key_descr_arr; - if ((res= create_key_defs(table_arg, m_tbl_def))) - { + if ((res = create_key_defs(table_arg, m_tbl_def))) { goto error; } - m_pk_descr= m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; + m_pk_descr = m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; dict_manager.lock(); - write_err= ddl_manager.put_and_write(m_tbl_def, batch) - || dict_manager.commit(batch); + write_err = + ddl_manager.put_and_write(m_tbl_def, batch) || dict_manager.commit(batch); dict_manager.unlock(); - if (write_err) - { + if (write_err) { goto error; } if (create_info->auto_increment_value) - m_tbl_def->m_auto_incr_val= create_info->auto_increment_value; + m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; /* We only support auto_increment at start of the PRIMARY KEY. 
*/ - //Field *field; - //if ((field= table_arg->next_number_field)) + // Field *field; + // if ((field= table_arg->next_number_field)) /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work if ((field= table_arg->found_next_number_field)) { @@ -5861,18 +5378,17 @@ int ha_rocksdb::create(const char* const name, TABLE* const table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } */ - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); error: /* Delete what we have allocated so far */ delete m_tbl_def; - m_tbl_def= nullptr; - m_key_descr_arr= nullptr; + m_tbl_def = nullptr; + m_key_descr_arr = nullptr; DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - /** @note This function is used only when the table has not yet been opened, and @@ -5881,48 +5397,43 @@ error: See comment in ha_rocksdb::index_flags() for details. */ -bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) const -{ - bool res= true; - KEY* const key_info= &table_share->key_info[inx]; +bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, + bool all_parts) const { + bool res = true; + KEY *const key_info = &table_share->key_info[inx]; Rdb_field_packing dummy1; - res= dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, - key_info->key_part[part].length); + res = dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, + key_info->key_part[part].length); - if (res && all_parts) - { - for (uint i= 0; i < part; i++) - { + if (res && all_parts) { + for (uint i = 0; i < part; i++) { Field *field; - if ((field= key_info->key_part[i].field)) - { + if ((field = key_info->key_part[i].field)) { Rdb_field_packing dummy; - if (!dummy.setup(nullptr, field, inx, i, key_info->key_part[i].length)) - { + if (!dummy.setup(nullptr, field, inx, i, + key_info->key_part[i].length)) { /* Cannot do index-only reads for this column */ - res= false; + res = false; break; } } } } - const uint pk= table_share->primary_key; + const uint pk = table_share->primary_key; if (inx == pk && 
all_parts && - part + 1 == table_share->key_info[pk].user_defined_key_parts) - { - m_pk_can_be_decoded= res; + part + 1 == table_share->key_info[pk].user_defined_key_parts) { + m_pk_can_be_decoded = res; } return res; } -int ha_rocksdb::read_key_exact(const Rdb_key_def& kd, - rocksdb::Iterator* const iter, +int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, + rocksdb::Iterator *const iter, const bool &full_key_match, - const rocksdb::Slice& key_slice) const -{ + const rocksdb::Slice &key_slice) const { DBUG_ASSERT(iter != nullptr); /* @@ -5930,10 +5441,8 @@ int ha_rocksdb::read_key_exact(const Rdb_key_def& kd, index_tuple= lookup_tuple. lookup_tuple may be a prefix of the index. */ - if (kd.m_is_reverse_cf) - { - if (!full_key_match) - { + if (kd.m_is_reverse_cf) { + if (!full_key_match) { if (!iter->Valid()) iter->SeekToLast(); else @@ -5941,49 +5450,42 @@ int ha_rocksdb::read_key_exact(const Rdb_key_def& kd, } } - if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) - { + if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) { /* Got a record that is not equal to the lookup value, or even a record from another table.index. */ return HA_ERR_KEY_NOT_FOUND; } - return 0; + return HA_EXIT_SUCCESS; } -int ha_rocksdb::read_before_key(const Rdb_key_def& kd, +int ha_rocksdb::read_before_key(const Rdb_key_def &kd, const bool &full_key_match, - const rocksdb::Slice& key_slice) -{ + const rocksdb::Slice &key_slice) { /* We are looking for record with the biggest t.key such that t.key < lookup_tuple. */ - if (kd.m_is_reverse_cf) - { + if (kd.m_is_reverse_cf) { if (m_scan_it->Valid() && full_key_match && - kd.value_matches_prefix(m_scan_it->key(), key_slice)) - { + kd.value_matches_prefix(m_scan_it->key(), key_slice)) { /* We are using full key and we've hit an exact match */ m_scan_it->Next(); } - } - else - { + } else { if (m_scan_it->Valid()) m_scan_it->Prev(); else m_scan_it->SeekToLast(); } - return m_scan_it->Valid() ? 
0 : HA_ERR_KEY_NOT_FOUND; + return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } -int ha_rocksdb::read_after_key(const Rdb_key_def& kd, +int ha_rocksdb::read_after_key(const Rdb_key_def &kd, const bool &full_key_match, - const rocksdb::Slice& key_slice) -{ + const rocksdb::Slice &key_slice) { /* We are looking for the first record such that @@ -5992,73 +5494,64 @@ int ha_rocksdb::read_after_key(const Rdb_key_def& kd, with HA_READ_AFTER_KEY, $GT = '>', with HA_READ_KEY_OR_NEXT, $GT = '>=' */ - if (kd.m_is_reverse_cf) - { - if (!m_scan_it->Valid()) - { + if (kd.m_is_reverse_cf) { + if (!m_scan_it->Valid()) { m_scan_it->SeekToLast(); - } - else - { + } else { /* We should step back - when not using full extended key - when using full extended key and when we've got an exact match */ if (!full_key_match || - !kd.value_matches_prefix(m_scan_it->key(), key_slice)) - { + !kd.value_matches_prefix(m_scan_it->key(), key_slice)) { m_scan_it->Prev(); } } } - return m_scan_it->Valid() ? 0 : HA_ERR_KEY_NOT_FOUND; + return m_scan_it->Valid() ? 
HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } -int ha_rocksdb::position_to_correct_key( - const Rdb_key_def& kd, - const enum ha_rkey_function &find_flag, - const bool &full_key_match, - const uchar* const key, - const key_part_map &keypart_map, - const rocksdb::Slice& key_slice, - bool* const move_forward) -{ - int rc= 0; +int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, + const enum ha_rkey_function &find_flag, + const bool &full_key_match, + const uchar *const key, + const key_part_map &keypart_map, + const rocksdb::Slice &key_slice, + bool *const move_forward) { + int rc = 0; - *move_forward= true; + *move_forward = true; switch (find_flag) { case HA_READ_KEY_EXACT: - rc= read_key_exact(kd, m_scan_it, full_key_match, key_slice); + rc = read_key_exact(kd, m_scan_it, full_key_match, key_slice); break; case HA_READ_BEFORE_KEY: - *move_forward= false; - rc= read_before_key(kd, full_key_match, key_slice); - if (rc == 0 && !kd.covers_key(m_scan_it->key())) - { + *move_forward = false; + rc = read_before_key(kd, full_key_match, key_slice); + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ - rc= HA_ERR_KEY_NOT_FOUND; + rc = HA_ERR_KEY_NOT_FOUND; } break; case HA_READ_AFTER_KEY: case HA_READ_KEY_OR_NEXT: - rc= read_after_key(kd, full_key_match, key_slice); - if (rc == 0 && !kd.covers_key(m_scan_it->key())) - { + rc = read_after_key(kd, full_key_match, key_slice); + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ - rc= HA_ERR_KEY_NOT_FOUND; + rc = HA_ERR_KEY_NOT_FOUND; } break; case HA_READ_KEY_OR_PREV: case HA_READ_PREFIX: /* This flag is not used by the SQL layer, so we don't support it yet. */ - rc= HA_ERR_UNSUPPORTED; + rc = HA_ERR_UNSUPPORTED; break; case HA_READ_PREFIX_LAST: case HA_READ_PREFIX_LAST_OR_PREV: - *move_forward= false; + *move_forward = false; /* Find the last record with the specified index prefix lookup. 
- HA_READ_PREFIX_LAST requires that the record has the @@ -6068,27 +5561,21 @@ int ha_rocksdb::position_to_correct_key( records with prefix=lookup, we should return the last record before that. */ - rc= read_before_key(kd, full_key_match, key_slice); - if (rc == 0) - { - const rocksdb::Slice& rkey= m_scan_it->key(); - if (!kd.covers_key(rkey)) - { + rc = read_before_key(kd, full_key_match, key_slice); + if (rc == 0) { + const rocksdb::Slice &rkey = m_scan_it->key(); + if (!kd.covers_key(rkey)) { /* The record we've got is not from this index */ - rc= HA_ERR_KEY_NOT_FOUND; - } - else if (find_flag == HA_READ_PREFIX_LAST) - { - uint size = kd.pack_index_tuple(table, m_pack_buffer, - m_sk_packed_tuple, key, - keypart_map); - rocksdb::Slice lookup_tuple( - reinterpret_cast(m_sk_packed_tuple), size); + rc = HA_ERR_KEY_NOT_FOUND; + } else if (find_flag == HA_READ_PREFIX_LAST) { + uint size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, keypart_map); + rocksdb::Slice lookup_tuple(reinterpret_cast(m_sk_packed_tuple), + size); // We need to compare the key we've got with the original search prefix. - if (!kd.value_matches_prefix(rkey, lookup_tuple)) - { - rc= HA_ERR_KEY_NOT_FOUND; + if (!kd.value_matches_prefix(rkey, lookup_tuple)) { + rc = HA_ERR_KEY_NOT_FOUND; } } } @@ -6101,18 +5588,16 @@ int ha_rocksdb::position_to_correct_key( return rc; } -int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def& kd, +int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, - const rocksdb::Slice& slice, + const rocksdb::Slice &slice, const int &bytes_changed_by_succ, - const key_range* const end_key, - uint* const end_key_packed_size) -{ + const key_range *const end_key, + uint *const end_key_packed_size) { if (find_flag == HA_READ_KEY_EXACT) return slice.size(); - if (find_flag == HA_READ_PREFIX_LAST) - { + if (find_flag == HA_READ_PREFIX_LAST) { /* We have made the kd.successor(m_sk_packed_tuple) call above. 
@@ -6121,12 +5606,10 @@ int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def& kd, return slice.size() - bytes_changed_by_succ; } - if (end_key) - { - *end_key_packed_size= kd.pack_index_tuple(table, m_pack_buffer, - m_end_key_packed_tuple, - end_key->key, - end_key->keypart_map); + if (end_key) { + *end_key_packed_size = + kd.pack_index_tuple(table, m_pack_buffer, m_end_key_packed_tuple, + end_key->key, end_key->keypart_map); /* Calculating length of the equal conditions here. 4 byte index id is @@ -6137,7 +5620,7 @@ int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def& kd, Example2: id1 VARCHAR(30), id2 INT, PRIMARY KEY (id1, id2) WHERE id1 = 'AAA' and id2 < 3; => eq_cond_len=13 (varchar used 9 bytes) */ - rocksdb::Slice end_slice(reinterpret_cast(m_end_key_packed_tuple), + rocksdb::Slice end_slice(reinterpret_cast(m_end_key_packed_tuple), *end_key_packed_size); return slice.difference_offset(end_slice); } @@ -6151,124 +5634,73 @@ int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def& kd, return Rdb_key_def::INDEX_NUMBER_SIZE; } -int ha_rocksdb::read_row_from_primary_key(uchar* const buf) -{ +int ha_rocksdb::read_row_from_primary_key(uchar *const buf) { DBUG_ASSERT(buf != nullptr); int rc; - const rocksdb::Slice& rkey= m_scan_it->key(); - const uint pk_size= rkey.size(); - const char *pk_data= rkey.data(); + const rocksdb::Slice &rkey = m_scan_it->key(); + const uint pk_size = rkey.size(); + const char *pk_data = rkey.data(); memcpy(m_pk_packed_tuple, pk_data, pk_size); m_last_rowkey.copy(pk_data, pk_size, &my_charset_bin); - if (m_lock_rows != RDB_LOCK_NONE) - { + if (m_lock_rows != RDB_LOCK_NONE) { /* We need to put a lock and re-read */ - rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); - } - else - { + rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + } else { /* Unpack from the row we've read */ - const rocksdb::Slice& value = m_scan_it->value(); - rc= convert_record_from_storage_format(&rkey, &value, buf); + const rocksdb::Slice &value = 
m_scan_it->value(); + rc = convert_record_from_storage_format(&rkey, &value, buf); } return rc; } -int ha_rocksdb::read_row_from_secondary_key(uchar* const buf, - const Rdb_key_def& kd, bool move_forward) -{ +int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, + const Rdb_key_def &kd, + bool move_forward) { DBUG_ASSERT(buf != nullptr); - int rc= 0; + int rc = 0; uint pk_size; - if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) - { + if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { /* Get the key columns and primary key value */ - const rocksdb::Slice& rkey= m_scan_it->key(); - pk_size= kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, - m_pk_packed_tuple); - const rocksdb::Slice& value= m_scan_it->value(); - if (pk_size == RDB_INVALID_KEY_LEN) - { - rc= HA_ERR_INTERNAL_ERROR; + const rocksdb::Slice &rkey = m_scan_it->key(); + pk_size = + kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); + const rocksdb::Slice &value = m_scan_it->value(); + if (pk_size == RDB_INVALID_KEY_LEN || + kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums)) { + rc = HA_ERR_INTERNAL_ERROR; } - else - { - rc= try_keyonly_read_from_sk(buf, kd, rkey, value, pk_size); - } - } - else - { + } else { if (kd.m_is_reverse_cf) - move_forward= !move_forward; + move_forward = !move_forward; - rc= find_icp_matching_index_rec(move_forward, buf); - if (!rc) - { - const rocksdb::Slice& rkey= m_scan_it->key(); - pk_size= kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, + rc = find_icp_matching_index_rec(move_forward, buf); + if (!rc) { + const rocksdb::Slice &rkey = m_scan_it->key(); + pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); - if (pk_size == RDB_INVALID_KEY_LEN) - { - rc= HA_ERR_INTERNAL_ERROR; - } - else - { - rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + if (pk_size == RDB_INVALID_KEY_LEN) { + rc = HA_ERR_INTERNAL_ERROR; + } else { + rc = 
get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); } } } - if (!rc) - { - m_last_rowkey.copy((const char*)m_pk_packed_tuple, pk_size, + if (!rc) { + m_last_rowkey.copy((const char *)m_pk_packed_tuple, pk_size, &my_charset_bin); } return rc; } -/** - @note - Some data types require unpack_info in order to do index-only reads. - - However, support for this may have been added after the database was built. - This means that we need to support the case where unpack_info is not - available. In this case, we should just do a primary key lookup. -*/ -int ha_rocksdb::try_keyonly_read_from_sk(uchar* buf, - const Rdb_key_def& kd, - const rocksdb::Slice& key, - const rocksdb::Slice& value, - const uint rowid_size) -{ - const int res= - kd.unpack_record(table, buf, &key, &value, m_verify_row_debug_checksums); - uint size= rowid_size; - if (res == UNPACK_INFO_MISSING) - { - // If we can't unpack, then do a PK lookup. - if (rowid_size == RDB_INVALID_KEY_LEN) - { - size= kd.get_primary_key_tuple( - table, *m_pk_descr, &key, m_pk_packed_tuple); - } - return get_row_by_rowid(buf, m_pk_packed_tuple, size); - } - else if (res == UNPACK_FAILURE) - { - return HA_ERR_INTERNAL_ERROR; - } - DBUG_ASSERT(res == UNPACK_SUCCESS); - return res; -} - - /** @note The problem with this function is that SQL layer calls it, when @@ -6290,27 +5722,23 @@ int ha_rocksdb::try_keyonly_read_from_sk(uchar* buf, yet). 
*/ -ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const -{ - ulong base_flags= HA_READ_NEXT | // doesn't seem to be used - HA_READ_ORDER | - HA_READ_RANGE | - HA_READ_PREV; +ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const { + DBUG_ENTER_FUNC(); + + ulong base_flags = HA_READ_NEXT | // doesn't seem to be used + HA_READ_ORDER | HA_READ_RANGE | HA_READ_PREV; if (check_keyread_allowed(inx, part, all_parts)) base_flags |= HA_KEYREAD_ONLY; - if (inx == table_share->primary_key) - { + if (inx == table_share->primary_key) { /* Index-only reads on primary key are the same as table scan for us. Still, we need to explicitly "allow" them, otherwise SQL layer will miss some plans. */ base_flags |= HA_KEYREAD_ONLY; - } - else - { + } else { /* We can Index Condition Pushdown any key except the primary. With primary key, we get (pk, record) pair immediately, there is no place to put the @@ -6319,10 +5747,9 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const base_flags |= HA_DO_INDEX_COND_PUSHDOWN; } - return base_flags; + DBUG_RETURN(base_flags); } - /** @brief Read next index tuple through the secondary index. @@ -6331,49 +5758,45 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const m_scan_it points at the index key-value pair that we should read the (pk,row) pair for. 
*/ -int ha_rocksdb::secondary_index_read(const int keyno, uchar* const buf) -{ +int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(table != nullptr); stats.rows_requested++; /* Use STATUS_NOT_FOUND when record not found or some error occurred */ - table->status= STATUS_NOT_FOUND; + table->status = STATUS_NOT_FOUND; - if (m_scan_it->Valid()) - { - rocksdb::Slice key= m_scan_it->key(); + if (m_scan_it->Valid()) { + rocksdb::Slice key = m_scan_it->key(); /* Check if we've ran out of records of this index */ - if (m_key_descr_arr[keyno]->covers_key(key)) - { - int rc; + if (m_key_descr_arr[keyno]->covers_key(key)) { + int rc = 0; - //TODO: We could here check if we have ran out of range we're scanning - const uint size= m_key_descr_arr[keyno]->get_primary_key_tuple( + // TODO: We could here check if we have ran out of range we're scanning + const uint size = m_key_descr_arr[keyno]->get_primary_key_tuple( table, *m_pk_descr, &key, m_pk_packed_tuple); - if (size == RDB_INVALID_KEY_LEN) - { + if (size == RDB_INVALID_KEY_LEN) { return HA_ERR_INTERNAL_ERROR; } - m_last_rowkey.copy((const char*)m_pk_packed_tuple, size, &my_charset_bin); + m_last_rowkey.copy((const char *)m_pk_packed_tuple, size, + &my_charset_bin); if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && - !has_hidden_pk(table)) - { - rc= try_keyonly_read_from_sk(buf, *m_key_descr_arr[keyno], - key, m_scan_it->value(), size); - } - else - { - rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); + !has_hidden_pk(table)) { + rocksdb::Slice value = m_scan_it->value(); + if (m_key_descr_arr[keyno]->unpack_record( + table, buf, &key, &value, m_verify_row_debug_checksums)) { + return HA_ERR_INTERNAL_ERROR; + } + } else { + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size); } - if (!rc) - { - table->status= 0; + if (!rc) { + table->status = 0; stats.rows_read++; stats.rows_index_next++; update_row_stats(ROWS_READ); @@ -6384,56 +5807,46 @@ int 
ha_rocksdb::secondary_index_read(const int keyno, uchar* const buf) return HA_ERR_END_OF_FILE; } - /* ha_rocksdb::read_range_first overrides handler::read_range_first. The only difference from handler::read_range_first is that ha_rocksdb::read_range_first passes end_key to ha_rocksdb::index_read_map_impl function. */ -int ha_rocksdb::read_range_first(const key_range* const start_key, - const key_range* const end_key, - bool eq_range_arg, bool sorted) -{ - int result; - DBUG_ENTER("ha_rocksdb::read_range_first"); +int ha_rocksdb::read_range_first(const key_range *const start_key, + const key_range *const end_key, + bool eq_range_arg, bool sorted) { + DBUG_ENTER_FUNC(); - eq_range= eq_range_arg; + int result; + + eq_range = eq_range_arg; set_end_range(end_key, RANGE_SCAN_ASC); - range_key_part= table->key_info[active_index].key_part; + range_key_part = table->key_info[active_index].key_part; - if (!start_key) // Read first record - result= ha_index_first(table->record[0]); - else - { - if (is_using_prohibited_gap_locks(table, - is_using_full_unique_key( - active_index, - start_key->keypart_map, - start_key->flag))) - { + if (!start_key) // Read first record + result = ha_index_first(table->record[0]); + else { + if (is_using_prohibited_gap_locks( + table, + is_using_full_unique_key(active_index, start_key->keypart_map, + start_key->flag))) { DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); } - MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, - { result= index_read_map_impl(table->record[0], - start_key->key, - start_key->keypart_map, - start_key->flag, - end_key); }) + MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, { + result = + index_read_map_impl(table->record[0], start_key->key, + start_key->keypart_map, start_key->flag, end_key); + }) } if (result) - DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) - ? HA_ERR_END_OF_FILE - : result); + DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) ? 
HA_ERR_END_OF_FILE : result); - if (compare_key(end_range) <= 0) - { - DBUG_RETURN(0); - } - else - { + if (compare_key(end_range) <= 0) { + DBUG_RETURN(HA_EXIT_SUCCESS); + } else { /* The last read row does not fall in the range. So request storage engine to release row lock if possible. @@ -6443,12 +5856,12 @@ int ha_rocksdb::read_range_first(const key_range* const start_key, } } - -int ha_rocksdb::index_read_map(uchar* const buf, const uchar* const key, +int ha_rocksdb::index_read_map(uchar *const buf, const uchar *const key, key_part_map keypart_map, - enum ha_rkey_function find_flag) -{ - return index_read_map_impl(buf, key, keypart_map, find_flag, nullptr); + enum ha_rkey_function find_flag) { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(index_read_map_impl(buf, key, keypart_map, find_flag, nullptr)); } /* @@ -6457,45 +5870,43 @@ int ha_rocksdb::index_read_map(uchar* const buf, const uchar* const key, This function takes end_key as an argument, and it is set on range scan. MyRocks needs to decide whether prefix bloom filter can be used or not. - To decide to use prefix bloom filter or not, calculating equal condition length + To decide to use prefix bloom filter or not, calculating equal condition + length is needed. On equal lookups (find_flag == HA_READ_KEY_EXACT), equal condition length is the same as rocksdb::Slice.size() of the start key. On range scan, equal condition length is MIN(start_key, end_key) of the rocksdb::Slice expression. 
*/ -int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, +int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, key_part_map keypart_map, enum ha_rkey_function find_flag, - const key_range* end_key) -{ - int rc= 0; - DBUG_ENTER("ha_rocksdb::index_read_map"); + const key_range *end_key) { + DBUG_ENTER_FUNC(); + + int rc = 0; ha_statistic_increment(&SSV::ha_read_key_count); - const Rdb_key_def& kd= *m_key_descr_arr[active_index]; - const uint actual_key_parts= kd.get_key_parts(); - bool using_full_key= is_using_full_key(keypart_map, actual_key_parts); + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + const uint actual_key_parts = kd.get_key_parts(); + bool using_full_key = is_using_full_key(keypart_map, actual_key_parts); if (!end_key) - end_key= end_range; + end_key = end_range; /* By default, we don't need the retrieved records to match the prefix */ - m_sk_match_prefix= nullptr; + m_sk_match_prefix = nullptr; stats.rows_requested++; - if (active_index == table->s->primary_key && - find_flag == HA_READ_KEY_EXACT && using_full_key) - { + if (active_index == table->s->primary_key && find_flag == HA_READ_KEY_EXACT && + using_full_key) { /* Equality lookup over primary key, using full tuple. This is a special case, use DB::Get. 
*/ - const uint size= kd.pack_index_tuple( - table, m_pack_buffer, m_pk_packed_tuple, - key, keypart_map); - rc= get_row_by_rowid(buf, m_pk_packed_tuple, size); - if (!rc) - { + const uint size = kd.pack_index_tuple(table, m_pack_buffer, + m_pk_packed_tuple, key, keypart_map); + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size); + if (!rc) { stats.rows_read++; stats.rows_index_first++; update_row_stats(ROWS_READ); @@ -6509,25 +5920,22 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, uint packed_size; if (active_index != table->s->primary_key && table->key_info[active_index].flags & HA_NOSAME && - find_flag == HA_READ_KEY_EXACT && using_full_key) - { - key_part_map tmp_map= (key_part_map(1) << - table->key_info[active_index].user_defined_key_parts) - 1; - packed_size= kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + find_flag == HA_READ_KEY_EXACT && using_full_key) { + key_part_map tmp_map = (key_part_map(1) << table->key_info[active_index] + .user_defined_key_parts) - + 1; + packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, tmp_map); if (table->key_info[active_index].user_defined_key_parts != kd.get_key_parts()) - using_full_key= false; - } - else - { - packed_size= kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + using_full_key = false; + } else { + packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, keypart_map); } if ((pushed_idx_cond && pushed_idx_cond_keyno == active_index) && - (find_flag == HA_READ_KEY_EXACT || find_flag == HA_READ_PREFIX_LAST)) - { + (find_flag == HA_READ_KEY_EXACT || find_flag == HA_READ_PREFIX_LAST)) { /* We are doing a point index lookup, and ICP is enabled. It is possible that this call will be followed by ha_rocksdb->index_next_same() call. @@ -6540,39 +5948,36 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, that don't match the lookup prefix are not returned. 
row matches the lookup prefix. */ - m_sk_match_prefix= m_sk_match_prefix_buf; - m_sk_match_length= packed_size; + m_sk_match_prefix = m_sk_match_prefix_buf; + m_sk_match_length = packed_size; memcpy(m_sk_match_prefix, m_sk_packed_tuple, packed_size); } - int bytes_changed_by_succ= 0; + int bytes_changed_by_succ = 0; if (find_flag == HA_READ_PREFIX_LAST_OR_PREV || - find_flag == HA_READ_PREFIX_LAST || - find_flag == HA_READ_AFTER_KEY) - { + find_flag == HA_READ_PREFIX_LAST || find_flag == HA_READ_AFTER_KEY) { /* See below */ - bytes_changed_by_succ= kd.successor(m_sk_packed_tuple, packed_size); + bytes_changed_by_succ = kd.successor(m_sk_packed_tuple, packed_size); } - rocksdb::Slice slice( - reinterpret_cast(m_sk_packed_tuple), packed_size); + rocksdb::Slice slice(reinterpret_cast(m_sk_packed_tuple), + packed_size); - uint end_key_packed_size= 0; - const uint eq_cond_len= calc_eq_cond_len(kd, find_flag, slice, - bytes_changed_by_succ, end_key, - &end_key_packed_size); + uint end_key_packed_size = 0; + const uint eq_cond_len = + calc_eq_cond_len(kd, find_flag, slice, bytes_changed_by_succ, end_key, + &end_key_packed_size); - bool use_all_keys= false; + bool use_all_keys = false; if (find_flag == HA_READ_KEY_EXACT && my_count_bits(keypart_map) == kd.get_key_parts()) - use_all_keys= true; + use_all_keys = true; - Rdb_transaction* const tx= get_or_create_tx(table->in_use); - const bool is_new_snapshot= !tx->has_snapshot(); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) - for (;;) - { + for (;;) { /* This will open the iterator and position it at a record that's equal or greater than the lookup tuple. 
@@ -6588,14 +5993,13 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, rc = position_to_correct_key(kd, find_flag, using_full_key, key, keypart_map, slice, &move_forward); - if (rc) - { + if (rc) { /* This status is returned on any error */ - table->status= STATUS_NOT_FOUND; + table->status = STATUS_NOT_FOUND; DBUG_RETURN(rc); } - m_skip_scan_it_next_call= false; + m_skip_scan_it_next_call = false; /* Now get the data for the row into 'buf'. If we were using a primary key @@ -6603,9 +6007,9 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, lookup the primary key. */ if (active_index == table->s->primary_key) - rc= read_row_from_primary_key(buf); + rc = read_row_from_primary_key(buf); else - rc= read_row_from_secondary_key(buf, kd, move_forward); + rc = read_row_from_secondary_key(buf, kd, move_forward); if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) break; /* Exit the loop */ @@ -6615,22 +6019,19 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, release_scan_iterator(); } - if (rc) - { + if (rc) { /* the only possible error condition is record-not-found */ - table->status= STATUS_NOT_FOUND; - } - else - { - table->status= 0; + table->status = STATUS_NOT_FOUND; + } else { + table->status = 0; stats.rows_read++; stats.rows_index_first++; update_row_stats(ROWS_READ); } + DBUG_RETURN(rc); } - /* @brief Scan the secondary index until we find an index record that satisfies ICP @@ -6652,64 +6053,54 @@ int ha_rocksdb::index_read_map_impl(uchar* const buf, const uchar* const key, */ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, - uchar* const buf) -{ + uchar *const buf) { DBUG_ASSERT(buf != nullptr); - if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) - { - const Rdb_key_def& kd= *m_key_descr_arr[active_index]; + if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) { + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; - while 
(1) - { - if (!m_scan_it->Valid()) - { - table->status= STATUS_NOT_FOUND; + while (1) { + if (!m_scan_it->Valid()) { + table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } - const rocksdb::Slice rkey= m_scan_it->key(); + const rocksdb::Slice rkey = m_scan_it->key(); - if (!kd.covers_key(rkey)) - { - table->status= STATUS_NOT_FOUND; + if (!kd.covers_key(rkey)) { + table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } - if (m_sk_match_prefix) - { - const rocksdb::Slice prefix((const char*)m_sk_match_prefix, - m_sk_match_length); - if (!kd.value_matches_prefix(rkey, prefix)) - { - table->status= STATUS_NOT_FOUND; + if (m_sk_match_prefix) { + const rocksdb::Slice prefix((const char *)m_sk_match_prefix, + m_sk_match_length); + if (!kd.value_matches_prefix(rkey, prefix)) { + table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } } - const rocksdb::Slice value= m_scan_it->value(); - if (try_keyonly_read_from_sk(buf, kd, rkey, value, RDB_INVALID_KEY_LEN)) - { + const rocksdb::Slice value = m_scan_it->value(); + if (kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums)) { return HA_ERR_INTERNAL_ERROR; } - const enum icp_result icp_status= check_index_cond(); - if (icp_status == ICP_NO_MATCH) - { + const enum icp_result icp_status = check_index_cond(); + if (icp_status == ICP_NO_MATCH) { if (move_forward) m_scan_it->Next(); else m_scan_it->Prev(); continue; /* Get the next (or prev) index tuple */ - } - else if (icp_status == ICP_OUT_OF_RANGE) - { + } else if (icp_status == ICP_OUT_OF_RANGE) { /* We have walked out of range we are scanning */ - table->status= STATUS_NOT_FOUND; + table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; - } - else /* icp_status == ICP_MATCH */ + } else /* icp_status == ICP_MATCH */ { - /* Index Condition is satisfied. We have rc==0, proceed to fetch the row. */ + /* Index Condition is satisfied. We have rc==0, proceed to fetch the + * row. 
*/ break; } /* @@ -6722,84 +6113,81 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, */ } } - return 0; + return HA_EXIT_SUCCESS; } +int ha_rocksdb::index_read_last_map(uchar *const buf, const uchar *const key, + key_part_map keypart_map) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_read_last_map(uchar* const buf, const uchar* const key, - key_part_map keypart_map) -{ - return index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST); + DBUG_RETURN(index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST)); } +int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::check(THD* const thd, HA_CHECK_OPT* const check_opt) -{ - const uint pk= pk_index(table, m_tbl_def); + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(check_opt != nullptr); + + const uint pk = pk_index(table, m_tbl_def); String rowkey_copy; String sec_key_copy; - const char* const table_name = table->s->table_name.str; + const char *const table_name = table->s->table_name.str; // Only when debugging: don't use snapshot when reading // Rdb_transaction *tx= get_or_create_tx(table->in_use); // tx->snapshot= nullptr; - const bool save_verify_row_debug_checksums= m_verify_row_debug_checksums; - m_verify_row_debug_checksums= true; + const bool save_verify_row_debug_checksums = m_verify_row_debug_checksums; + m_verify_row_debug_checksums = true; /* For each secondary index, check that we can get a PK value from it */ // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: Checking table %s", table_name, table_name); ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true - ha_rows row_checksums= ha_rows(-1); - bool first_index= true; + ha_rows row_checksums = ha_rows(-1); + bool first_index = true; - for (uint keyno= 0; keyno < table->s->keys; keyno++) - { - if (keyno != pk) - { + for (uint keyno = 0; keyno < table->s->keys; keyno++) { + if (keyno != pk) { extra(HA_EXTRA_KEYREAD); ha_index_init(keyno, true); - 
ha_rows rows= 0; - ha_rows checksums= 0; + ha_rows rows = 0; + ha_rows checksums = 0; if (first_index) - row_checksums_at_start= m_row_checksums_checked; + row_checksums_at_start = m_row_checksums_checked; int res; // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: Checking index %s", table_name, table->key_info[keyno].name); - while (1) - { + while (1) { if (!rows) - res= index_first(table->record[0]); + res = index_first(table->record[0]); else - res= index_next(table->record[0]); + res = index_next(table->record[0]); if (res == HA_ERR_END_OF_FILE) break; - if (res) - { + if (res) { // error // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. row %lld: index scan error %d", table_name, rows, res); goto error; } - rocksdb::Slice key= m_scan_it->key(); + rocksdb::Slice key = m_scan_it->key(); sec_key_copy.copy(key.data(), key.size(), &my_charset_bin); rowkey_copy.copy(m_last_rowkey.ptr(), m_last_rowkey.length(), &my_charset_bin); - if (m_key_descr_arr[keyno]-> - unpack_info_has_checksum(m_scan_it->value())) - { + if (m_key_descr_arr[keyno]->unpack_info_has_checksum( + m_scan_it->value())) { checksums++; } - if ((res= get_row_by_rowid(table->record[0], rowkey_copy.ptr(), - rowkey_copy.length()))) - { + if ((res = get_row_by_rowid(table->record[0], rowkey_copy.ptr(), + rowkey_copy.length()))) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. 
row %lld: " "failed to fetch row by rowid", @@ -6807,18 +6195,17 @@ int ha_rocksdb::check(THD* const thd, HA_CHECK_OPT* const check_opt) goto error; } - longlong hidden_pk_id= 0; + longlong hidden_pk_id = 0; if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) goto error; /* Check if we get the same PK value */ - uint packed_size= m_pk_descr->pack_record( + uint packed_size = m_pk_descr->pack_record( table, m_pack_buffer, table->record[0], m_pk_packed_tuple, nullptr, false, hidden_pk_id); if (packed_size != rowkey_copy.length() || - memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size)) - { + memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size)) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. row %lld: PK value mismatch", table_name, rows); @@ -6826,12 +6213,11 @@ int ha_rocksdb::check(THD* const thd, HA_CHECK_OPT* const check_opt) } /* Check if we get the same secondary key value */ - packed_size= m_key_descr_arr[keyno]->pack_record( + packed_size = m_key_descr_arr[keyno]->pack_record( table, m_pack_buffer, table->record[0], m_sk_packed_tuple, &m_sk_tails, false, hidden_pk_id); if (packed_size != sec_key_copy.length() || - memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size)) - { + memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size)) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. 
row %lld: " "secondary index value mismatch", @@ -6841,72 +6227,64 @@ int ha_rocksdb::check(THD* const thd, HA_CHECK_OPT* const check_opt) rows++; continue; -print_and_error: - { - std::string buf; - buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), - RDB_MAX_HEXDUMP_LEN); - // NO_LINT_DEBUG - sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, - buf.c_str()); + print_and_error : { + std::string buf; + buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, buf.c_str()); - buf= rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), - RDB_MAX_HEXDUMP_LEN); - // NO_LINT_DEBUG - sql_print_error("CHECKTABLE %s: record: %s", table_name, - buf.c_str()); + buf = rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: record: %s", table_name, buf.c_str()); - buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), - RDB_MAX_HEXDUMP_LEN); - // NO_LINT_DEBUG - sql_print_error("CHECKTABLE %s: index: %s", table_name, - buf.c_str()); + buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: index: %s", table_name, buf.c_str()); - goto error; - } + goto error; + } } // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: ... 
%lld index entries checked " - "(%lld had checksums)", table_name, rows, - checksums); + "(%lld had checksums)", + table_name, rows, checksums); - if (first_index) - { - row_checksums= m_row_checksums_checked - row_checksums_at_start; - first_index= false; + if (first_index) { + row_checksums = m_row_checksums_checked - row_checksums_at_start; + first_index = false; } ha_index_end(); } } - if (row_checksums != ha_rows(-1)) - { + if (row_checksums != ha_rows(-1)) { // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: %lld table records had checksums", table_name, row_checksums); } extra(HA_EXTRA_NO_KEYREAD); - m_verify_row_debug_checksums= save_verify_row_debug_checksums; + m_verify_row_debug_checksums = save_verify_row_debug_checksums; /* TODO: we should check also for PK records that are missing in the secondary indexes. For that, need to walk through the PK and check that every PK record has a proper counterpart in each secondary index. */ - return HA_ADMIN_OK; + DBUG_RETURN(HA_ADMIN_OK); error: - m_verify_row_debug_checksums= save_verify_row_debug_checksums; + m_verify_row_debug_checksums = save_verify_row_debug_checksums; ha_index_or_rnd_end(); extra(HA_EXTRA_NO_KEYREAD); - return HA_ADMIN_CORRUPT; + + DBUG_RETURN(HA_ADMIN_CORRUPT); } - -static void dbug_dump_str(FILE* const out, const char* const str, int len) -{ +static void dbug_dump_str(FILE *const out, const char *const str, int len) { fprintf(out, "\""); - for (int i= 0; i < len; i++) - { + for (int i = 0; i < len; i++) { if (str[i] > 32) fprintf(out, "%c", str[i]); else @@ -6915,23 +6293,21 @@ static void dbug_dump_str(FILE* const out, const char* const str, int len) fprintf(out, "\""); } - /* Debugging help: dump the whole database into a human-readable file. 
Usage: dbug_dump_database(rdb); */ -void dbug_dump_database(rocksdb::DB* const db) -{ - FILE* const out= fopen("/tmp/rocksdb.dump", "wt"); +void dbug_dump_database(rocksdb::DB *const db) { + FILE *const out = fopen("/tmp/rocksdb.dump", "wt"); if (!out) return; - rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions()); + rocksdb::Iterator *it = db->NewIterator(rocksdb::ReadOptions()); for (it->SeekToFirst(); it->Valid(); it->Next()) { - rocksdb::Slice key= it->key(); - rocksdb::Slice val= it->value(); + rocksdb::Slice key = it->key(); + rocksdb::Slice val = it->value(); dbug_dump_str(out, key.data(), key.size()); fprintf(out, " -> "); dbug_dump_str(out, val.data(), val.size()); @@ -6943,25 +6319,21 @@ void dbug_dump_database(rocksdb::DB* const db) } rocksdb::Status ha_rocksdb::get_for_update( - Rdb_transaction* const tx, - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* const value) const -{ + Rdb_transaction *const tx, rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value) const { DBUG_ASSERT(m_lock_rows != RDB_LOCK_NONE); const bool exclusive = m_lock_rows != RDB_LOCK_READ; - rocksdb::Status s= tx->get_for_update(column_family, key, value, exclusive); + rocksdb::Status s = tx->get_for_update(column_family, key, value, exclusive); // If we have a lock conflict and we are running in READ COMMITTTED mode // release and reacquire the snapshot and then retry the get_for_update(). if (s.IsBusy() && !s.IsDeadlock() && - my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) - { + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) { tx->release_snapshot(); tx->acquire_snapshot(false); - s= tx->get_for_update(column_family, key, value, exclusive); + s = tx->get_for_update(column_family, key, value, exclusive); } return s; @@ -6975,160 +6347,146 @@ rocksdb::Status ha_rocksdb::get_for_update( other - HA_ERR error code. 
*/ -int ha_rocksdb::get_row_by_rowid(uchar* const buf, const char* const rowid, - const uint rowid_size) -{ +int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, + const uint rowid_size) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(rowid != nullptr); DBUG_ASSERT(table != nullptr); int rc; - DBUG_ENTER("ha_rocksdb::get_row_by_rowid"); rocksdb::Slice key_slice(rowid, rowid_size); - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid"); - DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid", - { - THD *thd= ha_thd(); - const char act[]= - "now signal Reached " - "wait_for signal.rocksdb.get_row_by_rowid_let_running"; - DBUG_ASSERT(opt_debug_sync_timeout > 0); - DBUG_ASSERT(!debug_sync_set_action(thd, - STRING_WITH_LEN(act))); - };); + DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid", { + THD *thd = ha_thd(); + const char act[] = "now signal Reached " + "wait_for signal.rocksdb.get_row_by_rowid_let_running"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act))); + };); bool found; rocksdb::Status s; - if (m_lock_rows == RDB_LOCK_NONE) - { + if (m_lock_rows == RDB_LOCK_NONE) { tx->acquire_snapshot(true); - s= tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); - } - else - { - s= get_for_update(tx, m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + s = tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + } else { + s = get_for_update(tx, m_pk_descr->get_cf(), key_slice, + &m_retrieved_record); } - if (!s.IsNotFound() && !s.ok()) - { + if (!s.IsNotFound() && !s.ok()) { DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); } - found= !s.IsNotFound(); + found = !s.IsNotFound(); - table->status= STATUS_NOT_FOUND; - if (found) - { - m_last_rowkey.copy((const char*)rowid, rowid_size, 
&my_charset_bin); - rc= convert_record_from_storage_format(&key_slice, buf); + table->status = STATUS_NOT_FOUND; + if (found) { + m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin); + rc = convert_record_from_storage_format(&key_slice, buf); if (!rc) - table->status= 0; - } - else - { + table->status = 0; + } else { /* Note: we don't need to unlock the row. It is intentional that we keep locks on rows that don't exist. */ - rc= HA_ERR_KEY_NOT_FOUND; + rc = HA_ERR_KEY_NOT_FOUND; } DBUG_RETURN(rc); } +int ha_rocksdb::index_next(uchar *const buf) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_next(uchar* const buf) -{ - bool moves_forward= true; + bool moves_forward = true; ha_statistic_increment(&SSV::ha_read_next_count); - if (m_key_descr_arr[active_index]->m_is_reverse_cf) - { - moves_forward= false; + if (m_key_descr_arr[active_index]->m_is_reverse_cf) { + moves_forward = false; } - int rc= index_next_with_direction(buf, moves_forward); + int rc = index_next_with_direction(buf, moves_forward); if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE; - return rc; + + DBUG_RETURN(rc); } +int ha_rocksdb::index_prev(uchar *const buf) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_prev(uchar* const buf) -{ - bool moves_forward= false; + bool moves_forward = false; ha_statistic_increment(&SSV::ha_read_prev_count); - if (m_key_descr_arr[active_index]->m_is_reverse_cf) - { - moves_forward= true; + if (m_key_descr_arr[active_index]->m_is_reverse_cf) { + moves_forward = true; } - int rc= index_next_with_direction(buf, moves_forward); + int rc = index_next_with_direction(buf, moves_forward); if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE; - return rc; + DBUG_RETURN(rc); } +int ha_rocksdb::index_next_with_direction(uchar *const buf, bool move_forward) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_next_with_direction(uchar* const buf, bool move_forward) -{ int rc; - DBUG_ENTER("ha_rocksdb::index_next"); - if (active_index == pk_index(table, m_tbl_def)) 
- { - rc= rnd_next_with_direction(buf, move_forward); - } - else - { - if (m_skip_scan_it_next_call) - { - m_skip_scan_it_next_call= false; - } - else - { + if (active_index == pk_index(table, m_tbl_def)) { + rc = rnd_next_with_direction(buf, move_forward); + } else { + if (m_skip_scan_it_next_call) { + m_skip_scan_it_next_call = false; + } else { if (move_forward) - m_scan_it->Next(); /* this call cannot fail */ + m_scan_it->Next(); /* this call cannot fail */ else m_scan_it->Prev(); } - rc= find_icp_matching_index_rec(move_forward, buf); + rc = find_icp_matching_index_rec(move_forward, buf); if (!rc) - rc= secondary_index_read(active_index, buf); + rc = secondary_index_read(active_index, buf); } DBUG_RETURN(rc); } +int ha_rocksdb::index_first(uchar *const buf) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_first(uchar* const buf) -{ - m_sk_match_prefix= nullptr; + m_sk_match_prefix = nullptr; ha_statistic_increment(&SSV::ha_read_first_count); - int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? - index_last_intern(buf) : index_first_intern(buf); + int rc = m_key_descr_arr[active_index]->m_is_reverse_cf + ? index_last_intern(buf) + : index_first_intern(buf); if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE; - return rc; + + DBUG_RETURN(rc); } +int ha_rocksdb::index_last(uchar *const buf) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::index_last(uchar* const buf) -{ - m_sk_match_prefix= nullptr; + m_sk_match_prefix = nullptr; ha_statistic_increment(&SSV::ha_read_last_count); - int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ? - index_first_intern(buf) : index_last_intern(buf); + int rc = m_key_descr_arr[active_index]->m_is_reverse_cf + ? index_first_intern(buf) + : index_last_intern(buf); if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE; - return rc; -} + DBUG_RETURN(rc); +} /* Start scanning from the "first" value. 
@@ -7161,61 +6519,53 @@ int ha_rocksdb::index_last(uchar* const buf) */ -int ha_rocksdb::index_first_intern(uchar* const buf) -{ +int ha_rocksdb::index_first_intern(uchar *const buf) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(buf != nullptr); uchar *key; uint key_size; int rc; - DBUG_ENTER("ha_rocksdb::index_first"); - if (is_pk(active_index, table, m_tbl_def)) - { - key= m_pk_packed_tuple; - } - else - { - key= m_sk_packed_tuple; + if (is_pk(active_index, table, m_tbl_def)) { + key = m_pk_packed_tuple; + } else { + key = m_sk_packed_tuple; } DBUG_ASSERT(key != nullptr); - const Rdb_key_def& kd= *m_key_descr_arr[active_index]; - if (kd.m_is_reverse_cf) - { + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { kd.get_supremum_key(key, &key_size); - } - else - { + } else { kd.get_infimum_key(key, &key_size); } - rocksdb::Slice index_key((const char*)key, key_size); + rocksdb::Slice index_key((const char *)key, key_size); - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); - const bool is_new_snapshot= !tx->has_snapshot(); + const bool is_new_snapshot = !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. 
it did not exist prior to this) - for (;;) - { + for (;;) { setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); - m_skip_scan_it_next_call= true; + m_skip_scan_it_next_call = true; - rc= index_next_with_direction(buf, true); + rc = index_next_with_direction(buf, true); if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) - break; // exit the loop + break; // exit the loop // release the snapshot and iterator so they will be regenerated tx->release_snapshot(); release_scan_iterator(); } - if (!rc) - { + if (!rc) { /* index_next is always incremented on success, so decrement if it is index_first instead @@ -7223,10 +6573,10 @@ int ha_rocksdb::index_first_intern(uchar* const buf) stats.rows_index_first++; stats.rows_index_next--; } + DBUG_RETURN(rc); } - /** @details Start scanning from the "last" value @@ -7264,90 +6614,76 @@ int ha_rocksdb::index_first_intern(uchar* const buf) So, need to: it->Seek(n) || it->SeekToLast(); it->Prev(); */ -int ha_rocksdb::index_last_intern(uchar* const buf) -{ +int ha_rocksdb::index_last_intern(uchar *const buf) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(buf != nullptr); uchar *key; uint key_size; int rc; - DBUG_ENTER("ha_rocksdb::index_last"); - if (is_pk(active_index, table, m_tbl_def)) - { - key= m_pk_packed_tuple; - } - else - { - key= m_sk_packed_tuple; + if (is_pk(active_index, table, m_tbl_def)) { + key = m_pk_packed_tuple; + } else { + key = m_sk_packed_tuple; } DBUG_ASSERT(key != nullptr); - const Rdb_key_def& kd= *m_key_descr_arr[active_index]; - if (kd.m_is_reverse_cf) - { + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { kd.get_infimum_key(key, &key_size); - } - else - { + } else { kd.get_supremum_key(key, &key_size); } - rocksdb::Slice index_key((const char*)key, key_size); + rocksdb::Slice index_key((const char *)key, key_size); - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = 
get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); - bool is_new_snapshot= !tx->has_snapshot(); + bool is_new_snapshot = !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) - for (;;) - { + for (;;) { setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); - if (!m_scan_it->Valid()) - { + if (!m_scan_it->Valid()) { /* RocksDB keyspace has no records that follow our index. It could be, our index is the last in the keyspace. Navigate to last record in the keyspace. */ m_scan_it->SeekToLast(); - } - else - { + } else { /* Iterator is pointing at some record right after the end of the index. Move one record back. */ m_scan_it->Prev(); } - m_skip_scan_it_next_call= false; + m_skip_scan_it_next_call = false; - if (is_pk(active_index, table, m_tbl_def)) - { - m_skip_scan_it_next_call= true; - rc= rnd_next_with_direction(buf, false); - } - else - { - rc= find_icp_matching_index_rec(false /*move_forward*/, buf); + if (is_pk(active_index, table, m_tbl_def)) { + m_skip_scan_it_next_call = true; + rc = rnd_next_with_direction(buf, false); + } else { + rc = find_icp_matching_index_rec(false /*move_forward*/, buf); if (!rc) - rc= secondary_index_read(active_index, buf); + rc = secondary_index_read(active_index, buf); } if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) - break; /* exit the loop */ + break; /* exit the loop */ // release the snapshot and iterator so they will be regenerated tx->release_snapshot(); release_scan_iterator(); } - if (!rc) - { + if (!rc) { /* index_next is always incremented on success, so decrement if it is index_first instead @@ -7355,18 +6691,20 @@ int ha_rocksdb::index_last_intern(uchar* const buf) stats.rows_index_first++; stats.rows_index_next--; } + DBUG_RETURN(rc); } +void ha_rocksdb::unlock_row() { + DBUG_ENTER_FUNC(); -void ha_rocksdb::unlock_row() -{ - if (m_lock_rows != RDB_LOCK_NONE) - { - 
Rdb_transaction* const tx= get_or_create_tx(table->in_use); - tx->release_lock(m_pk_descr->get_cf(), std::string(m_last_rowkey.ptr(), - m_last_rowkey.length())); + if (m_lock_rows != RDB_LOCK_NONE) { + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + tx->release_lock(m_pk_descr->get_cf(), + std::string(m_last_rowkey.ptr(), m_last_rowkey.length())); } + + DBUG_VOID_RETURN; } /* @@ -7375,35 +6713,35 @@ void ha_rocksdb::unlock_row() - If the index is PRIMARY KEY, and if all of the columns of the table are covered by the PRIMARY KEY, SingleDelete can be used. */ -bool ha_rocksdb::can_use_single_delete(const uint &index) const -{ +bool ha_rocksdb::can_use_single_delete(const uint &index) const { return (index != pk_index(table, m_tbl_def) || (!has_hidden_pk(table) && - table->key_info[index].actual_key_parts == table->s->fields)); + table->key_info[index].actual_key_parts == table->s->fields)); } -bool ha_rocksdb::skip_unique_check() const -{ +bool ha_rocksdb::skip_unique_check() const { /* We want to skip unique checks if: 1) bulk_load is on 2) this table is in the whitelist of tables to skip and the replication lag has reached a large enough value (see unique_check_lag_threshold and unique_check_lage_reset_threshold) - 3) the user set rocksdb_skip_unique_check + 3) the user set unique_checks option to 0 */ return THDVAR(table->in_use, bulk_load) || (m_force_skip_unique_check && m_skip_unique_check) || - THDVAR(table->in_use, skip_unique_check); + my_core::thd_test_options(table->in_use, OPTION_RELAXED_UNIQUE_CHECKS); } -void ha_rocksdb::set_force_skip_unique_check(bool skip) -{ - m_force_skip_unique_check= skip; +void ha_rocksdb::set_force_skip_unique_check(bool skip) { + DBUG_ENTER_FUNC(); + + m_force_skip_unique_check = skip; + + DBUG_VOID_RETURN; } -bool ha_rocksdb::commit_in_the_middle() -{ +bool ha_rocksdb::commit_in_the_middle() { return THDVAR(table->in_use, bulk_load) || THDVAR(table->in_use, commit_in_the_middle); } @@ -7413,8 +6751,7 @@ bool 
ha_rocksdb::commit_in_the_middle() @retval true if bulk commit failed @retval false if bulk commit was skipped or succeeded */ -bool ha_rocksdb::do_bulk_commit(Rdb_transaction* const tx) -{ +bool ha_rocksdb::do_bulk_commit(Rdb_transaction *const tx) { DBUG_ASSERT(tx != nullptr); return commit_in_the_middle() && tx->get_write_count() >= THDVAR(table->in_use, bulk_load_size) && @@ -7427,8 +6764,7 @@ bool ha_rocksdb::do_bulk_commit(Rdb_transaction* const tx) does not contain a primary key. (In which case we generate a hidden 'auto-incremented' pk.) */ -bool ha_rocksdb::has_hidden_pk(const TABLE* const table) const -{ +bool ha_rocksdb::has_hidden_pk(const TABLE *const table) const { DBUG_ASSERT(table != nullptr); return Rdb_key_def::table_has_hidden_pk(table); } @@ -7437,9 +6773,8 @@ bool ha_rocksdb::has_hidden_pk(const TABLE* const table) const Returns true if given index number is a hidden_pk. - This is used when a table is created with no primary key. */ -bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ +bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); @@ -7449,22 +6784,19 @@ bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* const table_arg, } /* Returns index of primary key */ -uint ha_rocksdb::pk_index(const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ +uint ha_rocksdb::pk_index(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); - return table_arg->s->primary_key == MAX_INDEXES ? - tbl_def_arg->m_key_count - 1 : - table_arg->s->primary_key; + return table_arg->s->primary_key == MAX_INDEXES ? 
tbl_def_arg->m_key_count - 1 + : table_arg->s->primary_key; } /* Returns true if given index number is a primary key */ -bool ha_rocksdb::is_pk(const uint index, const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ +bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); DBUG_ASSERT(tbl_def_arg != nullptr); @@ -7473,64 +6805,59 @@ bool ha_rocksdb::is_pk(const uint index, const TABLE* const table_arg, is_hidden_pk(index, table_arg, tbl_def_arg); } -const char* ha_rocksdb::get_key_name(const uint index, - const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ +const char *ha_rocksdb::get_key_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); - if (is_hidden_pk(index, table_arg, tbl_def_arg)) - { + if (is_hidden_pk(index, table_arg, tbl_def_arg)) { return HIDDEN_PK_NAME; } return table_arg->key_info[index].name; } -const char* ha_rocksdb::get_key_comment(const uint index, - const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) -{ +const char *ha_rocksdb::get_key_comment(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); - if (is_hidden_pk(index, table_arg, tbl_def_arg)) - { + if (is_hidden_pk(index, table_arg, tbl_def_arg)) { return nullptr; } return table_arg->key_info[index].comment.str; } -int ha_rocksdb::write_row(uchar* const buf) -{ - DBUG_ENTER("ha_rocksdb::write_row"); +int ha_rocksdb::write_row(uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(buf == table->record[0]); DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); + ha_statistic_increment(&SSV::ha_write_count); /* Note: "buf == table->record[0]" is copied from innodb. I am not aware of any use cases where this condition is not true. 
*/ - if (table->next_number_field && buf == table->record[0]) - { + if (table->next_number_field && buf == table->record[0]) { int err; - if ((err= update_auto_increment())) - { + if ((err = update_auto_increment())) { DBUG_RETURN(err); } } - const int rv= update_write_row(nullptr, buf, skip_unique_check()); + const int rv = update_write_row(nullptr, buf, skip_unique_check()); - if (rv == 0) - { + if (rv == 0) { stats.rows_inserted++; update_row_stats(ROWS_INSERTED); } + DBUG_RETURN(rv); } - /** Constructing m_last_rowkey (MyRocks key expression) from before_update|delete image (MySQL row expression). @@ -7540,87 +6867,76 @@ int ha_rocksdb::write_row(uchar* const buf) called without setting m_last_rowkey. This function sets m_last_rowkey for Read Free Replication. */ -void ha_rocksdb::set_last_rowkey(const uchar* const old_data) -{ - if (old_data && use_read_free_rpl()) - { - const int old_pk_size= m_pk_descr->pack_record( +void ha_rocksdb::set_last_rowkey(const uchar *const old_data) { + if (old_data && use_read_free_rpl()) { + const int old_pk_size = m_pk_descr->pack_record( table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false); - m_last_rowkey.copy((const char*)m_pk_packed_tuple, old_pk_size, + m_last_rowkey.copy((const char *)m_pk_packed_tuple, old_pk_size, &my_charset_bin); } } -int ha_rocksdb::get_pk_for_update(struct update_row_info* const row_info) -{ +int ha_rocksdb::get_pk_for_update(struct update_row_info *const row_info) { int size; /* Get new row key for any insert, and any update where the pk is not hidden. Row key for updates with hidden pk is handled below. 
*/ - if (!has_hidden_pk(table)) - { - row_info->hidden_pk_id= 0; + if (!has_hidden_pk(table)) { + row_info->hidden_pk_id = 0; - row_info->new_pk_unpack_info= &m_pk_unpack_info; + row_info->new_pk_unpack_info = &m_pk_unpack_info; - size= m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, - m_pk_packed_tuple, - row_info->new_pk_unpack_info, - false); - } - else if (row_info->old_data == nullptr) - { - row_info->hidden_pk_id= update_hidden_pk_val(); - size= m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple); - } - else - { + size = m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, + m_pk_packed_tuple, + row_info->new_pk_unpack_info, false); + } else if (row_info->old_data == nullptr) { + row_info->hidden_pk_id = update_hidden_pk_val(); + size = + m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple); + } else { /* If hidden primary key, rowkey for new record will always be the same as before */ - size= row_info->old_pk_slice.size(); + size = row_info->old_pk_slice.size(); memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size); - if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id)) - { + if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id)) { return HA_ERR_INTERNAL_ERROR; } } - row_info->new_pk_slice= rocksdb::Slice((const char*)m_pk_packed_tuple, size); + row_info->new_pk_slice = + rocksdb::Slice((const char *)m_pk_packed_tuple, size); - return 0; + return HA_EXIT_SUCCESS; } int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, - const struct update_row_info& row_info, - bool* const found, - bool* const pk_changed) -{ + const struct update_row_info &row_info, + bool *const found, + bool *const pk_changed) { DBUG_ASSERT(found != nullptr); DBUG_ASSERT(pk_changed != nullptr); - *pk_changed= false; + *pk_changed = false; /* For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs always require locking. 
*/ - if (row_info.old_pk_slice.size() > 0) - { + if (row_info.old_pk_slice.size() > 0) { /* If the keys are the same, then no lock is needed */ if (!Rdb_pk_comparator::bytewise_compare(row_info.new_pk_slice, - row_info.old_pk_slice)) - { - *found= false; - return 0; + row_info.old_pk_slice)) { + *found = false; + return HA_EXIT_SUCCESS; } - *pk_changed= true; + *pk_changed = true; } /* @@ -7642,49 +6958,46 @@ int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, 2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty) -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite) */ - const rocksdb::Status s= get_for_update(row_info.tx, m_pk_descr->get_cf(), - row_info.new_pk_slice, &m_retrieved_record); - if (!s.ok() && !s.IsNotFound()) - { + const rocksdb::Status s = + get_for_update(row_info.tx, m_pk_descr->get_cf(), row_info.new_pk_slice, + &m_retrieved_record); + if (!s.ok() && !s.IsNotFound()) { return row_info.tx->set_status_error(table->in_use, s, *m_key_descr_arr[key_id], m_tbl_def); } - *found= !s.IsNotFound(); - return 0; + *found = !s.IsNotFound(); + return HA_EXIT_SUCCESS; } int ha_rocksdb::check_and_lock_sk(const uint &key_id, - const struct update_row_info& row_info, - bool* const found) const -{ + const struct update_row_info &row_info, + bool *const found) const { DBUG_ASSERT(found != nullptr); - *found= false; + *found = false; /* Can skip checking this key if none of the key fields have changed. 
*/ - if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) - { - return 0; + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) { + return HA_EXIT_SUCCESS; } - KEY* key_info= nullptr; - uint n_null_fields= 0; - uint user_defined_key_parts= 1; + KEY *key_info = nullptr; + uint n_null_fields = 0; + uint user_defined_key_parts = 1; - key_info= &table->key_info[key_id]; - user_defined_key_parts= key_info->user_defined_key_parts; + key_info = &table->key_info[key_id]; + user_defined_key_parts = key_info->user_defined_key_parts; /* If there are no uniqueness requirements, there's no need to obtain a lock for this key. */ - if (!(key_info->flags & HA_NOSAME)) - { - return 0; + if (!(key_info->flags & HA_NOSAME)) { + return HA_EXIT_SUCCESS; } - const Rdb_key_def& kd= *m_key_descr_arr[key_id]; + const Rdb_key_def &kd = *m_key_descr_arr[key_id]; /* Calculate the new key for obtaining the lock @@ -7692,33 +7005,30 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, For unique secondary indexes, the key used for locking does not include the extended fields. */ - int size= kd.pack_record(table, m_pack_buffer, row_info.new_data, - m_sk_packed_tuple, nullptr, false, 0, - user_defined_key_parts, &n_null_fields); - if (n_null_fields > 0) - { + int size = + kd.pack_record(table, m_pack_buffer, row_info.new_data, m_sk_packed_tuple, + nullptr, false, 0, user_defined_key_parts, &n_null_fields); + if (n_null_fields > 0) { /* If any fields are marked as NULL this will never match another row as to NULL never matches anything else including another NULL. */ - return 0; + return HA_EXIT_SUCCESS; } - const rocksdb::Slice new_slice= rocksdb::Slice((const char*)m_sk_packed_tuple, - size); + const rocksdb::Slice new_slice = + rocksdb::Slice((const char *)m_sk_packed_tuple, size); /* For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs always require locking. 
*/ - if (row_info.old_data != nullptr) - { - size= kd.pack_record(table, m_pack_buffer, row_info.old_data, + if (row_info.old_data != nullptr) { + size = kd.pack_record(table, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old, nullptr, false, - row_info.hidden_pk_id, - user_defined_key_parts); - const rocksdb::Slice old_slice= rocksdb::Slice( - (const char*)m_sk_packed_tuple_old, size); + row_info.hidden_pk_id, user_defined_key_parts); + const rocksdb::Slice old_slice = + rocksdb::Slice((const char *)m_sk_packed_tuple_old, size); /* For updates, if the keys are the same, then no lock is needed @@ -7727,9 +7037,8 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, this key is unique since NULL is not equal to each other, so no lock is needed. */ - if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice)) - { - return 0; + if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice)) { + return HA_EXIT_SUCCESS; } } @@ -7741,7 +7050,7 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, the latest committed data. */ - const bool all_parts_used= (user_defined_key_parts == kd.get_key_parts()); + const bool all_parts_used = (user_defined_key_parts == kd.get_key_parts()); /* This iterator seems expensive since we need to allocate and free @@ -7754,27 +7063,23 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, The bloom filter may need to be disabled for this lookup. 
*/ - const bool total_order_seek= - !can_use_bloom_filter(ha_thd(), kd, new_slice, all_parts_used, - is_ascending(*m_key_descr_arr[key_id], - HA_READ_KEY_EXACT)); - const bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); + const bool total_order_seek = !can_use_bloom_filter( + ha_thd(), kd, new_slice, all_parts_used, + is_ascending(*m_key_descr_arr[key_id], HA_READ_KEY_EXACT)); + const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); /* psergey-todo: we just need to take lock, lookups not needed: */ std::string dummy_value; - const rocksdb::Status s= get_for_update(row_info.tx, kd.get_cf(), new_slice, - &dummy_value); - if (!s.ok() && !s.IsNotFound()) - { + const rocksdb::Status s = + get_for_update(row_info.tx, kd.get_cf(), new_slice, &dummy_value); + if (!s.ok() && !s.IsNotFound()) { return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); } - rocksdb::Iterator* const iter= row_info.tx->get_iterator(kd.get_cf(), - total_order_seek, - fill_cache, - true /* read current data */, + rocksdb::Iterator *const iter = row_info.tx->get_iterator( + kd.get_cf(), total_order_seek, fill_cache, true /* read current data */, false /* acquire snapshot */); /* Need to scan the transaction to see if there is a duplicate key. @@ -7782,92 +7087,104 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, in the transaction. */ iter->Seek(new_slice); - *found= !read_key_exact(kd, iter, all_parts_used, new_slice); + *found = !read_key_exact(kd, iter, all_parts_used, new_slice); delete iter; - return 0; + return HA_EXIT_SUCCESS; } int ha_rocksdb::check_uniqueness_and_lock( - const struct update_row_info& row_info, - bool* const pk_changed) -{ + const struct update_row_info &row_info, bool *const pk_changed) { /* Go through each index and determine if the index has uniqueness requirements. If it does, then try to obtain a row lock on the new values. Once all locks have been obtained, then perform the changes needed to update/insert the row. 
*/ - for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++) - { + for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) { bool found; - int rc; + int rc; - if (is_pk(key_id, table, m_tbl_def)) - { - rc= check_and_lock_unique_pk(key_id, row_info, &found, pk_changed); - } - else - { - rc= check_and_lock_sk(key_id, row_info, &found); + if (is_pk(key_id, table, m_tbl_def)) { + rc = check_and_lock_unique_pk(key_id, row_info, &found, pk_changed); + } else { + rc = check_and_lock_sk(key_id, row_info, &found); } - if (rc != 0) - { + if (rc != 0) { return rc; } - if (found) - { + if (found) { /* There is a row with this key already, so error out. */ - errkey= key_id; - m_dupp_errkey= errkey; + errkey = key_id; + m_dupp_errkey = errkey; return HA_ERR_FOUND_DUPP_KEY; } } + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::check_duplicate_sk(const TABLE *table_arg, + const Rdb_key_def &index, + const rocksdb::Slice *key, + struct unique_sk_buf_info *sk_info) { + uint n_null_fields = 0; + const rocksdb::Comparator *index_comp = index.get_cf()->GetComparator(); + + /* Get proper SK buffer. 
*/ + uchar *sk_buf = sk_info->swap_and_get_sk_buf(); + + /* Get memcmp form of sk without extended pk tail */ + uint sk_memcmp_size = + index.get_memcmp_sk_parts(table_arg, *key, sk_buf, &n_null_fields); + + sk_info->sk_memcmp_key = + rocksdb::Slice(reinterpret_cast(sk_buf), sk_memcmp_size); + + if (sk_info->sk_memcmp_key_old.size() > 0 && n_null_fields == 0 && + index_comp->Compare(sk_info->sk_memcmp_key, sk_info->sk_memcmp_key_old) == + 0) { + return 1; + } + + sk_info->sk_memcmp_key_old = sk_info->sk_memcmp_key; return 0; } -int ha_rocksdb::bulk_load_key(Rdb_transaction* const tx, - const Rdb_key_def& kd, - const rocksdb::Slice& key, - const rocksdb::Slice& value) -{ - rocksdb::ColumnFamilyHandle* const cf= kd.get_cf(); +int ha_rocksdb::bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, + const rocksdb::Slice &key, + const rocksdb::Slice &value) { + rocksdb::ColumnFamilyHandle *const cf = kd.get_cf(); DBUG_ASSERT(cf != nullptr); - if (m_sst_info == nullptr) - { - m_sst_info= - std::make_shared(rdb, m_table_handler->m_table_name, - kd.get_name(), cf, rocksdb_db_options, - THDVAR(ha_thd(), trace_sst_api)); + if (m_sst_info == nullptr) { + m_sst_info = std::make_shared( + rdb, m_table_handler->m_table_name, kd.get_name(), cf, + rocksdb_db_options, THDVAR(ha_thd(), trace_sst_api)); tx->start_bulk_load(this); - m_bulk_load_tx= tx; + m_bulk_load_tx = tx; } DBUG_ASSERT(m_sst_info != nullptr); - int rc= m_sst_info->put(key, value); - if (rc != 0) - { + int rc = m_sst_info->put(key, value); + if (rc != 0) { my_printf_error(ER_UNKNOWN_ERROR, "Failed to add a key to sst file writer(%s)", MYF(0), m_sst_info->error_message().c_str()); - rc= HA_ERR_INTERNAL_ERROR; + rc = HA_ERR_INTERNAL_ERROR; } return rc; } -int ha_rocksdb::finalize_bulk_load() -{ - int rc= 0; +int ha_rocksdb::finalize_bulk_load() { + int rc = 0; /* Skip if there are no possible ongoing bulk loads */ - if (m_sst_info == nullptr && m_bulk_load_tx == nullptr) - { + if (m_sst_info == nullptr && 
m_bulk_load_tx == nullptr) { return rc; } @@ -7878,99 +7195,86 @@ int ha_rocksdb::finalize_bulk_load() flushed and cleared by another thread by the time the mutex has been acquired. */ - if (m_sst_info != nullptr) - { - rc= m_sst_info->commit(); - if (rc != 0) - { + if (m_sst_info != nullptr) { + rc = m_sst_info->commit(); + if (rc != 0) { + /* + Log the error immediately here in case the server crashes before + mysql prints via my_printf_error. + */ + sql_print_error("Failed to commit bulk loaded sst file to the " + "data store (%s)", m_sst_info->error_message().c_str()); + my_printf_error(ER_UNKNOWN_ERROR, "Failed to commit bulk loaded sst file to the " "data store (%s)", MYF(0), m_sst_info->error_message().c_str()); - rc= HA_ERR_INTERNAL_ERROR; + rc = HA_ERR_INTERNAL_ERROR; } - m_sst_info= nullptr; + m_sst_info = nullptr; m_bulk_load_tx->end_bulk_load(this); - m_bulk_load_tx= nullptr; + m_bulk_load_tx = nullptr; } mysql_mutex_unlock(&m_bulk_load_mutex); return rc; } -int ha_rocksdb::update_pk(const Rdb_key_def& kd, - const struct update_row_info& row_info, - const bool &pk_changed) -{ - const uint key_id= kd.get_keyno(); - const bool hidden_pk= is_hidden_pk(key_id, table, m_tbl_def); - if (!hidden_pk && pk_changed) - { +int ha_rocksdb::update_pk(const Rdb_key_def &kd, + const struct update_row_info &row_info, + const bool &pk_changed) { + const uint key_id = kd.get_keyno(); + const bool hidden_pk = is_hidden_pk(key_id, table, m_tbl_def); + if (!hidden_pk && pk_changed) { /* The old key needs to be deleted. 
*/ - const rocksdb::Status s= delete_or_singledelete( - key_id, row_info.tx, kd.get_cf(), - row_info.old_pk_slice); - if (!s.ok()) - { + const rocksdb::Status s = delete_or_singledelete( + key_id, row_info.tx, kd.get_cf(), row_info.old_pk_slice); + if (!s.ok()) { return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); } } - if (table->next_number_field) - { + if (table->next_number_field) { update_auto_incr_val(); } rocksdb::Slice value_slice; convert_record_to_storage_format(row_info.new_pk_slice, - row_info.new_pk_unpack_info, - &value_slice); + row_info.new_pk_unpack_info, &value_slice); - int rc= 0; - const auto cf= m_pk_descr->get_cf(); + int rc = 0; + const auto cf = m_pk_descr->get_cf(); if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) && - !hidden_pk) - { + !hidden_pk) { /* Write the primary key directly to an SST file using an SstFileWriter */ - rc= bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, - value_slice); - } - else if (row_info.skip_unique_check) - { + rc = bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, value_slice); + } else if (row_info.skip_unique_check) { /* It is responsibility of the user to make sure that the data being inserted doesn't violate any unique keys. */ row_info.tx->get_blind_write_batch()->Put(cf, row_info.new_pk_slice, value_slice); - } - else if (row_info.tx->m_ddl_transaction) - { + } else if (row_info.tx->m_ddl_transaction) { /* DDL statement must check for unique key conflicts. 
For example: ALTER TABLE tbl DROP PRIMARY KEY, ADD PRIMARY KEY(non_unique_column) */ row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice, value_slice); - } - else - { - const auto s= row_info.tx->put(cf, row_info.new_pk_slice, value_slice); - if (!s.ok()) - { - if (s.IsBusy()) - { - errkey= table->s->primary_key; - m_dupp_errkey= errkey; + } else { + const auto s = row_info.tx->put(cf, row_info.new_pk_slice, value_slice); + if (!s.ok()) { + if (s.IsBusy()) { + errkey = table->s->primary_key; + m_dupp_errkey = errkey; rc = HA_ERR_FOUND_DUPP_KEY; - } - else - { + } else { rc = row_info.tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def); } @@ -7980,10 +7284,8 @@ int ha_rocksdb::update_pk(const Rdb_key_def& kd, return rc; } -int ha_rocksdb::update_sk(const TABLE* const table_arg, - const Rdb_key_def& kd, - const struct update_row_info& row_info) -{ +int ha_rocksdb::update_sk(const TABLE *const table_arg, const Rdb_key_def &kd, + const struct update_row_info &row_info) { int new_packed_size; int old_packed_size; @@ -7995,26 +7297,21 @@ int ha_rocksdb::update_sk(const TABLE* const table_arg, /* Can skip updating this key if none of the key fields have changed. 
*/ - if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) - { - return 0; + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) { + return HA_EXIT_SUCCESS; } - const bool store_row_debug_checksums= should_store_row_debug_checksums(); + const bool store_row_debug_checksums = should_store_row_debug_checksums(); - new_packed_size= kd.pack_record(table_arg, m_pack_buffer, row_info.new_data, - m_sk_packed_tuple, &m_sk_tails, - store_row_debug_checksums, - row_info.hidden_pk_id); + new_packed_size = kd.pack_record( + table_arg, m_pack_buffer, row_info.new_data, m_sk_packed_tuple, + &m_sk_tails, store_row_debug_checksums, row_info.hidden_pk_id); - if (row_info.old_data != nullptr) - { + if (row_info.old_data != nullptr) { // The old value - old_packed_size= kd.pack_record(table_arg, m_pack_buffer, - row_info.old_data, - m_sk_packed_tuple_old, &m_sk_tails_old, - store_row_debug_checksums, - row_info.hidden_pk_id); + old_packed_size = kd.pack_record( + table_arg, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old, + &m_sk_tails_old, store_row_debug_checksums, row_info.hidden_pk_id); /* Check if we are going to write the same value. 
This can happen when @@ -8032,12 +7329,11 @@ int ha_rocksdb::update_sk(const TABLE* const table_arg, */ if (old_packed_size == new_packed_size && m_sk_tails_old.get_current_pos() == m_sk_tails.get_current_pos() && - memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple, - old_packed_size) == 0 && + memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple, old_packed_size) == + 0 && memcmp(m_sk_tails_old.ptr(), m_sk_tails.ptr(), - m_sk_tails.get_current_pos()) == 0) - { - return 0; + m_sk_tails.get_current_pos()) == 0) { + return HA_EXIT_SUCCESS; } /* @@ -8046,9 +7342,8 @@ int ha_rocksdb::update_sk(const TABLE* const table_arg, (also note that DDL statements do not delete rows, so this is not a DDL statement) */ - old_key_slice= rocksdb::Slice( - reinterpret_cast(m_sk_packed_tuple_old), - old_packed_size); + old_key_slice = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple_old), old_packed_size); row_info.tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), old_key_slice); @@ -8065,74 +7360,63 @@ int ha_rocksdb::update_sk(const TABLE* const table_arg, rocksdb::WriteBatchBase *write_batch; if ((row_info.tx->m_ddl_transaction && !(table_arg->key_info[key_id].flags & HA_NOSAME)) || - row_info.skip_unique_check) - { - write_batch= row_info.tx->get_blind_write_batch(); - } - else - { - write_batch= row_info.tx->get_indexed_write_batch(); + row_info.skip_unique_check) { + write_batch = row_info.tx->get_blind_write_batch(); + } else { + write_batch = row_info.tx->get_indexed_write_batch(); } - new_key_slice= rocksdb::Slice( - reinterpret_cast(m_sk_packed_tuple), new_packed_size); - new_value_slice= rocksdb::Slice( - reinterpret_cast(m_sk_tails.ptr()), - m_sk_tails.get_current_pos()); + new_key_slice = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + new_value_slice = + rocksdb::Slice(reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); write_batch->Put(kd.get_cf(), new_key_slice, new_value_slice); - return 0; + return HA_EXIT_SUCCESS; } 
-int ha_rocksdb::update_indexes(const struct update_row_info& row_info, - const bool &pk_changed) -{ +int ha_rocksdb::update_indexes(const struct update_row_info &row_info, + const bool &pk_changed) { int rc; - for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++) - { - const Rdb_key_def& kd= *m_key_descr_arr[key_id]; - if (is_pk(key_id, table, m_tbl_def)) - { - rc= update_pk(kd, row_info, pk_changed); - } - else - { - rc= update_sk(table, kd, row_info); + for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) { + const Rdb_key_def &kd = *m_key_descr_arr[key_id]; + if (is_pk(key_id, table, m_tbl_def)) { + rc = update_pk(kd, row_info, pk_changed); + } else { + rc = update_sk(table, kd, row_info); } - if (rc != 0) - { + if (rc != 0) { return rc; } } - return 0; + return HA_EXIT_SUCCESS; } -int ha_rocksdb::update_write_row(const uchar* const old_data, - const uchar* const new_data, - const bool skip_unique_check) -{ - bool pk_changed= false; +int ha_rocksdb::update_write_row(const uchar *const old_data, + const uchar *const new_data, + const bool skip_unique_check) { + DBUG_ENTER_FUNC(); + + bool pk_changed = false; struct update_row_info row_info; - row_info.old_data= old_data; - row_info.new_data= new_data; - row_info.skip_unique_check= skip_unique_check; - row_info.new_pk_unpack_info= nullptr; - - DBUG_ENTER("ha_rocksdb::write_row_common"); + row_info.old_data = old_data; + row_info.new_data = new_data; + row_info.skip_unique_check = skip_unique_check; + row_info.new_pk_unpack_info = nullptr; set_last_rowkey(old_data); - row_info.tx= get_or_create_tx(table->in_use); + row_info.tx = get_or_create_tx(table->in_use); - if (old_data != nullptr) - { - row_info.old_pk_slice= rocksdb::Slice(m_last_rowkey.ptr(), - m_last_rowkey.length()); + if (old_data != nullptr) { + row_info.old_pk_slice = + rocksdb::Slice(m_last_rowkey.ptr(), m_last_rowkey.length()); /* Determine which indexes need updating. 
*/ calc_updated_indexes(); @@ -8141,21 +7425,18 @@ int ha_rocksdb::update_write_row(const uchar* const old_data, /* Get the new row key into row_info.new_pk_slice */ - int rc= get_pk_for_update(&row_info); - if (rc != 0) - { + int rc = get_pk_for_update(&row_info); + if (rc != 0) { DBUG_RETURN(rc); } - if (!skip_unique_check) - { + if (!skip_unique_check) { /* Check to see if we are going to have failures because of unique keys. Also lock the appropriate key values. */ - rc= check_uniqueness_and_lock(row_info, &pk_changed); - if (rc != 0) - { + rc = check_uniqueness_and_lock(row_info, &pk_changed); + if (rc != 0) { DBUG_RETURN(rc); } } @@ -8168,59 +7449,57 @@ int ha_rocksdb::update_write_row(const uchar* const old_data, here because updates to the transaction will be made and those updates cannot be easily removed without rolling back the entire transaction. */ - rc= update_indexes(row_info, pk_changed); - if (rc != 0) - { + rc = update_indexes(row_info, pk_changed); + if (rc != 0) { DBUG_RETURN(rc); } - if (do_bulk_commit(row_info.tx)) - { + if (do_bulk_commit(row_info.tx)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } - /* Open a cursor and position it at the passed record */ -void ha_rocksdb::setup_scan_iterator( - const Rdb_key_def& kd, - rocksdb::Slice* const slice, - const bool use_all_keys, - const bool is_ascending, - const uint eq_cond_len) -{ +void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, + rocksdb::Slice *const slice, + const bool use_all_keys, + const bool is_ascending, + const uint eq_cond_len) { DBUG_ASSERT(slice != nullptr); DBUG_ASSERT(slice->size() >= eq_cond_len); - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); - bool skip_bloom= true; + bool skip_bloom = true; const rocksdb::Slice eq_cond(slice->data(), eq_cond_len); - if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) - { - skip_bloom= 
false; + if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) { + skip_bloom = false; } /* In some cases, setup_scan_iterator() is called multiple times from the same query but bloom filter can not always be used. - Suppose the following query example. id2 is VARCHAR(30) and PRIMARY KEY (id1, id2). - select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); + Suppose the following query example. id2 is VARCHAR(30) and PRIMARY KEY + (id1, id2). + select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', + '100'); In this case, setup_scan_iterator() is called twice, the first time is for - (id1, id2)=(100, '00000000000000000000') and the second time is for (100, '100'). - If prefix bloom filter length is 24 bytes, prefix bloom filter can be used for the + (id1, id2)=(100, '00000000000000000000') and the second time is for (100, + '100'). + If prefix bloom filter length is 24 bytes, prefix bloom filter can be used + for the first condition but not for the second condition. - If bloom filter condition is changed, currently it is necessary to destroy and + If bloom filter condition is changed, currently it is necessary to destroy + and re-create Iterator. */ - if (m_scan_it_skips_bloom != skip_bloom) - { + if (m_scan_it_skips_bloom != skip_bloom) { release_scan_iterator(); } @@ -8228,46 +7507,40 @@ void ha_rocksdb::setup_scan_iterator( SQL layer can call rnd_init() multiple times in a row. In that case, re-use the iterator, but re-position it at the table start. 
*/ - if (!m_scan_it) - { - const bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache); - if (commit_in_the_middle()) - { + if (!m_scan_it) { + const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); + if (commit_in_the_middle()) { DBUG_ASSERT(m_scan_it_snapshot == nullptr); - m_scan_it_snapshot= rdb->GetSnapshot(); + m_scan_it_snapshot = rdb->GetSnapshot(); auto read_opts = rocksdb::ReadOptions(); read_opts.total_order_seek = true; // TODO: set based on WHERE conditions - read_opts.snapshot= m_scan_it_snapshot; - m_scan_it= rdb->NewIterator(read_opts, kd.get_cf()); + read_opts.snapshot = m_scan_it_snapshot; + m_scan_it = rdb->NewIterator(read_opts, kd.get_cf()); + } else { + m_scan_it = tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache); } - else - { - m_scan_it= tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache); - } - m_scan_it_skips_bloom= skip_bloom; + m_scan_it_skips_bloom = skip_bloom; } /* - Seek() will "Position at the first key in the source that at or past target". + Seek() will "Position at the first key in the source that at or past + target". The operation cannot fail. 
*/ m_scan_it->Seek(*slice); } -void ha_rocksdb::release_scan_iterator() -{ +void ha_rocksdb::release_scan_iterator() { delete m_scan_it; - m_scan_it= nullptr; + m_scan_it = nullptr; - if (m_scan_it_snapshot) - { + if (m_scan_it_snapshot) { rdb->ReleaseSnapshot(m_scan_it_snapshot); - m_scan_it_snapshot= nullptr; + m_scan_it_snapshot = nullptr; } } -void ha_rocksdb::setup_iterator_for_rnd_scan() -{ +void ha_rocksdb::setup_iterator_for_rnd_scan() { uint key_size; if (m_pk_descr->m_is_reverse_cf) @@ -8275,27 +7548,22 @@ void ha_rocksdb::setup_iterator_for_rnd_scan() else m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size); - rocksdb::Slice table_key((const char*)m_pk_packed_tuple, key_size); + rocksdb::Slice table_key((const char *)m_pk_packed_tuple, key_size); setup_scan_iterator(*m_pk_descr, &table_key); - m_skip_scan_it_next_call= true; + m_skip_scan_it_next_call = true; } +int ha_rocksdb::rnd_init(bool scan) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::rnd_init(bool scan) -{ - DBUG_ENTER("ha_rocksdb::rnd_init"); - - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); setup_read_decoders(); - if (scan) - { - m_rnd_scan_is_new_snapshot= !tx->has_snapshot(); + if (scan) { + m_rnd_scan_is_new_snapshot = !tx->has_snapshot(); setup_iterator_for_rnd_scan(); - } - else - { + } else { /* We don't need any preparations for rnd_pos() calls. */ } @@ -8304,48 +7572,46 @@ int ha_rocksdb::rnd_init(bool scan) // the snapshot immediately. 
tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } +int ha_rocksdb::rnd_next(uchar *const buf) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::rnd_next(uchar* const buf) -{ int rc; ha_statistic_increment(&SSV::ha_read_rnd_next_count); - for (;;) - { - rc= rnd_next_with_direction(buf, true); + for (;;) { + rc = rnd_next_with_direction(buf, true); if (rc != HA_ERR_LOCK_DEADLOCK || !m_rnd_scan_is_new_snapshot) - break; /* exit the loop */ + break; /* exit the loop */ // release the snapshot and iterator and then regenerate them - Rdb_transaction *tx= get_or_create_tx(table->in_use); + Rdb_transaction *tx = get_or_create_tx(table->in_use); tx->release_snapshot(); release_scan_iterator(); setup_iterator_for_rnd_scan(); } - m_rnd_scan_is_new_snapshot= false; + m_rnd_scan_is_new_snapshot = false; if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE; - return rc; -} + DBUG_RETURN(rc); +} /* See also secondary_index_read(). */ -int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) -{ - int rc; - DBUG_ENTER("ha_rocksdb::rnd_next"); +int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { + DBUG_ENTER_FUNC(); - table->status= STATUS_NOT_FOUND; + int rc; + + table->status = STATUS_NOT_FOUND; stats.rows_requested++; - if (!m_scan_it || !m_scan_it->Valid()) - { + if (!m_scan_it || !m_scan_it->Valid()) { /* We can get here when SQL layer has called @@ -8357,49 +7623,41 @@ int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) DBUG_RETURN(HA_ERR_END_OF_FILE); } - for ( ; ; ) - { - if (m_skip_scan_it_next_call) - { - m_skip_scan_it_next_call= false; - } - else - { + for (;;) { + if (m_skip_scan_it_next_call) { + m_skip_scan_it_next_call = false; + } else { if (move_forward) - m_scan_it->Next(); /* this call cannot fail */ + m_scan_it->Next(); /* this call cannot fail */ else - m_scan_it->Prev(); /* this call cannot fail */ + m_scan_it->Prev(); /* this call cannot fail 
*/ } - if (!m_scan_it->Valid()) - { - rc= HA_ERR_END_OF_FILE; + if (!m_scan_it->Valid()) { + rc = HA_ERR_END_OF_FILE; break; } /* check if we're out of this table */ - const rocksdb::Slice key= m_scan_it->key(); - if (!m_pk_descr->covers_key(key)) - { - rc= HA_ERR_END_OF_FILE; + const rocksdb::Slice key = m_scan_it->key(); + if (!m_pk_descr->covers_key(key)) { + rc = HA_ERR_END_OF_FILE; break; } - if (m_lock_rows != RDB_LOCK_NONE) - { + if (m_lock_rows != RDB_LOCK_NONE) { /* Lock the row we've just read. Now we call get_for_update which will 1) Take a lock and 2) Will fail if the row was deleted since the snapshot was taken. */ - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); - const rocksdb::Status s= get_for_update(tx, m_pk_descr->get_cf(), key, - &m_retrieved_record); + const rocksdb::Status s = + get_for_update(tx, m_pk_descr->get_cf(), key, &m_retrieved_record); if (s.IsNotFound() && - my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) - { + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) { // This occurs if we accessed a row, tried to lock it, failed, // released and reacquired the snapshot (because of READ COMMITTED // mode) and the row was deleted by someone else in the meantime. @@ -8407,27 +7665,24 @@ int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) continue; } - if (!s.ok()) - { - DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, - m_tbl_def)); + if (!s.ok()) { + DBUG_RETURN( + tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); } // If we called get_for_update() use the value from that call not from // the iterator as it may be stale since we don't have a snapshot // when m_lock_rows is not RDB_LOCK_NONE. 
m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); - rc= convert_record_from_storage_format(&key, buf); - } - else - { + rc = convert_record_from_storage_format(&key, buf); + } else { // Use the value from the iterator - rocksdb::Slice value= m_scan_it->value(); + rocksdb::Slice value = m_scan_it->value(); m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); - rc= convert_record_from_storage_format(&key, &value, buf); + rc = convert_record_from_storage_format(&key, &value, buf); } - table->status= 0; + table->status = 0; break; } @@ -8440,19 +7695,18 @@ int ha_rocksdb::rnd_next_with_direction(uchar* const buf, bool move_forward) DBUG_RETURN(rc); } - -int ha_rocksdb::rnd_end() -{ - DBUG_ENTER("ha_rocksdb::rnd_end"); +int ha_rocksdb::rnd_end() { + DBUG_ENTER_FUNC(); release_scan_iterator(); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } -int ha_rocksdb::index_init(uint idx, bool sorted) -{ - Rdb_transaction* const tx= get_or_create_tx(table->in_use); +int ha_rocksdb::index_init(uint idx, bool sorted) { + DBUG_ENTER_FUNC(); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); setup_read_decoders(); @@ -8462,39 +7716,34 @@ int ha_rocksdb::index_init(uint idx, bool sorted) // Otherwise acquire the snapshot immediately. tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); - active_index= idx; + active_index = idx; - return 0; + DBUG_RETURN(HA_EXIT_SUCCESS); } -int ha_rocksdb::index_end() -{ +int ha_rocksdb::index_end() { + DBUG_ENTER_FUNC(); + release_scan_iterator(); - active_index= MAX_KEY; - in_range_check_pushed_down= FALSE; - return 0; + active_index = MAX_KEY; + in_range_check_pushed_down = FALSE; + + DBUG_RETURN(HA_EXIT_SUCCESS); } +int ha_rocksdb::truncate() { + DBUG_ENTER_FUNC(); -int ha_rocksdb::delete_all_rows(ha_rows* const nrows) -{ - DBUG_ENTER("ha_rocksdb::delete_all_rows"); - /* - We will have to delete records one-by-one anyway. Let the SQL layer do - it. 
- */ - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} + DBUG_ASSERT(m_tbl_def != nullptr); + + m_tbl_def->m_auto_incr_val = 1; + m_tbl_def->m_hidden_pk_val = 1; -int ha_rocksdb::truncate() -{ - m_tbl_def->m_auto_incr_val= 1; - m_tbl_def->m_hidden_pk_val= 1; remove_rows(m_tbl_def); - return 0; -} + DBUG_RETURN(HA_EXIT_SUCCESS); +} /* Delete the row we've last read. The row is also passed as parameter. @@ -8504,108 +7753,95 @@ int ha_rocksdb::truncate() The row is either table->record[0] or table->record[1]. (Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)" */ -int ha_rocksdb::delete_row(const uchar* const buf) -{ - DBUG_ENTER("ha_rocksdb::delete_row"); +int ha_rocksdb::delete_row(const uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); ha_statistic_increment(&SSV::ha_delete_count); set_last_rowkey(buf); rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); - Rdb_transaction* const tx= get_or_create_tx(table->in_use); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); - const uint index= pk_index(table, m_tbl_def); - rocksdb::Status s= delete_or_singledelete(index, tx, - m_pk_descr->get_cf(), - key_slice); - if (!s.ok()) - { + const uint index = pk_index(table, m_tbl_def); + rocksdb::Status s = + delete_or_singledelete(index, tx, m_pk_descr->get_cf(), key_slice); + if (!s.ok()) { DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); } - longlong hidden_pk_id= 0; - if (m_tbl_def->m_key_count > 1 && - has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + longlong hidden_pk_id = 0; + if (m_tbl_def->m_key_count > 1 && has_hidden_pk(table) && + read_hidden_pk_id_from_rowkey(&hidden_pk_id)) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Delete the record for every secondary index - for (uint i= 0; i < m_tbl_def->m_key_count; i++) - { - if (!is_pk(i, table, m_tbl_def)) - { + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (!is_pk(i, table, m_tbl_def)) { int packed_size; 
- const Rdb_key_def& kd= *m_key_descr_arr[i]; - packed_size= kd.pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, + const Rdb_key_def &kd = *m_key_descr_arr[i]; + packed_size = kd.pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, nullptr, false, hidden_pk_id); rocksdb::Slice secondary_key_slice( - reinterpret_cast(m_sk_packed_tuple), packed_size); + reinterpret_cast(m_sk_packed_tuple), packed_size); /* Deleting on secondary key doesn't need any locks: */ tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), secondary_key_slice); } } - if (do_bulk_commit(tx)) - { + if (do_bulk_commit(tx)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } stats.rows_deleted++; update_row_stats(ROWS_DELETED); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } - -rocksdb::Status -ha_rocksdb::delete_or_singledelete(uint index, - Rdb_transaction* const tx, - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key) -{ +rocksdb::Status ha_rocksdb::delete_or_singledelete( + uint index, Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) { if (can_use_single_delete(index)) return tx->single_delete(column_family, key); return tx->delete_key(column_family, key); } +void ha_rocksdb::update_stats(void) { + DBUG_ENTER_FUNC(); -void ha_rocksdb::update_stats(void) -{ - DBUG_ENTER("ha_rocksdb::update_stats"); + stats.records = 0; + stats.index_file_length = 0ul; + stats.data_file_length = 0ul; + stats.mean_rec_length = 0; - stats.records= 0; - stats.index_file_length= 0ul; - stats.data_file_length= 0ul; - stats.mean_rec_length= 0; - for (uint i= 0; i < m_tbl_def->m_key_count; i++) - { - if (is_pk(i, table, m_tbl_def)) - { - stats.data_file_length= m_pk_descr->m_stats.m_actual_disk_size; + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (is_pk(i, table, m_tbl_def)) { + stats.data_file_length = m_pk_descr->m_stats.m_actual_disk_size; stats.records = m_pk_descr->m_stats.m_rows; - } - else - { - 
stats.index_file_length+= m_key_descr_arr[i]->m_stats.m_actual_disk_size; + } else { + stats.index_file_length += m_key_descr_arr[i]->m_stats.m_actual_disk_size; } } + DBUG_VOID_RETURN; } -int ha_rocksdb::info(uint flag) -{ - DBUG_ENTER("ha_rocksdb::info"); +int ha_rocksdb::info(uint flag) { + DBUG_ENTER_FUNC(); if (!table) - return 1; + return HA_EXIT_FAILURE; - if (flag & HA_STATUS_VARIABLE) - { + if (flag & HA_STATUS_VARIABLE) { /* Test only to simulate corrupted stats */ DBUG_EXECUTE_IF("myrocks_simulate_negative_stats", - m_pk_descr->m_stats.m_actual_disk_size = - -m_pk_descr->m_stats.m_actual_disk_size;); + m_pk_descr->m_stats.m_actual_disk_size = + -m_pk_descr->m_stats.m_actual_disk_size;); update_stats(); @@ -8615,27 +7851,27 @@ int ha_rocksdb::info(uint flag) */ if (static_cast(stats.data_file_length) < 0 || static_cast(stats.index_file_length) < 0 || - static_cast(stats.records) < 0) - { + static_cast(stats.records) < 0) { if (analyze(nullptr, nullptr)) { - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } update_stats(); } - if (stats.records == 0) - { + if (stats.records == 0) { // most likely, the table is in memtable // try to deduce from GetApproximateSizes - uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; - auto r= get_range(pk_index(table, m_tbl_def), buf); - uint64_t sz= 0; - rdb->GetApproximateSizes( - m_pk_descr->get_cf(), - &r, 1, - &sz, true); - stats.records = sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + auto r = get_range(pk_index(table, m_tbl_def), buf); + uint64_t sz = 0; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, true); +#pragma GCC diagnostic pop + + stats.records = sz / ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; stats.data_file_length = sz; if (rocksdb_debug_optimizer_n_rows > 0) @@ -8645,66 +7881,59 @@ int ha_rocksdb::info(uint flag) if (stats.records != 0) stats.mean_rec_length = 
stats.data_file_length / stats.records; } - if (flag & HA_STATUS_CONST) - { - ref_length= m_pk_descr->max_storage_fmt_length(); + if (flag & HA_STATUS_CONST) { + ref_length = m_pk_descr->max_storage_fmt_length(); // TODO: Needs to reimplement after having real index statistics - for (uint i= 0; i < m_tbl_def->m_key_count; i++) - { - if (is_hidden_pk(i, table, m_tbl_def)) - { + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (is_hidden_pk(i, table, m_tbl_def)) { continue; } - KEY* const k= &table->key_info[i]; - for (uint j = 0; j < k->actual_key_parts; j++) - { - const Rdb_index_stats& k_stats= m_key_descr_arr[i]->m_stats; + KEY *const k = &table->key_info[i]; + for (uint j = 0; j < k->actual_key_parts; j++) { + const Rdb_index_stats &k_stats = m_key_descr_arr[i]->m_stats; uint x = k_stats.m_distinct_keys_per_prefix.size() > j && - k_stats.m_distinct_keys_per_prefix[j] > 0 ? - k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j] : - 0; + k_stats.m_distinct_keys_per_prefix[j] > 0 + ? k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j] + : 0; if (x > stats.records) x = stats.records; if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) || - rocksdb_debug_optimizer_n_rows > 0) - { - // Fake cardinality implementation. For example, (idx1, idx2, idx3) index + rocksdb_debug_optimizer_n_rows > 0) { + // Fake cardinality implementation. For example, (idx1, idx2, idx3) + // index // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1. // rec_per_key for the whole index is 1, and multiplied by 2^n if // n suffix columns of the index are not used. - x = 1 << (k->actual_key_parts-j-1); + x = 1 << (k->actual_key_parts - j - 1); } - k->rec_per_key[j]= x; + k->rec_per_key[j] = x; } } } - if (flag & HA_STATUS_ERRKEY) - { + if (flag & HA_STATUS_ERRKEY) { /* Currently we support only primary keys so we know which key had a uniqueness violation. */ - errkey= m_dupp_errkey; - dup_ref= m_pk_tuple; // TODO(?): this should store packed PK. 
+ errkey = m_dupp_errkey; + dup_ref = m_pk_tuple; // TODO(?): this should store packed PK. } - if (flag & HA_STATUS_AUTO) - { - stats.auto_increment_value= m_tbl_def->m_auto_incr_val; + if (flag & HA_STATUS_AUTO) { + stats.auto_increment_value = m_tbl_def->m_auto_incr_val; } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } -void ha_rocksdb::position(const uchar* const record) -{ - DBUG_ENTER("ha_rocksdb::position"); +void ha_rocksdb::position(const uchar *const record) { + DBUG_ENTER_FUNC(); - longlong hidden_pk_id= 0; + longlong hidden_pk_id = 0; if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) - DBUG_ASSERT(false); // should never reach here + DBUG_ASSERT(false); // should never reach here /* Get packed primary key value from the record. @@ -8719,8 +7948,8 @@ void ha_rocksdb::position(const uchar* const record) read set (this may potentially disable index-only access when PK column cannot be restored from its mem-comparable form in the secondary indexes). */ - const uint packed_size= m_pk_descr->pack_record(table, m_pack_buffer, record, - ref, nullptr, false, hidden_pk_id); + const uint packed_size = m_pk_descr->pack_record( + table, m_pack_buffer, record, ref, nullptr, false, hidden_pk_id); /* It could be that mem-comparable form of PK occupies less than ref_length @@ -8732,26 +7961,23 @@ void ha_rocksdb::position(const uchar* const record) DBUG_VOID_RETURN; } +int ha_rocksdb::rnd_pos(uchar *const buf, uchar *const pos) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::rnd_pos(uchar* const buf, uchar* const pos) -{ int rc; size_t len; - DBUG_ENTER("ha_rocksdb::rnd_pos"); ha_statistic_increment(&SSV::ha_read_rnd_count); stats.rows_requested++; - len= m_pk_descr->key_length(table, rocksdb::Slice((const char*)pos, - ref_length)); - if (len == size_t(-1)) - { + len = m_pk_descr->key_length(table, + rocksdb::Slice((const char *)pos, ref_length)); + if (len == size_t(-1)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); /* Data corruption? 
*/ } - rc= get_row_by_rowid(buf, pos, len); + rc = get_row_by_rowid(buf, pos, len); - if (!rc) - { + if (!rc) { stats.rows_read++; update_row_stats(ROWS_READ); } @@ -8759,37 +7985,32 @@ int ha_rocksdb::rnd_pos(uchar* const buf, uchar* const pos) DBUG_RETURN(rc); } - /* @brief Calculate (if needed) the bitmap of indexes that are modified by the current query @detail - The calculation done by checking index definitions against the table->write_set + The calculation done by checking index definitions against the + table->write_set column bitmap. */ -void ha_rocksdb::calc_updated_indexes() -{ - if (!m_update_scope_is_valid) - { - m_update_scope_is_valid= true; +void ha_rocksdb::calc_updated_indexes() { + if (!m_update_scope_is_valid) { + m_update_scope_is_valid = true; m_update_scope.clear_all(); - for (uint keynr= 0; keynr < table->s->keys; keynr++) - { - const Rdb_key_def& kd= *m_key_descr_arr[keynr]; + for (uint keynr = 0; keynr < table->s->keys; keynr++) { + const Rdb_key_def &kd = *m_key_descr_arr[keynr]; /* Walk over all key parts, including the "extended key" suffix */ - const uint key_parts= kd.get_key_parts(); - for (uint kp= 0; kp < key_parts; kp++) - { + const uint key_parts = kd.get_key_parts(); + for (uint kp = 0; kp < key_parts; kp++) { if (has_hidden_pk(table) && kp + 1 == key_parts) break; - Field* const field= kd.get_table_field_for_part_no(table, kp); - if (bitmap_is_set(table->write_set, field->field_index)) - { + Field *const field = kd.get_table_field_for_part_no(table, kp); + if (bitmap_is_set(table->write_set, field->field_index)) { m_update_scope.set_bit(keynr); break; } @@ -8798,10 +8019,11 @@ void ha_rocksdb::calc_updated_indexes() } } +int ha_rocksdb::update_row(const uchar *const old_data, uchar *const new_data) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::update_row(const uchar* const old_data, uchar* const new_data) -{ - DBUG_ENTER("ha_rocksdb::update_row"); + DBUG_ASSERT(old_data != nullptr); + DBUG_ASSERT(new_data != nullptr); 
DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); /* old_data points to record we're updating. It is the same as the record @@ -8811,40 +8033,34 @@ int ha_rocksdb::update_row(const uchar* const old_data, uchar* const new_data) DBUG_ASSERT(new_data == table->record[0]); ha_statistic_increment(&SSV::ha_update_count); - const int rv= update_write_row(old_data, new_data, false); + const int rv = update_write_row(old_data, new_data, false); - if (rv == 0) - { + if (rv == 0) { stats.rows_updated++; update_row_stats(ROWS_UPDATED); } + DBUG_RETURN(rv); } - /* The following function was copied from ha_blackhole::store_lock: */ -THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - DBUG_ENTER("ha_rocksdb::store_lock"); +THR_LOCK_DATA **ha_rocksdb::store_lock(THD *const thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(to != nullptr); bool in_lock_tables = my_core::thd_in_lock_tables(thd); /* First, make a decision about MyRocks's internal locking */ - if (lock_type >= TL_WRITE_ALLOW_WRITE) - { + if (lock_type >= TL_WRITE_ALLOW_WRITE) { m_lock_rows = RDB_LOCK_WRITE; - } - else if (lock_type == TL_READ_WITH_SHARED_LOCKS) - { + } else if (lock_type == TL_READ_WITH_SHARED_LOCKS) { m_lock_rows = RDB_LOCK_READ; - } - else - { + } else { m_lock_rows = RDB_LOCK_NONE; - if (THDVAR(thd, lock_scanned_rows)) - { + if (THDVAR(thd, lock_scanned_rows)) { /* The following logic was copied directly from ha_innobase::store_lock_with_x_type() in @@ -8854,8 +8070,7 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, const uint sql_command = my_core::thd_sql_command(thd); if ((lock_type == TL_READ && in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || - can_hold_read_locks_on_select(thd, lock_type)) - { + can_hold_read_locks_on_select(thd, lock_type)) { ulong tx_isolation = my_core::thd_tx_isolation(thd); if (sql_command != 
SQLCOM_CHECKSUM && ((my_core::thd_test_options(thd, OPTION_BIN_LOG) && @@ -8864,10 +8079,8 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, (lock_type != TL_READ && lock_type != TL_READ_NO_INSERT) || (sql_command != SQLCOM_INSERT_SELECT && sql_command != SQLCOM_REPLACE_SELECT && - sql_command != SQLCOM_UPDATE && - sql_command != SQLCOM_DELETE && - sql_command != SQLCOM_CREATE_TABLE))) - { + sql_command != SQLCOM_UPDATE && sql_command != SQLCOM_DELETE && + sql_command != SQLCOM_CREATE_TABLE))) { m_lock_rows = RDB_LOCK_READ; } } @@ -8875,8 +8088,7 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, } /* Then, tell the SQL layer what kind of locking it should use: */ - if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK) - { + if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK) { /* Here is where we get into the guts of a row level lock. If TL_UNLOCK is set @@ -8885,8 +8097,7 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, */ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && - !in_lock_tables && !my_core::thd_tablespace_op(thd)) - { + !in_lock_tables && !my_core::thd_tablespace_op(thd)) { lock_type = TL_WRITE_ALLOW_WRITE; } @@ -8898,65 +8109,61 @@ THR_LOCK_DATA **ha_rocksdb::store_lock(THD* const thd, concurrent inserts to t2. 
*/ - if (lock_type == TL_READ_NO_INSERT && !in_lock_tables) - { + if (lock_type == TL_READ_NO_INSERT && !in_lock_tables) { lock_type = TL_READ; } - m_db_lock.type= lock_type; + m_db_lock.type = lock_type; } - *to++= &m_db_lock; + + *to++ = &m_db_lock; + DBUG_RETURN(to); } -void ha_rocksdb::read_thd_vars(THD* const thd) -{ - m_store_row_debug_checksums= THDVAR(thd, store_row_debug_checksums); - m_verify_row_debug_checksums= THDVAR(thd, verify_row_debug_checksums); - m_checksums_pct= THDVAR(thd, checksums_pct); +void ha_rocksdb::read_thd_vars(THD *const thd) { + m_store_row_debug_checksums = THDVAR(thd, store_row_debug_checksums); + m_verify_row_debug_checksums = THDVAR(thd, verify_row_debug_checksums); + m_checksums_pct = THDVAR(thd, checksums_pct); } -const char* ha_rocksdb::thd_rocksdb_tmpdir() -{ - const char* tmp_dir = THDVAR(ha_thd(), tmpdir); +const char *ha_rocksdb::thd_rocksdb_tmpdir() { + const char *tmp_dir = THDVAR(ha_thd(), tmpdir); /* We want to treat an empty string as nullptr, in these cases DDL operations will use the default --tmpdir passed to mysql instead. 
*/ if (tmp_dir != nullptr && *tmp_dir == '\0') { - tmp_dir= nullptr; + tmp_dir = nullptr; } - return(tmp_dir); + return (tmp_dir); } -int ha_rocksdb::external_lock(THD* const thd, int lock_type) -{ - int res= 0; - DBUG_ENTER("ha_rocksdb::external_lock"); +int ha_rocksdb::external_lock(THD *const thd, int lock_type) { + DBUG_ENTER_FUNC(); - int binlog_format= my_core::thd_binlog_format(thd); - bool unsafe_for_binlog= THDVAR(ha_thd(), unsafe_for_binlog); - if (lock_type == F_WRLCK && !thd->rli_slave && - !unsafe_for_binlog && + DBUG_ASSERT(thd != nullptr); + + int res = 0; + int binlog_format = my_core::thd_binlog_format(thd); + bool unsafe_for_binlog = THDVAR(ha_thd(), unsafe_for_binlog); + if (lock_type == F_WRLCK && !thd->rli_slave && !unsafe_for_binlog && binlog_format != BINLOG_FORMAT_ROW && binlog_format != BINLOG_FORMAT_UNSPEC && - my_core::thd_binlog_filter_ok(thd)) - { - my_printf_error(ER_UNKNOWN_ERROR, - "Can't execute updates on master with binlog_format != ROW.", - MYF(0)); + my_core::thd_binlog_filter_ok(thd)) { + my_printf_error( + ER_UNKNOWN_ERROR, + "Can't execute updates on master with binlog_format != ROW.", MYF(0)); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - if (lock_type == F_UNLCK) - { - Rdb_transaction* const tx= get_or_create_tx(thd); + if (lock_type == F_UNLCK) { + Rdb_transaction *const tx = get_or_create_tx(thd); tx->io_perf_end_and_record(&m_io_perf); - if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - { + if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* Do like InnoDB: when we get here, it's time to commit a single-statement transaction. @@ -8965,21 +8172,17 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) for each of them, but that's ok because non-first tx->commit() calls will be no-ops. 
*/ - res= tx->commit_or_rollback(); + res = tx->commit_or_rollback(); } - } - else - { + } else { if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED || - my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) - { - my_printf_error(ER_UNKNOWN_ERROR, - "MyRocks supports only READ COMMITTED and " - "REPEATABLE READ isolation levels. " - "Please change from current isolation " - "level %s", - MYF(0), - tx_isolation_names[my_core::thd_tx_isolation(thd)]); + my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) { + my_printf_error( + ER_UNKNOWN_ERROR, "MyRocks supports only READ COMMITTED and " + "REPEATABLE READ isolation levels. " + "Please change from current isolation " + "level %s", + MYF(0), tx_isolation_names[my_core::thd_tx_isolation(thd)]); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } /* @@ -8987,19 +8190,17 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) is, handler->start_stmt() is not called for INSERTs. So, we put this code here. */ - Rdb_transaction* const tx= get_or_create_tx(thd); + Rdb_transaction *const tx = get_or_create_tx(thd); read_thd_vars(thd); - m_update_scope_is_valid= false; + m_update_scope_is_valid = false; - if (skip_unique_check()) - { + if (skip_unique_check()) { if ((thd->lex->sql_command == SQLCOM_INSERT || - thd->lex->sql_command == SQLCOM_LOAD || - thd->lex->sql_command == SQLCOM_REPLACE) && + thd->lex->sql_command == SQLCOM_LOAD || + thd->lex->sql_command == SQLCOM_REPLACE) && (thd->lex->duplicates == DUP_REPLACE || - thd->lex->duplicates == DUP_UPDATE)) - { + thd->lex->duplicates == DUP_UPDATE)) { my_printf_error(ER_UNKNOWN_ERROR, "When unique checking is disabled in MyRocks, INSERT," "UPDATE,LOAD statements with clauses that update or " @@ -9011,10 +8212,8 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) } } - if (lock_type == F_WRLCK) - { - if (tx->is_tx_read_only()) - { + if (lock_type == F_WRLCK) { + if (tx->is_tx_read_only()) { my_printf_error(ER_UNKNOWN_ERROR, "Can't execute updates when you started 
a transaction " "with START TRANSACTION WITH CONSISTENT [ROCKSDB] " @@ -9030,13 +8229,12 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) Note: sometimes, external_lock() can be called without a prior ::store_lock call. That's why we need to set lock_* members here, too. */ - m_lock_rows= RDB_LOCK_WRITE; + m_lock_rows = RDB_LOCK_WRITE; if (thd->lex->sql_command == SQLCOM_CREATE_INDEX || - thd->lex->sql_command == SQLCOM_DROP_INDEX || - thd->lex->sql_command == SQLCOM_ALTER_TABLE) - { - tx->m_ddl_transaction= true; + thd->lex->sql_command == SQLCOM_DROP_INDEX || + thd->lex->sql_command == SQLCOM_ALTER_TABLE) { + tx->m_ddl_transaction = true; } } rocksdb_register_tx(rocksdb_hton, thd, tx); @@ -9046,7 +8244,6 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) DBUG_RETURN(res); } - /** @note A quote from ha_innobase::start_stmt(): @@ -9057,63 +8254,58 @@ int ha_rocksdb::external_lock(THD* const thd, int lock_type) */ -int ha_rocksdb::start_stmt(THD* const thd, thr_lock_type lock_type) -{ - Rdb_transaction* const tx= get_or_create_tx(thd); +int ha_rocksdb::start_stmt(THD *const thd, thr_lock_type lock_type) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + + Rdb_transaction *const tx = get_or_create_tx(thd); read_thd_vars(thd); rocksdb_register_tx(ht, thd, tx); tx->io_perf_start(&m_io_perf); - return 0; + + DBUG_RETURN(HA_EXIT_SUCCESS); } -rocksdb::Range get_range( - uint32_t i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], int offset1, - int offset2) -{ - uchar* buf_begin= buf; - uchar* buf_end= buf + Rdb_key_def::INDEX_NUMBER_SIZE; +rocksdb::Range get_range(uint32_t i, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2], + int offset1, int offset2) { + uchar *buf_begin = buf; + uchar *buf_end = buf + Rdb_key_def::INDEX_NUMBER_SIZE; rdb_netbuf_store_index(buf_begin, i + offset1); rdb_netbuf_store_index(buf_end, i + offset2); return rocksdb::Range( - rocksdb::Slice((const char*) buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE), - 
rocksdb::Slice((const char*) buf_end, Rdb_key_def::INDEX_NUMBER_SIZE)); + rocksdb::Slice((const char *)buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE), + rocksdb::Slice((const char *)buf_end, Rdb_key_def::INDEX_NUMBER_SIZE)); } -static rocksdb::Range get_range( - const Rdb_key_def& kd, - uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], - int offset1, int offset2) -{ +static rocksdb::Range get_range(const Rdb_key_def &kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2], + int offset1, int offset2) { return get_range(kd.get_index_number(), buf, offset1, offset2); } -rocksdb::Range get_range(const Rdb_key_def& kd, - uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) -{ - if (kd.m_is_reverse_cf) - { +rocksdb::Range get_range(const Rdb_key_def &kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) { + if (kd.m_is_reverse_cf) { return myrocks::get_range(kd, buf, 1, 0); - } - else - { + } else { return myrocks::get_range(kd, buf, 0, 1); } } -rocksdb::Range ha_rocksdb::get_range( - const int &i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) const -{ +rocksdb::Range +ha_rocksdb::get_range(const int &i, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) const { return myrocks::get_range(*m_key_descr_arr[i], buf); } - /* Drop index thread's main logic */ -void Rdb_drop_index_thread::run() -{ +void Rdb_drop_index_thread::run() { mysql_mutex_lock(&m_signal_mutex); for (;;) { @@ -9129,11 +8321,11 @@ void Rdb_drop_index_thread::run() timespec ts; clock_gettime(CLOCK_REALTIME, &ts); ts.tv_sec += dict_manager.is_drop_index_empty() - ? 24*60*60 // no filtering - : 60; // filtering + ? 
24 * 60 * 60 // no filtering + : 60; // filtering - const auto ret __attribute__((__unused__)) = mysql_cond_timedwait( - &m_signal_cond, &m_signal_mutex, &ts); + const auto ret MY_ATTRIBUTE((__unused__)) = + mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts); if (m_stop) { break; } @@ -9141,7 +8333,7 @@ void Rdb_drop_index_thread::run() DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); mysql_mutex_unlock(&m_signal_mutex); - std::vector indices; + std::unordered_set indices; dict_manager.get_ongoing_drop_indexes(&indices); if (!indices.empty()) { std::unordered_set finished; @@ -9149,78 +8341,65 @@ void Rdb_drop_index_thread::run() read_opts.total_order_seek = true; // disable bloom filter for (const auto d : indices) { - uint32 cf_flags= 0; - if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) - { + uint32 cf_flags = 0; + if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) { sql_print_error("RocksDB: Failed to get column family flags " "from cf id %u. MyRocks data dictionary may " - "get corrupted.", d.cf_id); + "get corrupted.", + d.cf_id); abort_with_stack_traces(); } - rocksdb::ColumnFamilyHandle* cfh= cf_manager.get_cf(d.cf_id); + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(d.cf_id); DBUG_ASSERT(cfh); - const bool is_reverse_cf= cf_flags & Rdb_key_def::REVERSE_CF_FLAG; + const bool is_reverse_cf = cf_flags & Rdb_key_def::REVERSE_CF_FLAG; - bool index_removed= false; - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + bool index_removed = false; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; rdb_netbuf_store_uint32(key_buf, d.index_id); - const rocksdb::Slice - key = rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; - rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf?1:0, - is_reverse_cf?0:1); + const rocksdb::Slice key = + rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + rocksdb::Range range = get_range(d.index_id, buf, 
is_reverse_cf ? 1 : 0, + is_reverse_cf ? 0 : 1); rocksdb::CompactRangeOptions compact_range_options; compact_range_options.bottommost_level_compaction = - rocksdb::BottommostLevelCompaction::kForce; + rocksdb::BottommostLevelCompaction::kForce; compact_range_options.exclusive_manual_compaction = false; rocksdb::Status status = DeleteFilesInRange(rdb->GetBaseDB(), cfh, - &range.start, &range.limit); - if (!status.ok()) - { - if (status.IsShutdownInProgress()) - { + &range.start, &range.limit); + if (!status.ok()) { + if (status.IsShutdownInProgress()) { break; } rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); } - status = rdb->CompactRange( - compact_range_options, cfh, &range.start, &range.limit); - if (!status.ok()) - { - if (status.IsShutdownInProgress()) - { + status = rdb->CompactRange(compact_range_options, cfh, &range.start, + &range.limit); + if (!status.ok()) { + if (status.IsShutdownInProgress()) { break; } rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); } - std::unique_ptr it( - rdb->NewIterator(read_opts, cfh)); + std::unique_ptr it(rdb->NewIterator(read_opts, cfh)); it->Seek(key); - if (is_reverse_cf) - { - if (!it->Valid()) - { + if (is_reverse_cf) { + if (!it->Valid()) { it->SeekToLast(); - } - else - { + } else { it->Prev(); } } - if (!it->Valid()) - { - index_removed= true; - } - else - { - if (memcmp(it->key().data(), key_buf, Rdb_key_def::INDEX_NUMBER_SIZE)) - { + if (!it->Valid()) { + index_removed = true; + } else { + if (memcmp(it->key().data(), key_buf, + Rdb_key_def::INDEX_NUMBER_SIZE)) { // Key does not have same prefix - index_removed= true; + index_removed = true; } } - if (index_removed) - { + if (index_removed) { finished.insert(d); } } @@ -9235,14 +8414,11 @@ void Rdb_drop_index_thread::run() mysql_mutex_unlock(&m_signal_mutex); } - -Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* const tablename) -{ +Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) { DBUG_ASSERT(tablename != nullptr); 
std::string str; - if (rdb_normalize_tablename(tablename, &str)) - { + if (rdb_normalize_tablename(tablename, &str)) { // We were not passed table name? DBUG_ASSERT(0); return nullptr; @@ -9258,14 +8434,16 @@ Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* const tablename) tablename points to line in form "./dbname/tablename". */ -int ha_rocksdb::delete_table(const char* const tablename) -{ - DBUG_ENTER("ha_rocksdb::delete_table"); - const std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch* const batch= wb.get(); +int ha_rocksdb::delete_table(const char *const tablename) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(tablename != nullptr); + + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); /* Find the table in the hash */ - Rdb_tbl_def* const tbl = get_table_if_exists(tablename); + Rdb_tbl_def *const tbl = get_table_if_exists(tablename); if (!tbl) DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch); @@ -9280,18 +8458,16 @@ int ha_rocksdb::delete_table(const char* const tablename) rdb_drop_idx_thread.signal(); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } - -void ha_rocksdb::remove_rows(Rdb_tbl_def* const tbl) -{ - const rocksdb::WriteOptions wo= - rdb_get_rocksdb_write_options(handler::ha_thd()); +void ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { + const rocksdb::WriteOptions wo = + rdb_get_rocksdb_write_options(handler::ha_thd()); rocksdb::ReadOptions opts; - opts.total_order_seek= true; - rocksdb::Iterator* const it= rdb->NewIterator(opts); + opts.total_order_seek = true; + rocksdb::Iterator *const it = rdb->NewIterator(opts); char key_buf[MAX_KEY_LENGTH]; uint key_len; @@ -9300,18 +8476,15 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def* const tbl) (This is is not crash-safe, but it doesn't matter, because bulk row deletion will be handled on rocksdb side) */ - for (uint i= 0; i < tbl->m_key_count ; i++) - { - const Rdb_key_def& kd= 
*tbl->m_key_descr_arr[i]; - kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); + for (uint i = 0; i < tbl->m_key_count; i++) { + const Rdb_key_def &kd = *tbl->m_key_descr_arr[i]; + kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); const rocksdb::Slice table_key(key_buf, key_len); it->Seek(table_key); - while (it->Valid()) - { - const rocksdb::Slice key= it->key(); - if (!kd.covers_key(key)) - { + while (it->Valid()) { + const rocksdb::Slice key = it->key(); + if (!kd.covers_key(key)) { break; } @@ -9325,35 +8498,32 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def* const tbl) delete it; } +int ha_rocksdb::rename_table(const char *const from, const char *const to) { + DBUG_ENTER_FUNC(); -int ha_rocksdb::rename_table(const char* const from, const char* const to) -{ DBUG_ASSERT(from != nullptr); DBUG_ASSERT(to != nullptr); - bool write_err= false; + bool write_err = false; std::string from_str; std::string to_str; std::string from_db; std::string to_db; if (rdb_normalize_tablename(from, &from_str) || - rdb_split_normalized_tablename(from_str, &from_db)) - { - return HA_ERR_INTERNAL_ERROR; + rdb_split_normalized_tablename(from_str, &from_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } if (rdb_normalize_tablename(to, &to_str) || - rdb_split_normalized_tablename(to_str, &to_db)) - { - return HA_ERR_INTERNAL_ERROR; + rdb_split_normalized_tablename(to_str, &to_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } // If the user changed the database part of the name then validate that the // 'to' database exists. - if (from_db != to_db && !rdb_database_exists(to_db)) - { - return HA_ERR_INTERNAL_ERROR; + if (from_db != to_db && !rdb_database_exists(to_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } /* Check if any index has a per-index column family */ @@ -9368,30 +8538,28 @@ int ha_rocksdb::rename_table(const char* const from, const char* const to) it has been opened by this statement), and check is_auto_cf there. 
*/ Rdb_tbl_def *tdef; - if (!(tdef= ddl_manager.find(from_str))) - return HA_ERR_INTERNAL_ERROR; + if (!(tdef = ddl_manager.find(from_str))) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - for (uint i= 0; i < tdef->m_key_count; i++) - { + for (uint i = 0; i < tdef->m_key_count; i++) { DBUG_ASSERT(tdef->m_key_descr_arr != nullptr); - if (tdef->m_key_descr_arr[i]->m_is_auto_cf) - { + if (tdef->m_key_descr_arr[i]->m_is_auto_cf) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE on table with per-index CF"); - return HA_ERR_INTERNAL_ERROR; + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } } } - const std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch* const batch= wb.get(); + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); dict_manager.lock(); - write_err= ddl_manager.rename(from_str, to_str, batch) - || dict_manager.commit(batch); + write_err = + ddl_manager.rename(from_str, to_str, batch) || dict_manager.commit(batch); dict_manager.unlock(); - return write_err ? HA_ERR_INTERNAL_ERROR : 0; + DBUG_RETURN(write_err ? 
HA_ERR_INTERNAL_ERROR : 0); } /** @@ -9405,25 +8573,25 @@ int ha_rocksdb::rename_table(const char* const from, const char* const to) */ -bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO* const info, - uint table_changes) -{ - DBUG_ENTER("ha_rocksdb::check_if_incompatible_data"); +bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *const info, + uint table_changes) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(info != nullptr); + // this function is needed only for online alter-table DBUG_RETURN(COMPATIBLE_DATA_NO); } - -int ha_rocksdb::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("ha_rocksdb::extra"); +int ha_rocksdb::extra(enum ha_extra_function operation) { + DBUG_ENTER_FUNC(); switch (operation) { case HA_EXTRA_KEYREAD: - m_keyread_only= true; + m_keyread_only = true; break; case HA_EXTRA_NO_KEYREAD: - m_keyread_only= false; + m_keyread_only = false; break; case HA_EXTRA_FLUSH: /* @@ -9436,17 +8604,17 @@ int ha_rocksdb::extra(enum ha_extra_function operation) break; } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } /* Given a starting key and an ending key, estimate the number of rows that will exist between the two keys. 
*/ -ha_rows ha_rocksdb::records_in_range(uint inx, key_range* const min_key, - key_range* const max_key) -{ - DBUG_ENTER("ha_rocksdb::records_in_range"); +ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key, + key_range *const max_key) { + DBUG_ENTER_FUNC(); + ha_rows ret = THDVAR(ha_thd(), records_in_range); if (ret) { DBUG_RETURN(ret); @@ -9458,71 +8626,67 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range* const min_key, } } - const Rdb_key_def& kd= *m_key_descr_arr[inx]; + const Rdb_key_def &kd = *m_key_descr_arr[inx]; - uint size1=0; + uint size1 = 0; if (min_key) { size1 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, - min_key->key, min_key->keypart_map); + min_key->key, min_key->keypart_map); if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV || min_key->flag == HA_READ_PREFIX_LAST || - min_key->flag == HA_READ_AFTER_KEY) - { + min_key->flag == HA_READ_AFTER_KEY) { kd.successor(m_sk_packed_tuple, size1); } } else { kd.get_infimum_key(m_sk_packed_tuple, &size1); } - uint size2=0; + uint size2 = 0; if (max_key) { size2 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old, - max_key->key, max_key->keypart_map); + max_key->key, max_key->keypart_map); if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV || max_key->flag == HA_READ_PREFIX_LAST || - max_key->flag == HA_READ_AFTER_KEY) - { + max_key->flag == HA_READ_AFTER_KEY) { kd.successor(m_sk_packed_tuple_old, size2); } // pad the upper key with FFFFs to make sure it is more than the lower if (size1 > size2) { - memset(m_sk_packed_tuple_old+size2, 0xff, size1-size2); + memset(m_sk_packed_tuple_old + size2, 0xff, size1 - size2); size2 = size1; } } else { kd.get_supremum_key(m_sk_packed_tuple_old, &size2); } - const rocksdb::Slice slice1((const char*) m_sk_packed_tuple, size1); - const rocksdb::Slice slice2((const char*) m_sk_packed_tuple_old, size2); + const rocksdb::Slice slice1((const char *)m_sk_packed_tuple, size1); + const rocksdb::Slice slice2((const char 
*)m_sk_packed_tuple_old, size2); // slice1 >= slice2 means no row will match - if (slice1.compare(slice2) >= 0) - { - DBUG_RETURN(0); + if (slice1.compare(slice2) >= 0) { + DBUG_RETURN(HA_EXIT_SUCCESS); } - rocksdb::Range r( - kd.m_is_reverse_cf ? slice2 : slice1, - kd.m_is_reverse_cf ? slice1 : slice2 - ); + rocksdb::Range r(kd.m_is_reverse_cf ? slice2 : slice1, + kd.m_is_reverse_cf ? slice1 : slice2); - uint64_t sz=0; + uint64_t sz = 0; auto disk_size = kd.m_stats.m_actual_disk_size; if (disk_size == 0) disk_size = kd.m_stats.m_data_size; auto rows = kd.m_stats.m_rows; - if (rows == 0 || disk_size == 0) - { - rows= 1; - disk_size= ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + if (rows == 0 || disk_size == 0) { + rows = 1; + disk_size = ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; } + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" // Getting statistics, including from Memtables - rdb->GetApproximateSizes( - kd.get_cf(), - &r, 1, - &sz, true); - ret = rows*sz/disk_size; + rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, true); +#pragma GCC diagnostic pop + + ret = rows * sz / disk_size; /* GetApproximateSizes() gives estimates so ret might exceed stats.records. @@ -9531,30 +8695,29 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range* const min_key, To prevent this, changing estimated records slightly smaller than stats.records. 
*/ - if (ret >= stats.records) - { + if (ret >= stats.records) { ret = stats.records * 0.99; } - if (rocksdb_debug_optimizer_n_rows > 0) - { - ret= rocksdb_debug_optimizer_n_rows; - } - else if (ret == 0) - { + if (rocksdb_debug_optimizer_n_rows > 0) { + ret = rocksdb_debug_optimizer_n_rows; + } else if (ret == 0) { ret = 1; } DBUG_RETURN(ret); } +void ha_rocksdb::update_create_info(HA_CREATE_INFO *const create_info) { + DBUG_ENTER_FUNC(); -void ha_rocksdb::update_create_info(HA_CREATE_INFO* const create_info) -{ - if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) - { - create_info->auto_increment_value= m_tbl_def->m_auto_incr_val; + DBUG_ASSERT(create_info != nullptr); + + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { + create_info->auto_increment_value = m_tbl_def->m_auto_incr_val; } + + DBUG_VOID_RETURN; } /** @@ -9571,85 +8734,81 @@ void ha_rocksdb::update_create_info(HA_CREATE_INFO* const create_info) column family, the first row of the next index id should be the last row of the previous index id. 
*/ -int ha_rocksdb::optimize(THD* const thd, HA_CHECK_OPT* const check_opt) -{ - int rc= 0; - for (uint i= 0; i < table->s->keys; i++) - { - uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]; +int ha_rocksdb::optimize(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(check_opt != nullptr); + + int rc = 0; + for (uint i = 0; i < table->s->keys; i++) { + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; auto range = get_range(i, buf); if (!rdb->CompactRange(rocksdb::CompactRangeOptions(), - m_key_descr_arr[i]->get_cf(), - &range.start, &range.limit).ok()) - { - rc= 1; + m_key_descr_arr[i]->get_cf(), &range.start, + &range.limit) + .ok()) { + rc = 1; break; } } - return rc; + + DBUG_RETURN(rc); } -int ha_rocksdb::calculate_stats(const TABLE* const table_arg, THD* const thd, - HA_CHECK_OPT* const check_opt) -{ - DBUG_ENTER("ha_rocksdb::calculate_stats"); +int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, + HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); // find per column family key ranges which need to be queried - std::unordered_map> - ranges; + std::unordered_map> + ranges; std::unordered_set ids_to_check; std::vector buf(table_arg->s->keys * 2 * - Rdb_key_def::INDEX_NUMBER_SIZE); - for (uint i = 0; i < table_arg->s->keys; i++) - { + Rdb_key_def::INDEX_NUMBER_SIZE); + for (uint i = 0; i < table_arg->s->keys; i++) { const auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; - const Rdb_key_def& kd= *m_key_descr_arr[i]; + const Rdb_key_def &kd = *m_key_descr_arr[i]; ranges[kd.get_cf()].push_back(get_range(i, bufp)); ids_to_check.insert(kd.get_gl_index_id()); } // for analyze statements, force flush on memtable to get accurate cardinality - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) && - !rocksdb_pause_background_work) - { - for (auto it : ids_to_check) - 
{ + !rocksdb_pause_background_work) { + for (auto it : ids_to_check) { rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id)); } } // get RocksDB table properties for these ranges rocksdb::TablePropertiesCollection props; - for (auto it : ranges) - { - const auto old_size __attribute__((__unused__)) = props.size(); + for (auto it : ranges) { + const auto old_size MY_ATTRIBUTE((__unused__)) = props.size(); const auto status = rdb->GetPropertiesOfTablesInRange( - it.first, &it.second[0], it.second.size(), &props); + it.first, &it.second[0], it.second.size(), &props); DBUG_ASSERT(props.size() >= old_size); if (!status.ok()) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - int num_sst= 0; + int num_sst = 0; // group stats per index id std::unordered_map stats; - for (const auto &it : ids_to_check) - { + for (const auto &it : ids_to_check) { // Initialize the stats to 0. If there are no files that contain // this gl_index_id, then 0 should be stored for the cached stats. stats[it] = Rdb_index_stats(it); } - for (const auto &it : props) - { + for (const auto &it : props) { std::vector sst_stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); /* sst_stats is a list of index statistics for indexes that have entries in the current SST file. */ - for (const auto &it1 : sst_stats) - { + for (const auto &it1 : sst_stats) { /* Only update statistics for indexes that belong to this SQL table. 
@@ -9661,7 +8820,7 @@ int ha_rocksdb::calculate_stats(const TABLE* const table_arg, THD* const thd, if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end()) continue; - auto kd= ddl_manager.safe_find(it1.m_gl_index_id); + auto kd = ddl_manager.safe_find(it1.m_gl_index_id); DBUG_ASSERT(kd != nullptr); stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length()); } @@ -9672,24 +8831,23 @@ int ha_rocksdb::calculate_stats(const TABLE* const table_arg, THD* const thd, ddl_manager.set_stats(stats); ddl_manager.persist_stats(true); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } -int ha_rocksdb::analyze(THD* const thd, HA_CHECK_OPT* const check_opt) -{ - DBUG_ENTER("ha_rocksdb::analyze"); +int ha_rocksdb::analyze(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); - if (!table) - DBUG_RETURN(1); + if (!table) { + DBUG_RETURN(HA_EXIT_FAILURE); + } DBUG_RETURN(calculate_stats(table, thd, check_opt)); } void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, ulonglong nb_desired_values, - ulonglong* const first_value, - ulonglong* const nb_reserved_values) -{ + ulonglong *const first_value, + ulonglong *const nb_reserved_values) { /* MySQL has a somewhat complicated way of handling the auto-increment value. 
The first time get_auto_increment is called for a statement, @@ -9707,36 +8865,31 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, */ DEBUG_SYNC(ha_thd(), "rocksdb.autoinc_vars"); - if (off > inc) - { - off= 1; + if (off > inc) { + off = 1; } // Local variable reference to simplify code below - std::atomic& auto_incr= m_tbl_def->m_auto_incr_val; + std::atomic &auto_incr = m_tbl_def->m_auto_incr_val; - if (inc == 1 && off == 1) - { + if (inc == 1 && off == 1) { // Optimization for the standard case where we are always simply // incrementing from the last position - *first_value= auto_incr++; - } - else - { + *first_value = auto_incr++; + } else { // The next value can be more complicated if either `inc` or 'off' is not 1 longlong new_val; - longlong last_val= auto_incr; + longlong last_val = auto_incr; // Loop until we can correctly update the atomic value - do - { + do { // Calculate the next value in the auto increment series: // offset + N * increment // where N is 0, 1, 2, ... // // For further information please visit: // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html - new_val= ((last_val + (inc - off) - 1) / inc) * inc + off; + new_val = ((last_val + (inc - off) - 1) / inc) * inc + off; // Attempt to store the new value (plus 1 since m_auto_incr_val contains // the next available value) into the atomic value. If the current @@ -9745,26 +8898,24 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, // with the current value). 
} while (!auto_incr.compare_exchange_weak(last_val, new_val + 1)); - *first_value= new_val; + *first_value = new_val; } - *nb_reserved_values= 1; + *nb_reserved_values = 1; } - #ifndef DBUG_OFF /* Debugger help function */ static char dbug_item_print_buf[512]; -const char *dbug_print_item(Item* const item) -{ - char* const buf= dbug_item_print_buf; +const char *dbug_print_item(Item *const item) { + char *const buf = dbug_item_print_buf; String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin); str.length(0); if (!item) return "(Item*)nullptr"; - item->print(&str ,QT_ORDINARY); + item->print(&str, QT_ORDINARY); if (str.c_ptr() == buf) return buf; else @@ -9773,7 +8924,6 @@ const char *dbug_print_item(Item* const item) #endif /*DBUG_OFF*/ - /** SQL layer calls this function to push an index condition. @@ -9795,8 +8945,9 @@ const char *dbug_print_item(Item* const item) Part of condition we couldn't check (always nullptr). */ -class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* const idx_cond) -{ +class Item *ha_rocksdb::idx_cond_push(uint keyno, class Item *const idx_cond) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(keyno != MAX_KEY); DBUG_ASSERT(idx_cond != nullptr); @@ -9805,10 +8956,9 @@ class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* const idx_cond) in_range_check_pushed_down = TRUE; /* We will check the whole condition */ - return nullptr; + DBUG_RETURN(nullptr); } - /* @brief Check the index condition. 
@@ -9827,13 +8977,11 @@ class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* const idx_cond) and return it) */ -enum icp_result ha_rocksdb::check_index_cond() const -{ +enum icp_result ha_rocksdb::check_index_cond() const { DBUG_ASSERT(pushed_idx_cond); DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY); - if (end_range && compare_key_icp(end_range) > 0) - { + if (end_range && compare_key_icp(end_range) > 0) { /* caller should return HA_ERR_END_OF_FILE already */ return ICP_OUT_OF_RANGE; } @@ -9841,7 +8989,6 @@ enum icp_result ha_rocksdb::check_index_cond() const return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH; } - /** Checking if an index is used for ascending scan or not @@ -9853,37 +9000,32 @@ enum icp_result ha_rocksdb::check_index_cond() const @param kd @param find_flag */ -bool ha_rocksdb::is_ascending(const Rdb_key_def& kd, - enum ha_rkey_function find_flag) const -{ +bool ha_rocksdb::is_ascending(const Rdb_key_def &kd, + enum ha_rkey_function find_flag) const { bool is_ascending; switch (find_flag) { - case HA_READ_KEY_EXACT: - { + case HA_READ_KEY_EXACT: { is_ascending = !kd.m_is_reverse_cf; break; } - case HA_READ_PREFIX: - { - is_ascending= true; + case HA_READ_PREFIX: { + is_ascending = true; break; } case HA_READ_KEY_OR_NEXT: - case HA_READ_AFTER_KEY: - { + case HA_READ_AFTER_KEY: { is_ascending = !kd.m_is_reverse_cf; break; } case HA_READ_KEY_OR_PREV: case HA_READ_BEFORE_KEY: case HA_READ_PREFIX_LAST: - case HA_READ_PREFIX_LAST_OR_PREV: - { + case HA_READ_PREFIX_LAST_OR_PREV: { is_ascending = kd.m_is_reverse_cf; break; } default: - is_ascending= false; + is_ascending = false; } return is_ascending; } @@ -9893,16 +9035,23 @@ bool ha_rocksdb::is_ascending(const Rdb_key_def& kd, */ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( - TABLE* /* altered_table */, - my_core::Alter_inplace_info* const ha_alter_info) -{ - DBUG_ENTER("check_if_supported_alter"); + TABLE *altered_table, my_core::Alter_inplace_info 
*const ha_alter_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(ha_alter_info != nullptr); if (ha_alter_info->handler_flags & ~(my_core::Alter_inplace_info::DROP_INDEX | my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | - my_core::Alter_inplace_info::ADD_INDEX)) - { + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { + DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + /* We don't support unique keys on table w/ no primary keys */ + if ((ha_alter_info->handler_flags & + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) && + has_hidden_pk(altered_table)) { DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -9938,37 +9087,41 @@ my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( @retval false Success */ bool ha_rocksdb::prepare_inplace_alter_table( - TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info) -{ - DBUG_ENTER("prepare_inplace_alter_table"); - const uint n_dropped_keys= ha_alter_info->index_drop_count; - const uint n_added_keys= ha_alter_info->index_add_count; - const uint old_n_keys= m_tbl_def->m_key_count; - uint new_n_keys= altered_table->s->keys; - if (has_hidden_pk(altered_table)) - { + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + + const uint n_dropped_keys = ha_alter_info->index_drop_count; + const uint n_added_keys = ha_alter_info->index_add_count; + const uint old_n_keys = m_tbl_def->m_key_count; + uint new_n_keys = altered_table->s->keys; + + if (has_hidden_pk(altered_table)) { new_n_keys += 1; } - std::shared_ptr* const old_key_descr= m_tbl_def->m_key_descr_arr; - std::shared_ptr* const new_key_descr= + std::shared_ptr *const old_key_descr = + m_tbl_def->m_key_descr_arr; + std::shared_ptr *const new_key_descr = new std::shared_ptr[new_n_keys]; - Rdb_tbl_def* const new_tdef= new 
Rdb_tbl_def(m_tbl_def->full_tablename()); - new_tdef->m_key_descr_arr= new_key_descr; - new_tdef->m_key_count= new_n_keys; - new_tdef->m_auto_incr_val= - m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); - new_tdef->m_hidden_pk_val= - m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); + Rdb_tbl_def *const new_tdef = new Rdb_tbl_def(m_tbl_def->full_tablename()); + new_tdef->m_key_descr_arr = new_key_descr; + new_tdef->m_key_count = new_n_keys; + new_tdef->m_auto_incr_val = + m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); + new_tdef->m_hidden_pk_val = + m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); if (ha_alter_info->handler_flags & - (my_core::Alter_inplace_info::DROP_INDEX | - my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | - my_core::Alter_inplace_info::ADD_INDEX) - && create_key_defs(altered_table, new_tdef, table, m_tbl_def)) - { + (my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) && + create_key_defs(altered_table, new_tdef, table, m_tbl_def)) { /* Delete the new key descriptors */ delete[] new_key_descr; @@ -9976,9 +9129,13 @@ bool ha_rocksdb::prepare_inplace_alter_table( Explicitly mark as nullptr so we don't accidentally remove entries from data dictionary on cleanup (or cause double delete[]). 
*/ - new_tdef->m_key_descr_arr= nullptr; + new_tdef->m_key_descr_arr = nullptr; delete new_tdef; - DBUG_RETURN(1); + + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed creating new key definitions during alter.", + MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); } std::unordered_set> added_indexes; @@ -9986,16 +9143,13 @@ bool ha_rocksdb::prepare_inplace_alter_table( uint i; uint j; - const KEY* key; + const KEY *key; /* Determine which(if any) key definition(s) need to be dropped */ - for (i = 0; i < ha_alter_info->index_drop_count; i++) - { - const KEY* const key = ha_alter_info->index_drop_buffer[i]; - for (j = 0; j < old_n_keys; j++) - { - if (!old_key_descr[j]->m_name.compare(key->name)) - { + for (i = 0; i < ha_alter_info->index_drop_count; i++) { + const KEY *const key = ha_alter_info->index_drop_buffer[i]; + for (j = 0; j < old_n_keys; j++) { + if (!old_key_descr[j]->m_name.compare(key->name)) { dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); break; } @@ -10005,14 +9159,10 @@ bool ha_rocksdb::prepare_inplace_alter_table( DBUG_ASSERT(dropped_index_ids.size() == ha_alter_info->index_drop_count); /* Determine which(if any) key definitions(s) need to be added */ - for (i = 0; i < ha_alter_info->index_add_count; i++) - { - key = &ha_alter_info->key_info_buffer[ - ha_alter_info->index_add_buffer[i]]; - for (j = 0; j < new_n_keys; j++) - { - if (!new_key_descr[j]->m_name.compare(key->name)) - { + for (i = 0; i < ha_alter_info->index_add_count; i++) { + key = &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; + for (j = 0; j < new_n_keys; j++) { + if (!new_key_descr[j]->m_name.compare(key->name)) { added_indexes.insert(new_key_descr[j]); break; } @@ -10021,11 +9171,11 @@ bool ha_rocksdb::prepare_inplace_alter_table( DBUG_ASSERT(added_indexes.size() == ha_alter_info->index_add_count); - ha_alter_info->handler_ctx= new Rdb_inplace_alter_ctx( - new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, - added_indexes, 
dropped_index_ids, n_added_keys, n_dropped_keys); + ha_alter_info->handler_ctx = new Rdb_inplace_alter_ctx( + new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, + added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys); - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } /** @@ -10048,53 +9198,74 @@ bool ha_rocksdb::prepare_inplace_alter_table( @retval true Error @retval false Success */ -bool ha_rocksdb::inplace_alter_table(TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info) -{ - /* Do nothing for now */ - DBUG_ENTER("inplace_alter_table"); - DBUG_ASSERT(ha_alter_info->handler_ctx); +bool ha_rocksdb::inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) { + DBUG_ENTER_FUNC(); - Rdb_inplace_alter_ctx* const ctx= - static_cast (ha_alter_info->handler_ctx); + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + DBUG_ASSERT(ha_alter_info->handler_ctx != nullptr); - if (ha_alter_info->handler_flags & my_core::Alter_inplace_info::ADD_INDEX) - { + Rdb_inplace_alter_ctx *const ctx = + static_cast(ha_alter_info->handler_ctx); + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { /* Buffers need to be set up again to account for new, possibly longer secondary keys. */ free_key_buffers(); - if (alloc_key_buffers(altered_table, ctx->m_new_tdef)) - { - DBUG_RETURN(1); + + DBUG_ASSERT(ctx != nullptr); + + /* + If adding unique index, allocate special buffers for duplicate checking. 
+ */ + int err; + if ((err = alloc_key_buffers( + altered_table, ctx->m_new_tdef, + ha_alter_info->handler_flags & + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX))) { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed allocating key buffers during alter.", + MYF(0)); + DBUG_RETURN(err); } - /* Populate all new secondary keys by scanning primary key */ - if (inplace_populate_sk(altered_table, ctx->m_added_indexes)) - { - DBUG_RETURN(1); + /* Populate all new secondary keys by scanning the primary key. */ + if ((err = inplace_populate_sk(altered_table, ctx->m_added_indexes))) { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed populating secondary key during alter.", + MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); } } - DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", DBUG_RETURN(1);); - DBUG_RETURN(0); + DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", { + dbug_create_err_inplace_alter(); + DBUG_RETURN(HA_EXIT_FAILURE); + };); + + DBUG_RETURN(HA_EXIT_SUCCESS); } /** Scan the Primary Key index entries and populate the new secondary keys. 
*/ -int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, - const std::unordered_set>& indexes) -{ - DBUG_ENTER("ha_rocksdb::inplace_populate_sk"); - const std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch* const batch= wb.get(); +int ha_rocksdb::inplace_populate_sk( + TABLE *const new_table_arg, + const std::unordered_set> &indexes) { + DBUG_ENTER_FUNC(); + + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); /* Update the data dictionary */ std::unordered_set create_index_ids; - for (const auto& index : indexes) - { + for (const auto &index : indexes) { create_index_ids.insert(index->get_gl_index_id()); } dict_manager.add_create_index(create_index_ids, batch); @@ -10102,8 +9273,8 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, const bool hidden_pk_exists = has_hidden_pk(table); - int res= 0; - Rdb_transaction *tx= get_or_create_tx(table->in_use); + int res = 0; + Rdb_transaction *tx = get_or_create_tx(table->in_use); /* There is one specific scenario where m_sst_info may not be nullptr. This @@ -10133,28 +9304,26 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, open handlers have been closed at this point, and the one we're on is the only one left. 
*/ - if (m_sst_info != nullptr) - { - if ((res= finalize_bulk_load())) - { + if (m_sst_info != nullptr) { + if ((res = finalize_bulk_load())) { DBUG_RETURN(res); } tx->commit(); } - const ulonglong rdb_merge_buf_size= THDVAR(ha_thd(), merge_buf_size); - const ulonglong rdb_merge_combine_read_size= THDVAR(ha_thd(), - merge_combine_read_size); + const ulonglong rdb_merge_buf_size = THDVAR(ha_thd(), merge_buf_size); + const ulonglong rdb_merge_combine_read_size = + THDVAR(ha_thd(), merge_combine_read_size); + + for (const auto &index : indexes) { + const rocksdb::Comparator *index_comp = index->get_cf()->GetComparator(); + bool is_unique_index = + new_table_arg->key_info[index->get_keyno()].flags & HA_NOSAME; - for (const auto& index : indexes) - { - const rocksdb::Comparator* index_comp= index->get_cf()->GetComparator(); Rdb_index_merge rdb_merge(thd_rocksdb_tmpdir(), rdb_merge_buf_size, - rdb_merge_combine_read_size, - index_comp); + rdb_merge_combine_read_size, index_comp); - if ((res= rdb_merge.init())) - { + if ((res = rdb_merge.init())) { DBUG_RETURN(res); } @@ -10163,17 +9332,14 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, as the pk index position may have changed in the case of hidden primary keys. 
*/ - const uint pk= pk_index(table, m_tbl_def); + const uint pk = pk_index(table, m_tbl_def); ha_index_init(pk, true); /* Scan each record in the primary key in order */ for (res = index_first(table->record[0]); res == 0; - res = index_next(table->record[0])) - { - longlong hidden_pk_id= 0; - if (hidden_pk_exists && - read_hidden_pk_id_from_rowkey(&hidden_pk_id)) - { + res = index_next(table->record[0])) { + longlong hidden_pk_id = 0; + if (hidden_pk_exists && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { // NO_LINT_DEBUG sql_print_error("Error retrieving hidden pk id."); ha_index_end(); @@ -10181,32 +9347,27 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, } /* Create new secondary index entry */ - const int new_packed_size= index->pack_record( - new_table_arg, m_pack_buffer, - table->record[0], - m_sk_packed_tuple, &m_sk_tails, - should_store_row_debug_checksums(), - hidden_pk_id); + const int new_packed_size = index->pack_record( + new_table_arg, m_pack_buffer, table->record[0], m_sk_packed_tuple, + &m_sk_tails, should_store_row_debug_checksums(), hidden_pk_id); - const rocksdb::Slice key= rocksdb::Slice( - reinterpret_cast(m_sk_packed_tuple), new_packed_size); - const rocksdb::Slice val= rocksdb::Slice( - reinterpret_cast(m_sk_tails.ptr()), - m_sk_tails.get_current_pos()); + const rocksdb::Slice key = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + const rocksdb::Slice val = + rocksdb::Slice(reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); /* Add record to offset tree in preparation for writing out to disk in sorted chunks. 
*/ - if ((res= rdb_merge.add(key, val))) - { + if ((res = rdb_merge.add(key, val))) { ha_index_end(); DBUG_RETURN(res); } } - if (res != HA_ERR_END_OF_FILE) - { + if (res != HA_ERR_END_OF_FILE) { // NO_LINT_DEBUG sql_print_error("Error retrieving index entry from primary key."); ha_index_end(); @@ -10221,13 +9382,39 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, */ rocksdb::Slice merge_key; rocksdb::Slice merge_val; - while ((res= rdb_merge.next(&merge_key, &merge_val)) == 0) - { + + struct unique_sk_buf_info sk_info; + sk_info.dup_sk_buf = m_dup_sk_packed_tuple; + sk_info.dup_sk_buf_old = m_dup_sk_packed_tuple_old; + + while ((res = rdb_merge.next(&merge_key, &merge_val)) == 0) { + /* Perform uniqueness check if needed */ + if (is_unique_index) { + if (check_duplicate_sk(new_table_arg, *index, &merge_key, &sk_info)) { + /* + Duplicate entry found when trying to create unique secondary key. + We need to unpack the record into new_table_arg->record[0] as it + is used inside print_keydup_error so that the error message shows + the duplicate record. + */ + if (index->unpack_record(new_table_arg, new_table_arg->record[0], + &merge_key, nullptr, + m_verify_row_debug_checksums)) { + /* Should never reach here */ + DBUG_ASSERT(0); + } + + print_keydup_error(new_table_arg, + &new_table_arg->key_info[index->get_keyno()], + MYF(0)); + DBUG_RETURN(ER_DUP_ENTRY); + } + } + /* Insert key and slice to SST via SSTFileWriter API. */ - if ((res= bulk_load_key(tx, *index, merge_key, merge_val))) - { + if ((res = bulk_load_key(tx, *index, merge_key, merge_val))) { break; } } @@ -10236,15 +9423,13 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, Here, res == -1 means that we are finished, while > 0 means an error occurred. 
*/ - if (res > 0) - { + if (res > 0) { // NO_LINT_DEBUG sql_print_error("Error while bulk loading keys in external merge sort."); DBUG_RETURN(res); } - if ((res= tx->finish_bulk_load())) - { + if ((res = tx->finish_bulk_load())) { // NO_LINT_DEBUG sql_print_error("Error finishing bulk load."); DBUG_RETURN(res); @@ -10294,14 +9479,15 @@ int ha_rocksdb::inplace_populate_sk(const TABLE* const new_table_arg, @retval false Success */ bool ha_rocksdb::commit_inplace_alter_table( - my_core::TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info, - bool commit) -{ - Rdb_inplace_alter_ctx* const ctx0= - static_cast (ha_alter_info->handler_ctx); + my_core::TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info, bool commit) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("commit_inplace_alter_table"); + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + + Rdb_inplace_alter_ctx *const ctx0 = + static_cast(ha_alter_info->handler_ctx); /* IMPORTANT: When rollback is requested, mysql will abort with @@ -10313,12 +9499,10 @@ bool ha_rocksdb::commit_inplace_alter_table( is done for each partition. A successful commit call only executes once for all partitions. */ - if (!commit) - { + if (!commit) { /* If ctx has not been created yet, nothing to do here */ - if (!ctx0) - { - DBUG_RETURN(0); + if (!ctx0) { + DBUG_RETURN(HA_EXIT_SUCCESS); } /* @@ -10326,12 +9510,10 @@ bool ha_rocksdb::commit_inplace_alter_table( erase the mappings inside the ddl_manager, as the old_key_descr is still using them. 
*/ - if (ctx0->m_new_key_descr) - { + if (ctx0->m_new_key_descr) { /* Delete the new key descriptors */ - for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++) - { - ctx0->m_new_key_descr[i]= nullptr; + for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++) { + ctx0->m_new_key_descr[i] = nullptr; } delete[] ctx0->m_new_key_descr; @@ -10341,7 +9523,10 @@ bool ha_rocksdb::commit_inplace_alter_table( delete ctx0->m_new_tdef; } - DBUG_RETURN(0); + /* Rollback any partially created indexes */ + dict_manager.rollback_ongoing_index_creation(); + + DBUG_RETURN(HA_EXIT_SUCCESS); } DBUG_ASSERT(ctx0); @@ -10350,17 +9535,13 @@ bool ha_rocksdb::commit_inplace_alter_table( For partitioned tables, we need to commit all changes to all tables at once, unlike in the other inplace alter API methods. */ - inplace_alter_handler_ctx** ctx_array; - inplace_alter_handler_ctx* ctx_single[2]; + inplace_alter_handler_ctx **ctx_array; + inplace_alter_handler_ctx *ctx_single[2]; - if (ha_alter_info->group_commit_ctx) - { - DBUG_EXECUTE_IF("crash_during_index_creation_partition", - DBUG_SUICIDE();); + if (ha_alter_info->group_commit_ctx) { + DBUG_EXECUTE_IF("crash_during_index_creation_partition", DBUG_SUICIDE();); ctx_array = ha_alter_info->group_commit_ctx; - } - else - { + } else { ctx_single[0] = ctx0; ctx_single[1] = nullptr; ctx_array = ctx_single; @@ -10372,32 +9553,29 @@ bool ha_rocksdb::commit_inplace_alter_table( if (ha_alter_info->handler_flags & (my_core::Alter_inplace_info::DROP_INDEX | my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | - my_core::Alter_inplace_info::ADD_INDEX)) - { - const std::unique_ptr wb= dict_manager.begin(); - rocksdb::WriteBatch* const batch= wb.get(); + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); std::unordered_set create_index_ids; - m_tbl_def= ctx0->m_new_tdef; - m_key_descr_arr= m_tbl_def->m_key_descr_arr; 
- m_pk_descr= m_key_descr_arr[pk_index(altered_table, m_tbl_def)]; + m_tbl_def = ctx0->m_new_tdef; + m_key_descr_arr = m_tbl_def->m_key_descr_arr; + m_pk_descr = m_key_descr_arr[pk_index(altered_table, m_tbl_def)]; dict_manager.lock(); - for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) - { - Rdb_inplace_alter_ctx* const ctx= - static_cast (*pctx); + for (inplace_alter_handler_ctx **pctx = ctx_array; *pctx; pctx++) { + Rdb_inplace_alter_ctx *const ctx = + static_cast(*pctx); /* Mark indexes to be dropped */ dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch); - for (const auto& index : ctx->m_added_indexes) - { + for (const auto &index : ctx->m_added_indexes) { create_index_ids.insert(index->get_gl_index_id()); } - if (ddl_manager.put_and_write(ctx->m_new_tdef, batch)) - { + if (ddl_manager.put_and_write(ctx->m_new_tdef, batch)) { /* Failed to write new entry into data dictionary, this should never happen. @@ -10406,8 +9584,7 @@ bool ha_rocksdb::commit_inplace_alter_table( } } - if (dict_manager.commit(batch)) - { + if (dict_manager.commit(batch)) { /* Should never reach here. We assume MyRocks will abort if commit fails. */ @@ -10417,8 +9594,8 @@ bool ha_rocksdb::commit_inplace_alter_table( dict_manager.unlock(); /* Mark ongoing create indexes as finished/remove from data dictionary */ - dict_manager.finish_indexes_operation(create_index_ids, - Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + dict_manager.finish_indexes_operation( + create_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); /* We need to recalculate the index stats here manually. The reason is that @@ -10427,8 +9604,7 @@ bool ha_rocksdb::commit_inplace_alter_table( prevents us from updating the stats normally as the ddl_manager cannot find the proper gl_index_ids yet during adjust_stats calls. 
*/ - if (calculate_stats(altered_table, nullptr, nullptr)) - { + if (calculate_stats(altered_table, nullptr, nullptr)) { /* Failed to update index statistics, should never happen */ DBUG_ASSERT(0); } @@ -10436,29 +9612,28 @@ bool ha_rocksdb::commit_inplace_alter_table( rdb_drop_idx_thread.signal(); } - DBUG_RETURN(0); + DBUG_RETURN(HA_EXIT_SUCCESS); } #define SHOW_FNAME(name) rocksdb_show_##name -#define DEF_SHOW_FUNC(name, key) \ - static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR *var, char *buff) \ - { \ - rocksdb_status_counters.name = \ - rocksdb_stats->getTickerCount(rocksdb::key); \ - var->type = SHOW_LONGLONG; \ - var->value = (char *)&rocksdb_status_counters.name; \ - return 0; \ +#define DEF_SHOW_FUNC(name, key) \ + static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR * var, char *buff) { \ + rocksdb_status_counters.name = \ + rocksdb_stats->getTickerCount(rocksdb::key); \ + var->type = SHOW_LONGLONG; \ + var->value = (char *)&rocksdb_status_counters.name; \ + return HA_EXIT_SUCCESS; \ } -#define DEF_STATUS_VAR(name) \ - {"rocksdb_" #name, (char*) &SHOW_FNAME(name), SHOW_FUNC} +#define DEF_STATUS_VAR(name) \ + { "rocksdb_" #name, (char *)&SHOW_FNAME(name), SHOW_FUNC } -#define DEF_STATUS_VAR_PTR(name, ptr, option) \ - {"rocksdb_" name, (char*) ptr, option} +#define DEF_STATUS_VAR_PTR(name, ptr, option) \ + { "rocksdb_" name, (char *)ptr, option } -#define DEF_STATUS_VAR_FUNC(name, ptr, option) \ - {name, reinterpret_cast(ptr), option} +#define DEF_STATUS_VAR_FUNC(name, ptr, option) \ + { name, reinterpret_cast(ptr), option } struct rocksdb_status_counters_t { uint64_t block_cache_miss; @@ -10582,113 +9757,108 @@ static void myrocks_update_status() { export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED]; } -static SHOW_VAR myrocks_status_variables[]= { - DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted, - SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted, - SHOW_LONGLONG), - 
DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, - SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted, - SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("system_rows_inserted", - &export_stats.system_rows_inserted, SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read, - SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("system_rows_updated", &export_stats.system_rows_updated, - SHOW_LONGLONG), +static SHOW_VAR myrocks_status_variables[] = { + DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_deleted", + &export_stats.system_rows_deleted, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_inserted", + &export_stats.system_rows_inserted, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_updated", + &export_stats.system_rows_updated, SHOW_LONGLONG), - {NullS, NullS, SHOW_LONG} -}; + {NullS, NullS, SHOW_LONG}}; -static void show_myrocks_vars(THD* thd, SHOW_VAR* var, char* buff) { +static void show_myrocks_vars(THD *thd, SHOW_VAR *var, char *buff) { myrocks_update_status(); var->type = SHOW_ARRAY; - var->value = reinterpret_cast(&myrocks_status_variables); + var->value = reinterpret_cast(&myrocks_status_variables); } -static SHOW_VAR rocksdb_status_vars[]= { - DEF_STATUS_VAR(block_cache_miss), - DEF_STATUS_VAR(block_cache_hit), - DEF_STATUS_VAR(block_cache_add), - DEF_STATUS_VAR(block_cache_index_miss), - DEF_STATUS_VAR(block_cache_index_hit), - DEF_STATUS_VAR(block_cache_filter_miss), - 
DEF_STATUS_VAR(block_cache_filter_hit), - DEF_STATUS_VAR(block_cache_data_miss), - DEF_STATUS_VAR(block_cache_data_hit), - DEF_STATUS_VAR(bloom_filter_useful), - DEF_STATUS_VAR(memtable_hit), - DEF_STATUS_VAR(memtable_miss), - DEF_STATUS_VAR(compaction_key_drop_new), - DEF_STATUS_VAR(compaction_key_drop_obsolete), - DEF_STATUS_VAR(compaction_key_drop_user), - DEF_STATUS_VAR(number_keys_written), - DEF_STATUS_VAR(number_keys_read), - DEF_STATUS_VAR(number_keys_updated), - DEF_STATUS_VAR(bytes_written), - DEF_STATUS_VAR(bytes_read), - DEF_STATUS_VAR(no_file_closes), - DEF_STATUS_VAR(no_file_opens), - DEF_STATUS_VAR(no_file_errors), - DEF_STATUS_VAR(l0_slowdown_micros), - DEF_STATUS_VAR(memtable_compaction_micros), - DEF_STATUS_VAR(l0_num_files_stall_micros), - DEF_STATUS_VAR(rate_limit_delay_millis), - DEF_STATUS_VAR(num_iterators), - DEF_STATUS_VAR(number_multiget_get), - DEF_STATUS_VAR(number_multiget_keys_read), - DEF_STATUS_VAR(number_multiget_bytes_read), - DEF_STATUS_VAR(number_deletes_filtered), - DEF_STATUS_VAR(number_merge_failures), - DEF_STATUS_VAR(bloom_filter_prefix_checked), - DEF_STATUS_VAR(bloom_filter_prefix_useful), - DEF_STATUS_VAR(number_reseeks_iteration), - DEF_STATUS_VAR(getupdatessince_calls), - DEF_STATUS_VAR(block_cachecompressed_miss), - DEF_STATUS_VAR(block_cachecompressed_hit), - DEF_STATUS_VAR(wal_synced), - DEF_STATUS_VAR(wal_bytes), - DEF_STATUS_VAR(write_self), - DEF_STATUS_VAR(write_other), - DEF_STATUS_VAR(write_timedout), - DEF_STATUS_VAR(write_wal), - DEF_STATUS_VAR(flush_write_bytes), - DEF_STATUS_VAR(compact_read_bytes), - DEF_STATUS_VAR(compact_write_bytes), - DEF_STATUS_VAR(number_superversion_acquires), - DEF_STATUS_VAR(number_superversion_releases), - DEF_STATUS_VAR(number_superversion_cleanups), - DEF_STATUS_VAR(number_block_not_compressed), - DEF_STATUS_VAR_PTR("snapshot_conflict_errors", - &rocksdb_snapshot_conflict_errors, - SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("wal_group_syncs", - &rocksdb_wal_group_syncs, - 
SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, - SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete, - SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_sst_entry_singledelete", - &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge, - SHOW_LONGLONG), - DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other, - SHOW_LONGLONG), - {"rocksdb", reinterpret_cast(&show_myrocks_vars), SHOW_FUNC}, - {NullS, NullS, SHOW_LONG} -}; - +static SHOW_VAR rocksdb_status_vars[] = { + DEF_STATUS_VAR(block_cache_miss), + DEF_STATUS_VAR(block_cache_hit), + DEF_STATUS_VAR(block_cache_add), + DEF_STATUS_VAR(block_cache_index_miss), + DEF_STATUS_VAR(block_cache_index_hit), + DEF_STATUS_VAR(block_cache_filter_miss), + DEF_STATUS_VAR(block_cache_filter_hit), + DEF_STATUS_VAR(block_cache_data_miss), + DEF_STATUS_VAR(block_cache_data_hit), + DEF_STATUS_VAR(bloom_filter_useful), + DEF_STATUS_VAR(memtable_hit), + DEF_STATUS_VAR(memtable_miss), + DEF_STATUS_VAR(compaction_key_drop_new), + DEF_STATUS_VAR(compaction_key_drop_obsolete), + DEF_STATUS_VAR(compaction_key_drop_user), + DEF_STATUS_VAR(number_keys_written), + DEF_STATUS_VAR(number_keys_read), + DEF_STATUS_VAR(number_keys_updated), + DEF_STATUS_VAR(bytes_written), + DEF_STATUS_VAR(bytes_read), + DEF_STATUS_VAR(no_file_closes), + DEF_STATUS_VAR(no_file_opens), + DEF_STATUS_VAR(no_file_errors), + DEF_STATUS_VAR(l0_slowdown_micros), + DEF_STATUS_VAR(memtable_compaction_micros), + DEF_STATUS_VAR(l0_num_files_stall_micros), + DEF_STATUS_VAR(rate_limit_delay_millis), + DEF_STATUS_VAR(num_iterators), + DEF_STATUS_VAR(number_multiget_get), + DEF_STATUS_VAR(number_multiget_keys_read), + DEF_STATUS_VAR(number_multiget_bytes_read), + DEF_STATUS_VAR(number_deletes_filtered), + 
DEF_STATUS_VAR(number_merge_failures), + DEF_STATUS_VAR(bloom_filter_prefix_checked), + DEF_STATUS_VAR(bloom_filter_prefix_useful), + DEF_STATUS_VAR(number_reseeks_iteration), + DEF_STATUS_VAR(getupdatessince_calls), + DEF_STATUS_VAR(block_cachecompressed_miss), + DEF_STATUS_VAR(block_cachecompressed_hit), + DEF_STATUS_VAR(wal_synced), + DEF_STATUS_VAR(wal_bytes), + DEF_STATUS_VAR(write_self), + DEF_STATUS_VAR(write_other), + DEF_STATUS_VAR(write_timedout), + DEF_STATUS_VAR(write_wal), + DEF_STATUS_VAR(flush_write_bytes), + DEF_STATUS_VAR(compact_read_bytes), + DEF_STATUS_VAR(compact_write_bytes), + DEF_STATUS_VAR(number_superversion_acquires), + DEF_STATUS_VAR(number_superversion_releases), + DEF_STATUS_VAR(number_superversion_cleanups), + DEF_STATUS_VAR(number_block_not_compressed), + DEF_STATUS_VAR_PTR("snapshot_conflict_errors", + &rocksdb_snapshot_conflict_errors, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("wal_group_syncs", &rocksdb_wal_group_syncs, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_singledelete", + &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other, + SHOW_LONGLONG), + {"rocksdb", reinterpret_cast(&show_myrocks_vars), SHOW_FUNC}, + {NullS, NullS, SHOW_LONG}}; /* Background thread's main logic */ -void Rdb_background_thread::run() -{ +void Rdb_background_thread::run() { // How many seconds to wait till flushing the WAL next time. 
const int WAKE_UP_INTERVAL = 1; @@ -10696,24 +9866,22 @@ void Rdb_background_thread::run() clock_gettime(CLOCK_REALTIME, &ts_next_sync); ts_next_sync.tv_sec += WAKE_UP_INTERVAL; - for (;;) - { + for (;;) { // Wait until the next timeout or until we receive a signal to stop the // thread. Request to stop the thread should only be triggered when the // storage engine is being unloaded. mysql_mutex_lock(&m_signal_mutex); - const auto ret __attribute__((__unused__)) = mysql_cond_timedwait( - &m_signal_cond, &m_signal_mutex, &ts_next_sync); + const auto ret MY_ATTRIBUTE((__unused__)) = + mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts_next_sync); // Check that we receive only the expected error codes. DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); - const bool local_stop= m_stop; - const bool local_save_stats= m_save_stats; + const bool local_stop = m_stop; + const bool local_save_stats = m_save_stats; reset(); mysql_mutex_unlock(&m_signal_mutex); - if (local_stop) - { + if (local_stop) { // If we're here then that's because condition variable was signaled by // another thread and we're shutting down. Break out the loop to make // sure that shutdown thread can proceed. @@ -10723,8 +9891,7 @@ void Rdb_background_thread::run() // This path should be taken only when the timer expired. DBUG_ASSERT(ret == ETIMEDOUT); - if (local_save_stats) - { + if (local_save_stats) { ddl_manager.persist_stats(); } @@ -10732,10 +9899,9 @@ void Rdb_background_thread::run() clock_gettime(CLOCK_REALTIME, &ts); // Flush the WAL. - if (rdb && rocksdb_background_sync) - { + if (rdb && rocksdb_background_sync) { DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); - const rocksdb::Status s= rdb->SyncWAL(); + const rocksdb::Status s = rdb->SyncWAL(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); } @@ -10743,14 +9909,13 @@ void Rdb_background_thread::run() // Set the next timestamp for mysql_cond_timedwait() (which ends up calling // pthread_cond_timedwait()) to wait on. 
- ts_next_sync.tv_sec= ts.tv_sec + WAKE_UP_INTERVAL; + ts_next_sync.tv_sec = ts.tv_sec + WAKE_UP_INTERVAL; } // save remaining stats which might've left unsaved ddl_manager.persist_stats(); } - /** Deciding if it is possible to use bloom filter or not. @@ -10769,22 +9934,17 @@ void Rdb_background_thread::run() @param use_all_keys True if all key parts are set with equal conditions. This is aware of extended keys. */ -bool can_use_bloom_filter(THD *thd, - const Rdb_key_def& kd, +bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, const rocksdb::Slice &eq_cond, - const bool use_all_keys, - bool is_ascending) -{ - bool can_use= false; + const bool use_all_keys, bool is_ascending) { + bool can_use = false; - if (THDVAR(thd, skip_bloom_filter_on_read)) - { + if (THDVAR(thd, skip_bloom_filter_on_read)) { return can_use; } - rocksdb::Options opt = rdb->GetOptions(kd.get_cf()); - if (opt.prefix_extractor) - { + const rocksdb::SliceTransform *prefix_extractor = kd.get_extractor(); + if (prefix_extractor) { /* This is an optimized use case for CappedPrefixTransform. If eq_cond length >= prefix extractor length and if @@ -10803,92 +9963,113 @@ bool can_use_bloom_filter(THD *thd, shorter require all parts of the key to be available for the short key match. */ - if (use_all_keys && opt.prefix_extractor->InRange(eq_cond)) - can_use= true; + if (use_all_keys && prefix_extractor->InRange(eq_cond)) + can_use = true; else if (!is_ascending) - can_use= false; - else if (opt.prefix_extractor->SameResultWhenAppended(eq_cond)) - can_use= true; + can_use = false; + else if (prefix_extractor->SameResultWhenAppended(eq_cond)) + can_use = true; else - can_use= false; - } else - { + can_use = false; + } else { /* if prefix extractor is not defined, all key parts have to be used by eq_cond. 
*/ if (use_all_keys) - can_use= true; + can_use = true; else - can_use= false; + can_use = false; } return can_use; } /* For modules that need access to the global data structures */ -rocksdb::TransactionDB *rdb_get_rocksdb_db() -{ - return rdb; -} +rocksdb::TransactionDB *rdb_get_rocksdb_db() { return rdb; } -Rdb_cf_manager& rdb_get_cf_manager() -{ - return cf_manager; -} +Rdb_cf_manager &rdb_get_cf_manager() { return cf_manager; } -rocksdb::BlockBasedTableOptions& rdb_get_table_options() -{ +rocksdb::BlockBasedTableOptions &rdb_get_table_options() { return rocksdb_tbl_options; } - -int rdb_get_table_perf_counters(const char* const tablename, - Rdb_perf_counters* const counters) -{ +int rdb_get_table_perf_counters(const char *const tablename, + Rdb_perf_counters *const counters) { DBUG_ASSERT(counters != nullptr); DBUG_ASSERT(tablename != nullptr); Rdb_table_handler *table_handler; - table_handler= rdb_open_tables.get_table_handler(tablename); - if (table_handler == nullptr) - { + table_handler = rdb_open_tables.get_table_handler(tablename); + if (table_handler == nullptr) { return HA_ERR_INTERNAL_ERROR; } counters->load(table_handler->m_table_perf_context); rdb_open_tables.release_table_handler(table_handler); - return 0; + return HA_EXIT_SUCCESS; } +const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type) { + // If this assertion fails then this means that a member has been either added + // to or removed from RDB_IO_ERROR_TYPE enum and this function needs to be + // changed to return the appropriate value. 
+ static_assert(RDB_IO_ERROR_LAST == 4, "Please handle all the error types."); -void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type) -{ - if (status.IsIOError()) - { + switch (err_type) { + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_TX_COMMIT: + return "RDB_IO_ERROR_TX_COMMIT"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_DICT_COMMIT: + return "RDB_IO_ERROR_DICT_COMMIT"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_BG_THREAD: + return "RDB_IO_ERROR_BG_THREAD"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_GENERAL: + return "RDB_IO_ERROR_GENERAL"; + default: + DBUG_ASSERT(false); + return "(unknown)"; + } +} + +// In case of core dump generation we want this function NOT to be optimized +// so that we can capture as much data as possible to debug the root cause +// more efficiently. +#pragma GCC push_options +#pragma GCC optimize("O0") + +void rdb_handle_io_error(const rocksdb::Status status, + const RDB_IO_ERROR_TYPE err_type) { + if (status.IsIOError()) { switch (err_type) { case RDB_IO_ERROR_TX_COMMIT: - case RDB_IO_ERROR_DICT_COMMIT: - { - sql_print_error("RocksDB: Failed to write to WAL - status %d, %s", - status.code(), status.ToString().c_str()); - sql_print_error("RocksDB: Aborting on WAL write error."); + case RDB_IO_ERROR_DICT_COMMIT: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed to write to WAL. Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on WAL write error."); abort_with_stack_traces(); break; } - case RDB_IO_ERROR_BG_THREAD: - { - sql_print_warning("RocksDB: BG Thread failed to write to RocksDB " - "- status %d, %s", status.code(), + case RDB_IO_ERROR_BG_THREAD: { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: BG thread failed to write to RocksDB. 
" + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), status.ToString().c_str()); break; } - case RDB_IO_ERROR_GENERAL: - { - sql_print_error("RocksDB: Failed on I/O - status %d, %s", - status.code(), status.ToString().c_str()); - sql_print_error("RocksDB: Aborting on I/O error."); + case RDB_IO_ERROR_GENERAL: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed on I/O. Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on I/O error."); abort_with_stack_traces(); break; } @@ -10896,89 +10077,77 @@ void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type) DBUG_ASSERT(0); break; } - } - else if (status.IsCorruption()) - { + } else if (status.IsCorruption()) { /* NO_LINT_DEBUG */ - sql_print_error("RocksDB: Data Corruption detected! %d, %s", - status.code(), status.ToString().c_str()); + sql_print_error("MyRocks: data corruption detected! Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); /* NO_LINT_DEBUG */ - sql_print_error("RocksDB: Aborting because of data corruption."); + sql_print_error("MyRocks: aborting because of data corruption."); abort_with_stack_traces(); - } - else if (!status.ok()) - { + } else if (!status.ok()) { switch (err_type) { - case RDB_IO_ERROR_DICT_COMMIT: - { - sql_print_error("RocksDB: Failed to write to WAL (dictionary) - " - "status %d, %s", - status.code(), status.ToString().c_str()); - sql_print_error("RocksDB: Aborting on WAL write error."); + case RDB_IO_ERROR_DICT_COMMIT: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed to write to WAL (dictionary). 
" + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on WAL write error."); abort_with_stack_traces(); break; } default: - sql_print_warning("RocksDB: Failed to read/write in RocksDB " - "- status %d, %s", status.code(), + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to read/write in RocksDB. " + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), status.ToString().c_str()); break; } } } -Rdb_dict_manager *rdb_get_dict_manager(void) -{ - return &dict_manager; -} +#pragma GCC pop_options -Rdb_ddl_manager *rdb_get_ddl_manager(void) -{ - return &ddl_manager; -} +Rdb_dict_manager *rdb_get_dict_manager(void) { return &dict_manager; } -Rdb_binlog_manager *rdb_get_binlog_manager(void) -{ - return &binlog_manager; -} +Rdb_ddl_manager *rdb_get_ddl_manager(void) { return &ddl_manager; } +Rdb_binlog_manager *rdb_get_binlog_manager(void) { return &binlog_manager; } -void -rocksdb_set_compaction_options( - my_core::THD* const thd __attribute__((__unused__)), - my_core::st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr, - const void* const save) -{ +void rocksdb_set_compaction_options( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr, const void *const save) { if (var_ptr && save) { - *(uint64_t*)var_ptr = *(const uint64_t*) save; + *(uint64_t *)var_ptr = *(const uint64_t *)save; } const Rdb_compact_params params = { - (uint64_t)rocksdb_compaction_sequential_deletes, - (uint64_t)rocksdb_compaction_sequential_deletes_window, - (uint64_t)rocksdb_compaction_sequential_deletes_file_size - }; + (uint64_t)rocksdb_compaction_sequential_deletes, + (uint64_t)rocksdb_compaction_sequential_deletes_window, + (uint64_t)rocksdb_compaction_sequential_deletes_file_size}; if 
(properties_collector_factory) { properties_collector_factory->SetCompactionParams(params); } } void rocksdb_set_table_stats_sampling_pct( - my_core::THD* const thd __attribute__((__unused__)), - my_core::st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr __attribute__((__unused__)), - const void* const save) -{ + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { mysql_mutex_lock(&rdb_sysvars_mutex); - const uint32_t new_val= *static_cast(save); + const uint32_t new_val = *static_cast(save); if (new_val != rocksdb_table_stats_sampling_pct) { rocksdb_table_stats_sampling_pct = new_val; if (properties_collector_factory) { properties_collector_factory->SetTableStatsSamplingPct( - rocksdb_table_stats_sampling_pct); + rocksdb_table_stats_sampling_pct); } } @@ -10994,105 +10163,113 @@ void rocksdb_set_table_stats_sampling_pct( This is similar to the code in innodb_doublewrite_update (found in storage/innobase/handler/ha_innodb.cc). 
*/ -void -rocksdb_set_rate_limiter_bytes_per_sec( - my_core::THD* const thd, - my_core::st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr __attribute__((__unused__)), - const void* const save) -{ - const uint64_t new_val= *static_cast(save); - if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) - { +void rocksdb_set_rate_limiter_bytes_per_sec( + my_core::THD *const thd, + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + const uint64_t new_val = *static_cast(save); + if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) { /* If a rate_limiter was not enabled at startup we can't change it nor can we disable it if one was created at startup */ - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WRONG_ARGUMENTS, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, "RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot " "be dynamically changed to or from 0. 
Do a clean " "shutdown if you want to change it from or to 0."); - } - else if (new_val != rocksdb_rate_limiter_bytes_per_sec) - { + } else if (new_val != rocksdb_rate_limiter_bytes_per_sec) { /* Apply the new value to the rate limiter and store it locally */ DBUG_ASSERT(rocksdb_rate_limiter != nullptr); - rocksdb_rate_limiter_bytes_per_sec= new_val; + rocksdb_rate_limiter_bytes_per_sec = new_val; rocksdb_rate_limiter->SetBytesPerSecond(new_val); } } -void rdb_set_collation_exception_list(const char* const exception_list) -{ +void rdb_set_collation_exception_list(const char *const exception_list) { DBUG_ASSERT(rdb_collation_exceptions != nullptr); - if (!rdb_collation_exceptions->set_patterns(exception_list)) - { + if (!rdb_collation_exceptions->set_patterns(exception_list)) { my_core::warn_about_bad_patterns(rdb_collation_exceptions, "strict_collation_exceptions"); } } -void -rocksdb_set_collation_exception_list(THD* const thd, - struct st_mysql_sys_var* const var, - void* const var_ptr, - const void* const save) -{ - const char* const val = *static_cast(save); +void rocksdb_set_collation_exception_list(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save) { + const char *const val = *static_cast(save); rdb_set_collation_exception_list(val); - *static_cast(var_ptr) = val; + *static_cast(var_ptr) = val; } -void -rocksdb_set_bulk_load(THD* const thd, - struct st_mysql_sys_var* const var __attribute__((__unused__)), - void* const var_ptr, - const void* const save) -{ - Rdb_transaction*& tx= get_tx_from_thd(thd); +void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var + MY_ATTRIBUTE((__unused__)), + void *const var_ptr, const void *const save) { + Rdb_transaction *&tx = get_tx_from_thd(thd); - if (tx != nullptr) - { - const int rc= tx->finish_bulk_load(); - if (rc != 0) - { + if (tx != nullptr) { + const int rc = tx->finish_bulk_load(); + if (rc != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: 
Error %d finalizing last SST file while " - "setting bulk loading variable", rc); + "setting bulk loading variable", + rc); abort_with_stack_traces(); } } - *static_cast(var_ptr) = *static_cast(save); + *static_cast(var_ptr) = *static_cast(save); } -void rdb_queue_save_stats_request() -{ - rdb_bg_thread.request_save_stats(); +static void rocksdb_set_max_background_compactions( + THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) { + DBUG_ASSERT(save != nullptr); + + mysql_mutex_lock(&rdb_sysvars_mutex); + rocksdb_db_options.max_background_compactions = + *static_cast(save); + rocksdb_db_options.env->SetBackgroundThreads( + rocksdb_db_options.max_background_compactions, + rocksdb::Env::Priority::LOW); + mysql_mutex_unlock(&rdb_sysvars_mutex); } -void ha_rocksdb::rpl_before_delete_rows() -{ - m_in_rpl_delete_rows= true; +void rdb_queue_save_stats_request() { rdb_bg_thread.request_save_stats(); } + +void ha_rocksdb::rpl_before_delete_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_delete_rows = true; + + DBUG_VOID_RETURN; } -void ha_rocksdb::rpl_after_delete_rows() -{ - m_in_rpl_delete_rows= false; +void ha_rocksdb::rpl_after_delete_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_delete_rows = false; + + DBUG_VOID_RETURN; } -void ha_rocksdb::rpl_before_update_rows() -{ - m_in_rpl_update_rows= true; +void ha_rocksdb::rpl_before_update_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_update_rows = true; + + DBUG_VOID_RETURN; } -void ha_rocksdb::rpl_after_update_rows() -{ - m_in_rpl_update_rows= false; +void ha_rocksdb::rpl_after_update_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_update_rows = false; + + DBUG_VOID_RETURN; } /** @@ -11102,22 +10279,25 @@ void ha_rocksdb::rpl_after_update_rows() on UPDATE or DELETE row events, and table must have user defined primary key. 
*/ -bool ha_rocksdb::use_read_free_rpl() -{ - return ((m_in_rpl_delete_rows || m_in_rpl_update_rows) && - !has_hidden_pk(table) && m_use_read_free_rpl); +bool ha_rocksdb::use_read_free_rpl() { + DBUG_ENTER_FUNC(); + + DBUG_RETURN((m_in_rpl_delete_rows || m_in_rpl_update_rows) && + !has_hidden_pk(table) && m_use_read_free_rpl); } -double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) -{ +double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) { + DBUG_ENTER_FUNC(); + if (index != table->s->primary_key) { /* Non covering index range scan */ - return(handler::read_time(index, ranges, rows)); + DBUG_RETURN(handler::read_time(index, ranges, rows)); } - return (rows/20.0) + 1; + + DBUG_RETURN((rows / 20.0) + 1); } -} // namespace myrocks +} // namespace myrocks /* Register the storage engine plugin outside of myrocks namespace @@ -11125,33 +10305,27 @@ double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) its name generation. */ -struct st_mysql_storage_engine rocksdb_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION }; +struct st_mysql_storage_engine rocksdb_storage_engine = { + MYSQL_HANDLERTON_INTERFACE_VERSION}; -mysql_declare_plugin(rocksdb_se) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ - &rocksdb_storage_engine, /* Plugin Descriptor */ - "ROCKSDB", /* Plugin Name */ - "Monty Program Ab", /* Plugin Author */ - "RocksDB storage engine", /* Plugin Description */ - PLUGIN_LICENSE_GPL, /* Plugin Licence */ - myrocks::rocksdb_init_func, /* Plugin Entry Point */ - myrocks::rocksdb_done_func, /* Plugin Deinitializer */ - 0x0001, /* version number (0.1) */ - myrocks::rocksdb_status_vars, /* status variables */ - myrocks::rocksdb_system_variables, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +mysql_declare_plugin(rocksdb_se){ + MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ + &rocksdb_storage_engine, /* Plugin Descriptor */ + "ROCKSDB", /* Plugin Name */ + "Monty Program Ab", /* Plugin Author */ 
+ "RocksDB storage engine", /* Plugin Description */ + PLUGIN_LICENSE_GPL, /* Plugin Licence */ + myrocks::rocksdb_init_func, /* Plugin Entry Point */ + myrocks::rocksdb_done_func, /* Plugin Deinitializer */ + 0x0001, /* version number (0.1) */ + myrocks::rocksdb_status_vars, /* status variables */ + myrocks::rocksdb_system_variables, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }, -myrocks::rdb_i_s_cfstats, -myrocks::rdb_i_s_dbstats, -myrocks::rdb_i_s_perf_context, -myrocks::rdb_i_s_perf_context_global, -myrocks::rdb_i_s_cfoptions, -myrocks::rdb_i_s_global_info, -myrocks::rdb_i_s_ddl, -myrocks::rdb_i_s_index_file_map, -myrocks::rdb_i_s_lock_info, -myrocks::rdb_i_s_trx_info -mysql_declare_plugin_end; + myrocks::rdb_i_s_cfstats, myrocks::rdb_i_s_dbstats, + myrocks::rdb_i_s_perf_context, myrocks::rdb_i_s_perf_context_global, + myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_compact_stats, + myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, + myrocks::rdb_i_s_index_file_map, myrocks::rdb_i_s_lock_info, + myrocks::rdb_i_s_trx_info mysql_declare_plugin_end; diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index a15ae6dd443..db95a4d919a 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -16,7 +16,7 @@ #pragma once #ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ +#pragma interface /* gcc class implementation */ #endif /* C++ standard header files */ @@ -26,8 +26,8 @@ #include /* MySQL header files */ -#include "./handler.h" /* handler */ -#include "./my_global.h" /* ulonglong */ +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ #include "./sql_string.h" #include "./ut0counter.h" @@ -93,41 +93,51 @@ std::vector rdb_get_all_trx_info(); - the name used to set the default column family parameter for per-cf arguments. 
*/ -const char * const DEFAULT_CF_NAME= "default"; +const char *const DEFAULT_CF_NAME = "default"; /* This is the name of the Column Family used for storing the data dictionary. */ -const char * const DEFAULT_SYSTEM_CF_NAME= "__system__"; +const char *const DEFAULT_SYSTEM_CF_NAME = "__system__"; /* This is the name of the hidden primary key for tables with no pk. */ -const char * const HIDDEN_PK_NAME= "HIDDEN_PK_ID"; +const char *const HIDDEN_PK_NAME = "HIDDEN_PK_ID"; /* Column family name which means "put this index into its own column family". See Rdb_cf_manager::get_per_index_cf_name(). */ -const char * const PER_INDEX_CF_NAME = "$per_index_cf"; +const char *const PER_INDEX_CF_NAME = "$per_index_cf"; + +/* + Name for the background thread. +*/ +const char *const BG_THREAD_NAME = "myrocks-bg"; + +/* + Name for the drop index thread. +*/ +const char *const INDEX_THREAD_NAME = "myrocks-index"; /* Default, minimal valid, and maximum valid sampling rate values when collecting statistics about table. */ -#define RDB_DEFAULT_TBL_STATS_SAMPLE_PCT 10 -#define RDB_TBL_STATS_SAMPLE_PCT_MIN 1 -#define RDB_TBL_STATS_SAMPLE_PCT_MAX 100 +#define RDB_DEFAULT_TBL_STATS_SAMPLE_PCT 10 +#define RDB_TBL_STATS_SAMPLE_PCT_MIN 1 +#define RDB_TBL_STATS_SAMPLE_PCT_MAX 100 /* Default and maximum values for rocksdb-compaction-sequential-deletes and rocksdb-compaction-sequential-deletes-window to add basic boundary checking. 
*/ -#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES 0 -#define MAX_COMPACTION_SEQUENTIAL_DELETES 2000000 +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES 2000000 -#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW 0 -#define MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW 2000000 +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW 2000000 /* Default and maximum values for various compaction and flushing related @@ -139,11 +149,11 @@ const char * const PER_INDEX_CF_NAME = "$per_index_cf"; CPU-s and derive the values from there. This however has its own set of problems and we'll choose simplicity for now. */ -#define MAX_BACKGROUND_COMPACTIONS 64 -#define MAX_BACKGROUND_FLUSHES 64 +#define MAX_BACKGROUND_COMPACTIONS 64 +#define MAX_BACKGROUND_FLUSHES 64 -#define DEFAULT_SUBCOMPACTIONS 1 -#define MAX_SUBCOMPACTIONS 64 +#define DEFAULT_SUBCOMPACTIONS 1 +#define MAX_SUBCOMPACTIONS 64 /* Defines the field sizes for serializing XID object to a string representation. @@ -167,7 +177,7 @@ const char * const PER_INDEX_CF_NAME = "$per_index_cf"; The reason behind the cast issue is the lack of unsigned int support in Java. */ -#define MAX_RATE_LIMITER_BYTES_PER_SEC static_cast(LONGLONG_MAX) +#define MAX_RATE_LIMITER_BYTES_PER_SEC static_cast(LONGLONG_MAX) /* Hidden PK column (for tables with no primary key) is a longlong (aka 8 bytes). @@ -179,14 +189,13 @@ const char * const PER_INDEX_CF_NAME = "$per_index_cf"; MyRocks specific error codes. NB! Please make sure that you will update HA_ERR_ROCKSDB_LAST when adding new ones. 
*/ -#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) -#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) -#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) -#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS +#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) +#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) +#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) +#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS -inline bool looks_like_per_index_cf_typo(const char* const name) -{ - return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME)); +inline bool looks_like_per_index_cf_typo(const char *const name) { + return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME)); } /** @@ -196,21 +205,18 @@ inline bool looks_like_per_index_cf_typo(const char* const name) //TODO: join this with Rdb_tbl_def ? */ -struct Rdb_table_handler -{ - char *m_table_name; - uint m_table_name_length; - int m_ref_count; +struct Rdb_table_handler { + char *m_table_name; + uint m_table_name_length; + int m_ref_count; - my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock + my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock /* Stores cumulative table statistics */ - my_io_perf_atomic_t m_io_perf_read; - Rdb_atomic_perf_counters m_table_perf_context; + my_io_perf_atomic_t m_io_perf_read; + Rdb_atomic_perf_counters m_table_perf_context; }; - - class Rdb_key_def; class Rdb_tbl_def; class Rdb_transaction; @@ -218,38 +224,32 @@ class Rdb_transaction_impl; class Rdb_writebatch_impl; class Rdb_field_encoder; -const char *const rocksdb_hton_name= "ROCKSDB"; +const char *const rocksdb_hton_name = "ROCKSDB"; typedef struct _gl_index_id_s { uint32_t cf_id; uint32_t index_id; - bool operator==(const struct _gl_index_id_s& other) const - { + bool operator==(const struct _gl_index_id_s &other) const { return cf_id == other.cf_id && index_id == other.index_id; } - bool operator!=(const struct _gl_index_id_s& 
other) const - { + bool operator!=(const struct _gl_index_id_s &other) const { return cf_id != other.cf_id || index_id != other.index_id; } - bool operator<(const struct _gl_index_id_s& other) const - { + bool operator<(const struct _gl_index_id_s &other) const { return cf_id < other.cf_id || - (cf_id == other.cf_id && index_id < other.index_id); + (cf_id == other.cf_id && index_id < other.index_id); } - bool operator<=(const struct _gl_index_id_s& other) const - { + bool operator<=(const struct _gl_index_id_s &other) const { return cf_id < other.cf_id || - (cf_id == other.cf_id && index_id <= other.index_id); + (cf_id == other.cf_id && index_id <= other.index_id); } - bool operator>(const struct _gl_index_id_s& other) const - { + bool operator>(const struct _gl_index_id_s &other) const { return cf_id > other.cf_id || - (cf_id == other.cf_id && index_id > other.index_id); + (cf_id == other.cf_id && index_id > other.index_id); } - bool operator>=(const struct _gl_index_id_s& other) const - { + bool operator>=(const struct _gl_index_id_s &other) const { return cf_id > other.cf_id || - (cf_id == other.cf_id && index_id >= other.index_id); + (cf_id == other.cf_id && index_id >= other.index_id); } } GL_INDEX_ID; @@ -289,23 +289,20 @@ struct st_export_stats { ulonglong system_rows_updated; }; -} // namespace myrocks +} // namespace myrocks #include "./rdb_buff.h" /* Provide hash function for GL_INDEX_ID so we can include it in sets */ namespace std { - template <> - struct hash - { - std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const - { - const uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 | - (uint64_t) gl_index_id.index_id); - return std::hash()(val); - } - }; -} // namespace std +template <> struct hash { + std::size_t operator()(const myrocks::GL_INDEX_ID &gl_index_id) const { + const uint64_t val = + ((uint64_t)gl_index_id.cf_id << 32 | (uint64_t)gl_index_id.index_id); + return std::hash()(val); + } +}; +} // namespace std namespace myrocks 
{ @@ -314,11 +311,10 @@ namespace myrocks { Class definition for ROCKSDB storage engine plugin handler */ -class ha_rocksdb: public my_core::handler -{ - my_core::THR_LOCK_DATA m_db_lock; ///< MySQL database lock +class ha_rocksdb : public my_core::handler { + my_core::THR_LOCK_DATA m_db_lock; ///< MySQL database lock - Rdb_table_handler *m_table_handler; ///< Open table handler + Rdb_table_handler *m_table_handler; ///< Open table handler /* Iterator used for range scans and for full table/index scans */ rocksdb::Iterator *m_scan_it; @@ -354,7 +350,7 @@ class ha_rocksdb: public my_core::handler */ bool m_maybe_unpack_info; - uchar *m_pk_tuple; /* Buffer for storing PK in KeyTupleFormat */ + uchar *m_pk_tuple; /* Buffer for storing PK in KeyTupleFormat */ uchar *m_pk_packed_tuple; /* Buffer for storing PK in StorageFormat */ // ^^ todo: change it to 'char*'? TODO: ^ can we join this with last_rowkey? @@ -387,6 +383,10 @@ class ha_rocksdb: public my_core::handler uchar *m_sk_packed_tuple_old; Rdb_string_writer m_sk_tails_old; + /* Buffers used for duplicate checking during unique_index_creation */ + uchar *m_dup_sk_packed_tuple; + uchar *m_dup_sk_packed_tuple_old; + /* Temporary space for packing VARCHARs (we provide it to pack_record()/pack_index_tuple() calls). 
@@ -448,40 +448,37 @@ class ha_rocksdb: public my_core::handler */ int m_dupp_errkey; - int create_key_defs(const TABLE* const table_arg, - Rdb_tbl_def* const tbl_def_arg, - const TABLE* const old_table_arg= nullptr, - const Rdb_tbl_def* const old_tbl_def_arg= nullptr) const - __attribute__((__nonnull__(2, 3), __warn_unused_result__)); - int secondary_index_read(const int keyno, uchar* const buf) - __attribute__((__nonnull__, __warn_unused_result__)); + int create_key_defs(const TABLE *const table_arg, + Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg = nullptr, + const Rdb_tbl_def *const old_tbl_def_arg = nullptr) const + MY_ATTRIBUTE((__nonnull__(2, 3), __warn_unused_result__)); + int secondary_index_read(const int keyno, uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void setup_iterator_for_rnd_scan(); - void setup_scan_iterator(const Rdb_key_def& kd, rocksdb::Slice* const slice) - __attribute__((__nonnull__)) - { + void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice) + MY_ATTRIBUTE((__nonnull__)) { setup_scan_iterator(kd, slice, false, false, 0); } - bool is_ascending(const Rdb_key_def& keydef, + bool is_ascending(const Rdb_key_def &keydef, enum ha_rkey_function find_flag) const - __attribute__((__nonnull__, __warn_unused_result__)); - void setup_scan_iterator(const Rdb_key_def& kd, - rocksdb::Slice *slice, const bool use_all_keys, - const bool is_ascending, const uint eq_cond_len) - __attribute__((__nonnull__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice, + const bool use_all_keys, const bool is_ascending, + const uint eq_cond_len) MY_ATTRIBUTE((__nonnull__)); void release_scan_iterator(void); - rocksdb::Status get_for_update(Rdb_transaction* const tx, - rocksdb::ColumnFamilyHandle* const column_family, - const rocksdb::Slice& key, - std::string* const value) const; + rocksdb::Status + 
get_for_update(Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value) const; - int get_row_by_rowid(uchar* const buf, const char* const rowid, + int get_row_by_rowid(uchar *const buf, const char *const rowid, const uint rowid_size) - __attribute__((__nonnull__, __warn_unused_result__)); - int get_row_by_rowid(uchar* const buf, const uchar* const rowid, + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int get_row_by_rowid(uchar *const buf, const uchar *const rowid, const uint rowid_size) - __attribute__((__nonnull__, __warn_unused_result__)) - { + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) { return get_row_by_rowid(buf, reinterpret_cast(rowid), rowid_size); } @@ -489,22 +486,22 @@ class ha_rocksdb: public my_core::handler void update_auto_incr_val(); void load_auto_incr_value(); longlong update_hidden_pk_val(); - int load_hidden_pk_value() __attribute__((__warn_unused_result__)); - int read_hidden_pk_id_from_rowkey(longlong* const hidden_pk_id) - __attribute__((__nonnull__, __warn_unused_result__)); + int load_hidden_pk_value() MY_ATTRIBUTE((__warn_unused_result__)); + int read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); bool can_use_single_delete(const uint &index) const - __attribute__((__warn_unused_result__)); - bool skip_unique_check() const __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); + bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__)); void set_force_skip_unique_check(bool skip) override; - bool commit_in_the_middle() __attribute__((__warn_unused_result__)); - bool do_bulk_commit(Rdb_transaction* const tx) - __attribute__((__nonnull__, __warn_unused_result__)); - bool has_hidden_pk(const TABLE* const table) const - __attribute__((__nonnull__, __warn_unused_result__)); + bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__)); + bool 
do_bulk_commit(Rdb_transaction *const tx) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + bool has_hidden_pk(const TABLE *const table) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void update_row_stats(const operation_type &type); - void set_last_rowkey(const uchar* const old_data); + void set_last_rowkey(const uchar *const old_data); /* Array of table->s->fields elements telling how to store fields in the @@ -513,15 +510,14 @@ class ha_rocksdb: public my_core::handler Rdb_field_encoder *m_encoder_arr; /* Describes instructions on how to decode the field */ - class READ_FIELD - { - public: + class READ_FIELD { + public: /* Points to Rdb_field_encoder describing the field */ - Rdb_field_encoder* m_field_enc; + Rdb_field_encoder *m_field_enc; /* if true, decode the field, otherwise skip it */ - bool m_decode; + bool m_decode; /* Skip this many bytes before reading (or skipping) this field */ - int m_skip; + int m_skip; }; /* @@ -540,11 +536,12 @@ class ha_rocksdb: public my_core::handler */ uint m_null_bytes_in_rec; - void get_storage_type(Rdb_field_encoder* const encoder, const uint &kp); + void get_storage_type(Rdb_field_encoder *const encoder, const uint &kp); void setup_field_converters(); - int alloc_key_buffers(const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) - __attribute__((__nonnull__, __warn_unused_result__)); + int alloc_key_buffers(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool alloc_alter_buffers = false) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void free_key_buffers(); // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE @@ -568,7 +565,8 @@ class ha_rocksdb: public my_core::handler public: /* - Controls whether writes include checksums. This is updated from the session variable + Controls whether writes include checksums. This is updated from the session + variable at the start of each query. 
*/ bool m_store_row_debug_checksums; @@ -577,12 +575,11 @@ public: bool m_verify_row_debug_checksums; int m_checksums_pct; - ha_rocksdb(my_core::handlerton* const hton, - my_core::TABLE_SHARE* const table_arg); - ~ha_rocksdb() - { - int err __attribute__((__unused__)); - err= finalize_bulk_load(); + ha_rocksdb(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg); + ~ha_rocksdb() { + int err MY_ATTRIBUTE((__unused__)); + err = finalize_bulk_load(); DBUG_ASSERT(err == 0); mysql_mutex_destroy(&m_bulk_load_mutex); } @@ -590,28 +587,37 @@ public: /** @brief The name that will be used for display purposes. */ - const char *table_type() const { return rocksdb_hton_name; } + const char *table_type() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(rocksdb_hton_name); + } /* The following is only used by SHOW KEYS: */ - const char *index_type(uint inx) { return "LSMTREE"; } + const char *index_type(uint inx) override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN("LSMTREE"); + } /** @brief The file extensions. */ - const char **bas_ext() const; + const char **bas_ext() const override; /* See if this is the same base table - this should only be true for different partitions of the same table. */ - bool same_table(const ha_rocksdb& other) const; + bool same_table(const ha_rocksdb &other) const; /** @brief This is a list of flags that indicate what functionality the storage engine implements. The current table flags are documented in handler.h */ - ulonglong table_flags() const override - { + ulonglong table_flags() const override { + DBUG_ENTER_FUNC(); + /* HA_BINLOG_STMT_CAPABLE We are saying that this engine is just statement capable to have @@ -621,12 +627,11 @@ public: If we don't set it, filesort crashes, because it assumes rowids are 1..8 byte numbers */ - return HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | - HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS | - (m_pk_can_be_decoded? 
HA_PRIMARY_KEY_IN_READ_INDEX : 0) | - HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | - HA_NULL_IN_KEY | - HA_PARTIAL_COLUMN_READ; + DBUG_RETURN(HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | + HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS | + (m_pk_can_be_decoded ? HA_PRIMARY_KEY_IN_READ_INDEX : 0) | + HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_NULL_IN_KEY | + HA_PARTIAL_COLUMN_READ); } bool init_with_fields() override; @@ -641,61 +646,62 @@ public: If all_parts is set, MySQL wants to know the flags for the combined index, up to and including 'part'. */ - ulong index_flags(uint inx, uint part, bool all_parts) const; + ulong index_flags(uint inx, uint part, bool all_parts) const override; - const key_map * keys_to_use_for_scanning() - { - return &key_map_full; + const key_map *keys_to_use_for_scanning() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(&key_map_full); } - bool primary_key_is_clustered() - { - return true; + bool primary_key_is_clustered() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(true); } - bool should_store_row_debug_checksums() const - { + bool should_store_row_debug_checksums() const { return m_store_row_debug_checksums && (rand() % 100 < m_checksums_pct); } - int rename_table(const char* const from, const char* const to) - __attribute__((__nonnull__, __warn_unused_result__)); + int rename_table(const char *const from, const char *const to) override + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int convert_record_from_storage_format(const rocksdb::Slice* const key, - const rocksdb::Slice* const value, - uchar* const buf) - __attribute__((__nonnull__, __warn_unused_result__)); + int convert_record_from_storage_format(const rocksdb::Slice *const key, + const rocksdb::Slice *const value, + uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int convert_record_from_storage_format(const rocksdb::Slice* const key, - uchar* const buf) - __attribute__((__nonnull__, __warn_unused_result__)); + int 
convert_record_from_storage_format(const rocksdb::Slice *const key, + uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice, - Rdb_string_writer* const pk_unpack_info, - rocksdb::Slice* const packed_rec) - __attribute__((__nonnull__)); + void convert_record_to_storage_format(const rocksdb::Slice &pk_packed_slice, + Rdb_string_writer *const pk_unpack_info, + rocksdb::Slice *const packed_rec) + MY_ATTRIBUTE((__nonnull__)); - static const char* get_key_name(const uint index, - const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) - __attribute__((__nonnull__, __warn_unused_result__)); + static const char *get_key_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - static const char* get_key_comment(const uint index, - const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) - __attribute__((__nonnull__, __warn_unused_result__)); + static const char *get_key_comment(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - static bool is_hidden_pk(const uint index, const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) - __attribute__((__nonnull__, __warn_unused_result__)); + static bool is_hidden_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - static uint pk_index(const TABLE* const table_arg, - const Rdb_tbl_def* const tbl_def_arg) - __attribute__((__nonnull__, __warn_unused_result__)); + static uint pk_index(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - static bool is_pk(const uint index, const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg) - 
__attribute__((__nonnull__, __warn_unused_result__)); + static bool is_pk(const uint index, const TABLE *table_arg, + const Rdb_tbl_def *tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); /** @brief unireg.cc will call max_supported_record_length(), max_supported_keys(), @@ -704,11 +710,30 @@ public: send. Return *real* limits of your storage engine here; MySQL will do min(your_limits, MySQL_limits) automatically. */ - uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_supported_record_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(HA_MAX_REC_LENGTH); + } + + uint max_supported_keys() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(MAX_INDEXES); + } + + uint max_supported_key_parts() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(MAX_REF_PARTS); + } + + uint max_supported_key_part_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(2048); + } - uint max_supported_keys() const { return MAX_INDEXES; } - uint max_supported_key_parts() const { return MAX_REF_PARTS; } - uint max_supported_key_part_length() const { return 2048; } /** @brief unireg.cc will call this to make sure that the storage engine can handle the data it is about to send. Return *real* limits of your storage engine @@ -718,222 +743,253 @@ public: There is no need to implement ..._key_... methods if your engine doesn't support indexes. */ - uint max_supported_key_length() const { return 16*1024; /* just to return something*/ } + uint max_supported_key_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(16 * 1024); /* just to return something*/ + } /** TODO: return actual upper bound of number of records in the table. (e.g. 
save number of records seen on full table scan and/or use file size as upper bound) */ - ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } + ha_rows estimate_rows_upper_bound() override { + DBUG_ENTER_FUNC(); - /* At the moment, we're ok with default handler::index_init() implementation. */ - int index_read_map(uchar* const buf, const uchar* const key, + DBUG_RETURN(HA_POS_ERROR); + } + + /* At the moment, we're ok with default handler::index_init() implementation. + */ + int index_read_map(uchar *const buf, const uchar *const key, key_part_map keypart_map, - enum ha_rkey_function find_flag); - __attribute__((__warn_unused_result__)); + enum ha_rkey_function find_flag) override + MY_ATTRIBUTE((__warn_unused_result__)); - int index_read_map_impl(uchar* const buf, const uchar* const key, + int index_read_map_impl(uchar *const buf, const uchar *const key, key_part_map keypart_map, enum ha_rkey_function find_flag, - const key_range* end_key) - __attribute__((__warn_unused_result__));; + const key_range *end_key) + MY_ATTRIBUTE((__warn_unused_result__)); - int index_read_last_map(uchar* const buf, const uchar* const key, - key_part_map keypart_map) - __attribute__((__warn_unused_result__));; + int index_read_last_map(uchar *const buf, const uchar *const key, + key_part_map keypart_map) override + MY_ATTRIBUTE((__warn_unused_result__)); - int read_range_first(const key_range* const start_key, - const key_range* const end_key, - bool eq_range, bool sorted) - __attribute__((__warn_unused_result__));; + int read_range_first(const key_range *const start_key, + const key_range *const end_key, bool eq_range, + bool sorted) override + MY_ATTRIBUTE((__warn_unused_result__)); + + virtual double scan_time() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN( + static_cast((stats.records + stats.deleted) / 20.0 + 10)); + } - virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; } virtual double read_time(uint, uint, ha_rows rows) override; - 
int open(const char* const name, int mode, uint test_if_locked) - __attribute__((__warn_unused_result__)); - int close(void) __attribute__((__warn_unused_result__)); + int open(const char *const name, int mode, uint test_if_locked) override + MY_ATTRIBUTE((__warn_unused_result__)); + int close(void) override MY_ATTRIBUTE((__warn_unused_result__)); - int write_row(uchar* const buf) __attribute__((__warn_unused_result__)); - int update_row(const uchar* const old_data, uchar* const new_data) - __attribute__((__warn_unused_result__)); - int delete_row(const uchar* const buf) - __attribute__((__warn_unused_result__)); - rocksdb::Status delete_or_singledelete(uint index, - Rdb_transaction* const tx, - rocksdb::ColumnFamilyHandle* const cf, - const rocksdb::Slice& key) - __attribute__((__warn_unused_result__)); + int write_row(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int update_row(const uchar *const old_data, uchar *const new_data) override + MY_ATTRIBUTE((__warn_unused_result__)); + int delete_row(const uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + rocksdb::Status delete_or_singledelete(uint index, Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::Slice &key) + MY_ATTRIBUTE((__warn_unused_result__)); - int index_next(uchar* const buf) __attribute__((__warn_unused_result__)); - int index_next_with_direction(uchar* const buf, bool move_forward) - __attribute__((__warn_unused_result__)); - int index_prev(uchar* const buf) __attribute__((__warn_unused_result__)); + int index_next(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_next_with_direction(uchar *const buf, bool move_forward) + MY_ATTRIBUTE((__warn_unused_result__)); + int index_prev(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); - int index_first(uchar* const buf) __attribute__((__warn_unused_result__)); - int index_last(uchar* const buf) 
__attribute__((__warn_unused_result__)); + int index_first(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_last(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); - class Item* idx_cond_push(uint keyno, class Item* const idx_cond); + class Item *idx_cond_push(uint keyno, class Item *const idx_cond) override; /* Default implementation from cancel_pushed_idx_cond() suits us */ private: - struct key_def_cf_info - { - rocksdb::ColumnFamilyHandle* cf_handle; + struct key_def_cf_info { + rocksdb::ColumnFamilyHandle *cf_handle; bool is_reverse_cf; bool is_auto_cf; }; - struct update_row_info - { - Rdb_transaction* tx; - const uchar* new_data; - const uchar* old_data; - rocksdb::Slice new_pk_slice; - rocksdb::Slice old_pk_slice; + struct update_row_info { + Rdb_transaction *tx; + const uchar *new_data; + const uchar *old_data; + rocksdb::Slice new_pk_slice; + rocksdb::Slice old_pk_slice; // "unpack_info" data for the new PK value Rdb_string_writer *new_pk_unpack_info; - longlong hidden_pk_id; - bool skip_unique_check; + longlong hidden_pk_id; + bool skip_unique_check; }; - int create_cfs(const TABLE* const table_arg, Rdb_tbl_def* const tbl_def_arg, - std::array* const cfs) const; - __attribute__((__nonnull__, __warn_unused_result__)); + /* + Used to check for duplicate entries during fast unique secondary index + creation. + */ + struct unique_sk_buf_info { + bool sk_buf_switch = false; + rocksdb::Slice sk_memcmp_key; + rocksdb::Slice sk_memcmp_key_old; + uchar *dup_sk_buf; + uchar *dup_sk_buf_old; - int create_key_def(const TABLE* const table_arg, const uint &i, - const Rdb_tbl_def* const tbl_def_arg, - std::shared_ptr* const new_key_def, - const struct key_def_cf_info& cf_info) const; - __attribute__((__nonnull__, __warn_unused_result__)); + /* + This method is meant to be called back to back during inplace creation + of unique indexes. 
It will switch between two buffers, which + will each store the memcmp form of secondary keys, which are then + converted to slices in sk_memcmp_key or sk_memcmp_key_old. - int create_inplace_key_defs(const TABLE* const table_arg, - Rdb_tbl_def* vtbl_def_arg, - const TABLE* const old_table_arg, - const Rdb_tbl_def* const old_tbl_def_arg, - const std::array& cfs) const; - __attribute__((__nonnull__, __warn_unused_result__)); + Switching buffers on each iteration allows us to retain the + sk_memcmp_key_old value for duplicate comparison. + */ + inline uchar *swap_and_get_sk_buf() { + sk_buf_switch = !sk_buf_switch; + return sk_buf_switch ? dup_sk_buf : dup_sk_buf_old; + } + }; - std::unordered_map get_old_key_positions( - const TABLE* table_arg, - const Rdb_tbl_def* tbl_def_arg, - const TABLE* old_table_arg, - const Rdb_tbl_def* old_tbl_def_arg) const - __attribute__((__nonnull__)); + int create_cfs(const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + std::array *const cfs) + const MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int compare_key_parts(const KEY* const old_key, - const KEY* const new_key) const; - __attribute__((__nonnull__, __warn_unused_result__)); + int create_key_def(const TABLE *const table_arg, const uint &i, + const Rdb_tbl_def *const tbl_def_arg, + std::shared_ptr *const new_key_def, + const struct key_def_cf_info &cf_info) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int create_inplace_key_defs( + const TABLE *const table_arg, Rdb_tbl_def *vtbl_def_arg, + const TABLE *const old_table_arg, + const Rdb_tbl_def *const old_tbl_def_arg, + const std::array &cfs) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + std::unordered_map + get_old_key_positions(const TABLE *table_arg, const Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg, + const Rdb_tbl_def *old_tbl_def_arg) const + MY_ATTRIBUTE((__nonnull__)); + + int compare_key_parts(const KEY *const old_key, + const KEY *const new_key) const; 
+ MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int index_first_intern(uchar *buf) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int index_last_intern(uchar *buf) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); enum icp_result check_index_cond() const; - int find_icp_matching_index_rec(const bool &move_forward, uchar* const buf) - __attribute__((__nonnull__, __warn_unused_result__)); + int find_icp_matching_index_rec(const bool &move_forward, uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void calc_updated_indexes(); - int update_write_row(const uchar* const old_data, const uchar* const new_data, + int update_write_row(const uchar *const old_data, const uchar *const new_data, const bool skip_unique_check) - __attribute__((__warn_unused_result__)); - int get_pk_for_update(struct update_row_info* const row_info); + MY_ATTRIBUTE((__warn_unused_result__)); + int get_pk_for_update(struct update_row_info *const row_info); int check_and_lock_unique_pk(const uint &key_id, - const struct update_row_info& row_info, - bool* const found, bool* const pk_changed) - __attribute__((__warn_unused_result__)); + const struct update_row_info &row_info, + bool *const found, bool *const pk_changed) + MY_ATTRIBUTE((__warn_unused_result__)); int check_and_lock_sk(const uint &key_id, - const struct update_row_info& row_info, - bool* const found) const - __attribute__((__warn_unused_result__)); - int check_uniqueness_and_lock(const struct update_row_info& row_info, - bool* const pk_changed) - __attribute__((__warn_unused_result__)); - bool over_bulk_load_threshold(int* err) - __attribute__((__warn_unused_result__)); - int bulk_load_key(Rdb_transaction* const tx, - const Rdb_key_def& kd, - const rocksdb::Slice& key, - const rocksdb::Slice& value) - __attribute__((__nonnull__, __warn_unused_result__)); - int update_pk(const 
Rdb_key_def& kd, - const struct update_row_info& row_info, - const bool &pk_changed) - __attribute__((__warn_unused_result__)); - int update_sk(const TABLE* const table_arg, - const Rdb_key_def& kd, - const struct update_row_info& row_info) - __attribute__((__warn_unused_result__)); - int update_indexes(const struct update_row_info& row_info, + const struct update_row_info &row_info, + bool *const found) const + MY_ATTRIBUTE((__warn_unused_result__)); + int check_uniqueness_and_lock(const struct update_row_info &row_info, + bool *const pk_changed) + MY_ATTRIBUTE((__warn_unused_result__)); + bool over_bulk_load_threshold(int *err) + MY_ATTRIBUTE((__warn_unused_result__)); + int check_duplicate_sk(const TABLE *table_arg, const Rdb_key_def &index, + const rocksdb::Slice *key, + struct unique_sk_buf_info *sk_info) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, + const rocksdb::Slice &key, const rocksdb::Slice &value) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int update_pk(const Rdb_key_def &kd, const struct update_row_info &row_info, + const bool &pk_changed) MY_ATTRIBUTE((__warn_unused_result__)); + int update_sk(const TABLE *const table_arg, const Rdb_key_def &kd, + const struct update_row_info &row_info) + MY_ATTRIBUTE((__warn_unused_result__)); + int update_indexes(const struct update_row_info &row_info, const bool &pk_changed) - __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); - int read_key_exact(const Rdb_key_def& kd, - rocksdb::Iterator* const iter, const bool &using_full_key, - const rocksdb::Slice& key_slice) const - __attribute__((__nonnull__, __warn_unused_result__)); - int read_before_key(const Rdb_key_def& kd, - const bool &using_full_key, - const rocksdb::Slice& key_slice) - __attribute__((__nonnull__, __warn_unused_result__)); - int read_after_key(const Rdb_key_def& kd, + int read_key_exact(const Rdb_key_def &kd, 
rocksdb::Iterator *const iter, const bool &using_full_key, - const rocksdb::Slice& key_slice) - __attribute__((__nonnull__, __warn_unused_result__)); + const rocksdb::Slice &key_slice) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_before_key(const Rdb_key_def &kd, const bool &using_full_key, + const rocksdb::Slice &key_slice) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_after_key(const Rdb_key_def &kd, const bool &using_full_key, + const rocksdb::Slice &key_slice) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int position_to_correct_key(const Rdb_key_def& kd, - const enum ha_rkey_function &find_flag, - const bool &full_key_match, - const uchar* const key, - const key_part_map &keypart_map, - const rocksdb::Slice& key_slice, - bool* const move_forward) - __attribute__((__warn_unused_result__)); + int position_to_correct_key( + const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, + const bool &full_key_match, const uchar *const key, + const key_part_map &keypart_map, const rocksdb::Slice &key_slice, + bool *const move_forward) MY_ATTRIBUTE((__warn_unused_result__)); - int read_row_from_primary_key(uchar* const buf) - __attribute__((__nonnull__, __warn_unused_result__)); - int read_row_from_secondary_key(uchar* const buf, - const Rdb_key_def& kd, + int read_row_from_primary_key(uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_row_from_secondary_key(uchar *const buf, const Rdb_key_def &kd, bool move_forward) - __attribute__((__nonnull__, __warn_unused_result__)); - int try_keyonly_read_from_sk(uchar* buf, - const Rdb_key_def& kd, - const rocksdb::Slice& key, - const rocksdb::Slice& value, - uint rowid_size) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int calc_eq_cond_len(const Rdb_key_def& kd, + int calc_eq_cond_len(const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, - const 
rocksdb::Slice& slice, + const rocksdb::Slice &slice, const int &bytes_changed_by_succ, - const key_range* const end_key, - uint* const end_key_packed_size) - __attribute__((__warn_unused_result__)); + const key_range *const end_key, + uint *const end_key_packed_size) + MY_ATTRIBUTE((__warn_unused_result__)); - Rdb_tbl_def* get_table_if_exists(const char* const tablename) - __attribute__((__nonnull__, __warn_unused_result__)); - void read_thd_vars(THD* const thd) - __attribute__((__nonnull__)); - const char* thd_rocksdb_tmpdir() - __attribute__((__nonnull__, __warn_unused_result__)); + Rdb_tbl_def *get_table_if_exists(const char *const tablename) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void read_thd_vars(THD *const thd) MY_ATTRIBUTE((__nonnull__)); + const char *thd_rocksdb_tmpdir() + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - bool contains_foreign_key(THD* const thd) - __attribute__((__nonnull__, __warn_unused_result__)); + bool contains_foreign_key(THD *const thd) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int inplace_populate_sk(const TABLE* const table_arg, - const std::unordered_set>& indexes); + int inplace_populate_sk( + TABLE *const table_arg, + const std::unordered_set> &indexes) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); public: - int index_init(uint idx, bool sorted) __attribute__((__warn_unused_result__)); - int index_end() __attribute__((__warn_unused_result__)); + int index_init(uint idx, bool sorted) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_end() override MY_ATTRIBUTE((__warn_unused_result__)); - void unlock_row(); + void unlock_row() override; /** @brief Unlike index_init(), rnd_init() can be called two consecutive times @@ -943,107 +999,114 @@ public: cursor to the start of the table; no need to deallocate and allocate it again. This is a required method. 
*/ - int rnd_init(bool scan) __attribute__((__warn_unused_result__)); - int rnd_end() __attribute__((__warn_unused_result__)); + int rnd_init(bool scan) override MY_ATTRIBUTE((__warn_unused_result__)); + int rnd_end() override MY_ATTRIBUTE((__warn_unused_result__)); - int rnd_next(uchar* const buf) __attribute__((__warn_unused_result__)); - int rnd_next_with_direction(uchar* const buf, bool move_forward) - __attribute__((__warn_unused_result__)); + int rnd_next(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int rnd_next_with_direction(uchar *const buf, bool move_forward) + MY_ATTRIBUTE((__warn_unused_result__)); - int rnd_pos(uchar* const buf, uchar* const pos) - __attribute__((__warn_unused_result__)); - void position(const uchar* const record); + int rnd_pos(uchar *const buf, uchar *const pos) override + MY_ATTRIBUTE((__warn_unused_result__)); + void position(const uchar *const record) override; int info(uint) override; /* This function will always return success, therefore no annotation related * to checking the return value. Can't change the signature because it's * required by the interface. 
*/ - int extra(enum ha_extra_function operation); + int extra(enum ha_extra_function operation) override; - int start_stmt(THD* const thd, thr_lock_type lock_type) - __attribute__((__warn_unused_result__)); - int external_lock(THD* const thd, int lock_type) - __attribute__((__warn_unused_result__)); - int delete_all_rows(ha_rows* const nrows) __attribute__((__warn_unused_result__)); - int truncate() __attribute__((__warn_unused_result__)); + int start_stmt(THD *const thd, thr_lock_type lock_type) override + MY_ATTRIBUTE((__warn_unused_result__)); + int external_lock(THD *const thd, int lock_type) override + MY_ATTRIBUTE((__warn_unused_result__)); + int truncate() override MY_ATTRIBUTE((__warn_unused_result__)); + + int reset() override { + DBUG_ENTER_FUNC(); - int reset() override - { /* Free blob data */ m_retrieved_record.clear(); - return 0; + + DBUG_RETURN(HA_EXIT_SUCCESS); } - int check(THD* const thd, HA_CHECK_OPT* const check_opt) - __attribute__((__warn_unused_result__)); - void remove_rows(Rdb_tbl_def* const tbl); - ha_rows records_in_range(uint inx, key_range* const min_key, - key_range* const max_key) - __attribute__((__warn_unused_result__)); - int delete_table(const char* const from) __attribute__((__warn_unused_result__)); - int create(const char* const name, TABLE* const form, - HA_CREATE_INFO* const create_info) - __attribute__((__warn_unused_result__)); - bool check_if_incompatible_data(HA_CREATE_INFO* const info, - uint table_changes) - __attribute__((__warn_unused_result__)); + int check(THD *const thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + void remove_rows(Rdb_tbl_def *const tbl); + ha_rows records_in_range(uint inx, key_range *const min_key, + key_range *const max_key) override + MY_ATTRIBUTE((__warn_unused_result__)); + int delete_table(const char *const from) override + MY_ATTRIBUTE((__warn_unused_result__)); + int create(const char *const name, TABLE *const form, + HA_CREATE_INFO *const 
create_info) override + MY_ATTRIBUTE((__warn_unused_result__)); + bool check_if_incompatible_data(HA_CREATE_INFO *const info, + uint table_changes) override + MY_ATTRIBUTE((__warn_unused_result__)); - THR_LOCK_DATA **store_lock(THD* const thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type) - __attribute__((__warn_unused_result__)); + THR_LOCK_DATA **store_lock(THD *const thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) override + MY_ATTRIBUTE((__warn_unused_result__)); - my_bool register_query_cache_table(THD* const thd, char* const table_key, + my_bool register_query_cache_table(THD *const thd, char *const table_key, uint key_length, - qc_engine_callback* const engine_callback, - ulonglong* const engine_data) - { + qc_engine_callback *const engine_callback, + ulonglong *const engine_data) override { + DBUG_ENTER_FUNC(); + /* Currently, we don't support query cache */ - return FALSE; + DBUG_RETURN(FALSE); } - bool get_error_message(const int error, String* const buf) - __attribute__((__nonnull__)); + bool get_error_message(const int error, String *const buf) override + MY_ATTRIBUTE((__nonnull__)); void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, - ulonglong* const first_value, - ulonglong* const nb_reserved_values); - void update_create_info(HA_CREATE_INFO* const create_info); - int optimize(THD* const thd, HA_CHECK_OPT* const check_opt) - __attribute__((__warn_unused_result__)); - int analyze(THD* const thd, HA_CHECK_OPT* const check_opt) - __attribute__((__warn_unused_result__)); - int calculate_stats(const TABLE* const table_arg, THD* const thd, - HA_CHECK_OPT* const check_opt) - __attribute__((__warn_unused_result__)); + ulonglong *const first_value, + ulonglong *const nb_reserved_values) override; + void update_create_info(HA_CREATE_INFO *const create_info) override; + int optimize(THD *const thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + int analyze(THD *const 
thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + int calculate_stats(const TABLE *const table_arg, THD *const thd, + HA_CHECK_OPT *const check_opt) + MY_ATTRIBUTE((__warn_unused_result__)); enum_alter_inplace_result check_if_supported_inplace_alter( - TABLE *altered_table, - my_core::Alter_inplace_info* const ha_alter_info) override; + TABLE *altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; - bool prepare_inplace_alter_table(TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info); + bool prepare_inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; - bool inplace_alter_table(TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info); + bool inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; - bool commit_inplace_alter_table(TABLE* const altered_table, - my_core::Alter_inplace_info* const ha_alter_info, - bool commit); + bool + commit_inplace_alter_table(TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info, + bool commit) override; - int finalize_bulk_load() __attribute__((__warn_unused_result__)); + int finalize_bulk_load() MY_ATTRIBUTE((__warn_unused_result__)); - void set_use_read_free_rpl(const char* const whitelist); - void set_skip_unique_check_tables(const char* const whitelist); + void set_use_read_free_rpl(const char *const whitelist); + void set_skip_unique_check_tables(const char *const whitelist); - public: +public: virtual void rpl_before_delete_rows() override; virtual void rpl_after_delete_rows() override; virtual void rpl_before_update_rows() override; virtual void rpl_after_update_rows() override; virtual bool use_read_free_rpl(); - private: +private: /* Flags tracking if we are inside different replication operation */ bool m_in_rpl_delete_rows; bool m_in_rpl_update_rows; @@ -1055,16 +1118,15 
@@ public: Helper class for in-place alter, for storing handler context between inplace alter calls */ -struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx -{ +struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx { /* The new table definition */ - Rdb_tbl_def* const m_new_tdef; + Rdb_tbl_def *const m_new_tdef; /* Stores the original key definitions */ - std::shared_ptr* const m_old_key_descr; + std::shared_ptr *const m_old_key_descr; /* Stores the new key definitions */ - std::shared_ptr* m_new_key_descr; + std::shared_ptr *m_new_key_descr; /* Stores the old number of key definitions */ const uint m_old_n_keys; @@ -1085,28 +1147,24 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx const uint m_n_dropped_keys; Rdb_inplace_alter_ctx( - Rdb_tbl_def* new_tdef, std::shared_ptr* old_key_descr, - std::shared_ptr* new_key_descr, uint old_n_keys, + Rdb_tbl_def *new_tdef, std::shared_ptr *old_key_descr, + std::shared_ptr *new_key_descr, uint old_n_keys, uint new_n_keys, std::unordered_set> added_indexes, - std::unordered_set dropped_index_ids, - uint n_added_keys, uint n_dropped_keys) : - my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef), - m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr), - m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys), - m_added_indexes(added_indexes), - m_dropped_index_ids(dropped_index_ids), - m_n_added_keys(n_added_keys), - m_n_dropped_keys(n_dropped_keys) - { - } + std::unordered_set dropped_index_ids, uint n_added_keys, + uint n_dropped_keys) + : my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef), + m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr), + m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys), + m_added_indexes(added_indexes), m_dropped_index_ids(dropped_index_ids), + m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys) {} - ~Rdb_inplace_alter_ctx(){} + ~Rdb_inplace_alter_ctx() {} - private: +private: /* Disable 
Copying */ - Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx&); - Rdb_inplace_alter_ctx& operator=(const Rdb_inplace_alter_ctx&); + Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx &); + Rdb_inplace_alter_ctx &operator=(const Rdb_inplace_alter_ctx &); }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h index be7b5518423..b30585f5d37 100644 --- a/storage/rocksdb/ha_rocksdb_proto.h +++ b/storage/rocksdb/ha_rocksdb_proto.h @@ -32,27 +32,31 @@ enum RDB_IO_ERROR_TYPE { RDB_IO_ERROR_TX_COMMIT, RDB_IO_ERROR_DICT_COMMIT, RDB_IO_ERROR_BG_THREAD, - RDB_IO_ERROR_GENERAL + RDB_IO_ERROR_GENERAL, + RDB_IO_ERROR_LAST }; -void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type); +const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type); -int rdb_normalize_tablename(const std::string& tablename, std::string* str) - __attribute__((__nonnull__, __warn_unused_result__)); +void rdb_handle_io_error(const rocksdb::Status status, + const RDB_IO_ERROR_TYPE err_type); -int rdb_split_normalized_tablename(const std::string& fullname, std::string *db, +int rdb_normalize_tablename(const std::string &tablename, std::string *str) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +int rdb_split_normalized_tablename(const std::string &fullname, std::string *db, std::string *table = nullptr, std::string *partition = nullptr) - __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); std::vector rdb_get_open_table_names(void); int rdb_get_table_perf_counters(const char *tablename, Rdb_perf_counters *counters) - __attribute__((__nonnull__(2))); + MY_ATTRIBUTE((__nonnull__(2))); void rdb_get_global_perf_counters(Rdb_perf_counters *counters) - __attribute__((__nonnull__(1))); + MY_ATTRIBUTE((__nonnull__(1))); void rdb_queue_save_stats_request(); @@ -63,20 +67,20 @@ void rdb_queue_save_stats_request(); rocksdb::TransactionDB *rdb_get_rocksdb_db(); class 
Rdb_cf_manager; -Rdb_cf_manager& rdb_get_cf_manager(); +Rdb_cf_manager &rdb_get_cf_manager(); -rocksdb::BlockBasedTableOptions& rdb_get_table_options(); +rocksdb::BlockBasedTableOptions &rdb_get_table_options(); class Rdb_dict_manager; Rdb_dict_manager *rdb_get_dict_manager(void) - __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); class Rdb_ddl_manager; Rdb_ddl_manager *rdb_get_ddl_manager(void) - __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); class Rdb_binlog_manager; Rdb_binlog_manager *rdb_get_binlog_manager(void) - __attribute__((__warn_unused_result__)); + MY_ATTRIBUTE((__warn_unused_result__)); -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h index dd6dd138e56..f639f807549 100644 --- a/storage/rocksdb/logger.h +++ b/storage/rocksdb/logger.h @@ -21,13 +21,10 @@ namespace myrocks { -class Rdb_logger : public rocksdb::Logger -{ - public: - void Logv(const rocksdb::InfoLogLevel log_level, - const char* format, - va_list ap) override - { +class Rdb_logger : public rocksdb::Logger { +public: + void Logv(const rocksdb::InfoLogLevel log_level, const char *format, + va_list ap) override { DBUG_ASSERT(format != nullptr); enum loglevel mysql_log_level; @@ -41,11 +38,11 @@ class Rdb_logger : public rocksdb::Logger } if (log_level >= rocksdb::InfoLogLevel::ERROR_LEVEL) { - mysql_log_level= ERROR_LEVEL; + mysql_log_level = ERROR_LEVEL; } else if (log_level >= rocksdb::InfoLogLevel::WARN_LEVEL) { - mysql_log_level= WARNING_LEVEL; + mysql_log_level = WARNING_LEVEL; } else { - mysql_log_level= INFORMATION_LEVEL; + mysql_log_level = INFORMATION_LEVEL; } // log to MySQL @@ -54,20 +51,18 @@ class Rdb_logger : public rocksdb::Logger error_log_print(mysql_log_level, f.c_str(), ap); } - void Logv(const char* format, va_list ap) override - { + void Logv(const char *format, va_list ap) override { DBUG_ASSERT(format != nullptr); // If no level is 
specified, it is by default at information level Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap); } - void SetRocksDBLogger(const std::shared_ptr logger) - { + void SetRocksDBLogger(const std::shared_ptr logger) { m_logger = logger; } - private: +private: std::shared_ptr m_logger; }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result index bfa06f88011..97238282ebe 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result @@ -4,20 +4,20 @@ CREATE DATABASE mysqlslap; USE mysqlslap; CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; # 2PC enabled, MyRocks durability enabled -SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_enable_2pc=0; SET GLOBAL rocksdb_write_sync=1; ## 2PC + durability + single thread select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c = 1000 then 'true' else 'false' end -true +false ## 2PC + durability + group commit select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end -true +false # 2PC enabled, MyRocks durability disabled -SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_enable_2pc=0; SET GLOBAL rocksdb_write_sync=0; select variable_value into @c from information_schema.global_status where 
variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; @@ -28,17 +28,17 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa case when variable_value-@c = 0 then 'true' else 'false' end true # 2PC disabled, MyRocks durability enabled -SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_enable_2pc=1; SET GLOBAL rocksdb_write_sync=1; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c = 0 then 'true' else 'false' end -true +false select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c = 0 then 'true' else 'false' end -true -SET GLOBAL rocksdb_disable_2pc=1; +false +SET GLOBAL rocksdb_enable_2pc=1; SET GLOBAL rocksdb_write_sync=0; DROP TABLE t1; DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result index 987b34948e8..05455e76e5b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result @@ -60,11 +60,8 @@ CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB # crash_during_index_creation_partition flush logs; SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; -# expected assertion failure from sql layer here for alter rollback -call mtr.add_suppression("Assertion `0' 
failed."); -call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; -ERROR HY000: Lost connection to MySQL server during query +ERROR HY000: Intentional failure in inplace alter occurred. SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; SHOW CREATE TABLE t1; Table Create Table diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result new file mode 100644 index 00000000000..dbd22a9f1f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result @@ -0,0 +1,89 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +INSERT INTO t1 (a,b) VALUES (4,5); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +ERROR 23000: Duplicate entry '5' for key 'kb' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL DEFAULT '0', + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a,b) VALUES (4,5); +ERROR 23000: Duplicate entry '5' for key 'kb' +INSERT INTO t1 (a,b) VALUES (5,8); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL DEFAULT '0', + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1 FORCE INDEX(kb); +a b +1 5 +2 6 +3 7 +5 8 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, 
b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, NULL); +INSERT INTO t1 (a, b) VALUES (3, NULL); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a, b) VALUES (4, NULL); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL DEFAULT '0', + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +4 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (2,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (3,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (4,1,5); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL DEFAULT '0', + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kbc` (`b`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kbc); +COUNT(*) +4 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ERROR HY000: Unique index support is disabled when the table has no primary key. 
+SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result index c3e54a25864..2c7d37c053f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result @@ -10,19 +10,19 @@ SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP WHERE INDEX_NUMBER = (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); -COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS -# # SSTNAME 5 # # # # # +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5 SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP WHERE INDEX_NUMBER = (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j"); -COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS -# # SSTNAME 5 # # # # # +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5,5 SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP WHERE INDEX_NUMBER = (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY"); -COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS -# # SSTNAME 4 # # # # # +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 4 # # # # # 4 DROP TABLE t1; DROP TABLE 
t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result index d6177a3f019..f55662183ca 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result @@ -9,6 +9,7 @@ CF_FLAGS 1 __system__ [0] select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; count(*) 3 +select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; @@ -22,6 +23,11 @@ CF_FLAGS 1 __system__ [0] select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; count(*) 6 +set global rocksdb_force_flush_memtable_now = true; +set global rocksdb_compact_cf='default'; +select case when VALUE-@keysIn >= 3 then 'true' else 'false' end from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; +case when VALUE-@keysIn >= 3 then 'true' else 'false' end +true CREATE INDEX tindex1 on t1 (i1); CREATE INDEX tindex2 on t1 (i2); select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result index 5f6df197c94..a9f9c0b49e8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result @@ -121,7 +121,7 @@ a b 5 loaded 7 test DROP TABLE t1; -set session rocksdb_skip_unique_check=1; +set session unique_checks=0; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result 
b/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result new file mode 100644 index 00000000000..bc5739c2d96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a int primary key) ENGINE=ROCKSDB; +insert into t1 values (1); +set global rocksdb_force_flush_memtable_now=1; +select * from t1 where a = 1; +a +1 +select * from t1 where a = 1; +a +1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index b6a17d90221..9fb28791834 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -890,8 +890,8 @@ rocksdb_deadlock_detect OFF rocksdb_debug_optimizer_no_zero_cardinality ON rocksdb_default_cf_options rocksdb_delete_obsolete_files_period_micros 21600000000 -rocksdb_disable_2pc ON rocksdb_disabledatasync OFF +rocksdb_enable_2pc ON rocksdb_enable_bulk_load_api ON rocksdb_enable_thread_tracking OFF rocksdb_enable_write_thread_adaptive_yield OFF @@ -924,17 +924,17 @@ rocksdb_override_cf_options rocksdb_paranoid_checks ON rocksdb_pause_background_work ON rocksdb_perf_context_level 0 +rocksdb_persistent_cache_path +rocksdb_persistent_cache_size 0 rocksdb_pin_l0_filter_and_index_blocks_in_cache ON rocksdb_print_snapshot_conflict_queries OFF rocksdb_rate_limiter_bytes_per_sec 0 rocksdb_read_free_rpl_tables rocksdb_records_in_range 50 -rocksdb_rpl_skip_tx_api OFF rocksdb_seconds_between_stat_computes 3600 rocksdb_signal_drop_index_thread OFF rocksdb_skip_bloom_filter_on_read OFF rocksdb_skip_fill_cache OFF -rocksdb_skip_unique_check OFF rocksdb_skip_unique_check_tables .* rocksdb_stats_dump_period_sec 600 rocksdb_store_row_debug_checksums OFF @@ -2231,7 +2231,7 @@ DROP DATABASE test_db; # Issue #143: Split rocksdb_bulk_load option into two # CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; -SET 
rocksdb_skip_unique_check=1; +SET unique_checks=0; INSERT INTO t1 VALUES(1, 1); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(1, 3); @@ -2243,7 +2243,7 @@ INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1 TRUNCATE TABLE t1; SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; -SET rocksdb_skip_unique_check=0; +SET unique_checks=1; SET rocksdb_commit_in_the_middle=1; SET rocksdb_bulk_load_size=10; BEGIN; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result index 1e0c7a5adbf..35147ac7a15 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result @@ -6,9 +6,9 @@ Note #### Storing MySQL user name or password information in the master info rep DROP TABLE IF EXISTS t1; include/stop_slave.inc create table t1 (a int) engine=rocksdb; -show variables like 'rocksdb_rpl_skip_tx_api'; +show variables like 'rpl_skip_tx_api'; Variable_name Value -rocksdb_rpl_skip_tx_api ON +rpl_skip_tx_api ON include/start_slave.inc found DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result index 59ad709a595..64db56ca78e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -183,3 +183,24 @@ ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' INSERT INTO t2 VALUES (2,1); ERROR 23000: Duplicate entry '1' for key 'a' DROP TABLE t2; +# +# Issue #491 (https://github.com/facebook/mysql-5.6/issues/491) +# +CREATE TABLE t (a BLOB, PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t 
VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test index c806e46aa4d..90af6617794 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test @@ -13,7 +13,7 @@ USE mysqlslap; CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; --echo # 2PC enabled, MyRocks durability enabled -SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_enable_2pc=0; SET GLOBAL rocksdb_write_sync=1; --echo ## 2PC + durability + single thread @@ -28,7 +28,7 @@ select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' --echo # 2PC enabled, MyRocks durability disabled -SET GLOBAL rocksdb_disable_2pc=0; +SET GLOBAL rocksdb_enable_2pc=0; SET GLOBAL rocksdb_write_sync=0; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; @@ -41,7 +41,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa --echo # 2PC disabled, MyRocks durability enabled -SET GLOBAL rocksdb_disable_2pc=1; +SET GLOBAL rocksdb_enable_2pc=1; SET GLOBAL rocksdb_write_sync=1; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; @@ -58,7 +58,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa -SET GLOBAL rocksdb_disable_2pc=1; 
+SET GLOBAL rocksdb_enable_2pc=1; SET GLOBAL rocksdb_write_sync=0; DROP TABLE t1; DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test index ca9122bccd7..11134f16201 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test @@ -89,22 +89,11 @@ while ($i <= $max) { --echo # crash_during_index_creation_partition flush logs; ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="+d,myrocks_simulate_index_create_rollback"; ---echo # expected assertion failure from sql layer here for alter rollback -call mtr.add_suppression("Assertion `0' failed."); -call mtr.add_suppression("Attempting backtrace. You can use the following information to find out"); - ---error 2013 - +--error 1105 ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; - ---enable_reconnect ---source include/wait_until_connected_again.inc - SET SESSION debug="-d,myrocks_simulate_index_create_rollback"; - SHOW CREATE TABLE t1; # here, the index numbers should be higher because previously 4 index numbers diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test new file mode 100644 index 00000000000..375a63c3a38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test @@ -0,0 +1,82 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# test adding duplicate value before unique index +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +INSERT INTO t1 (a,b) VALUES (4,5); + +# should cause error here, duplicate value on b +--error 1062 +ALTER 
TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +DROP TABLE t1; + +# test dup value AFTER unique index +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; + +# should error here, duplicate value on b +--error 1062 +INSERT INTO t1 (a,b) VALUES (4,5); + +# should succeed +INSERT INTO t1 (a,b) VALUES (5,8); + +SHOW CREATE TABLE t1; +SELECT * FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + +# test what happens when duplicate nulls exist +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, NULL); +INSERT INTO t1 (a, b) VALUES (3, NULL); + +# should pass, because in MySQL we allow multiple NULLS in unique key +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a, b) VALUES (4, NULL); + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + +## test case with multi-part key with nulls +CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (2,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (3,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (4,1,5); + +# should pass +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kbc); +DROP TABLE t1; + +## test case with table w/ no primary key, and we try to add unique key +CREATE TABLE t1 (a INT, b INT) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +# should fail, can't add unique index on table w/ no pk +--error 1105 +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); + +SHOW CREATE TABLE t1; +DROP TABLE t1; + + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test index 39bae56bea6..c20ab17ff6c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -12,6 +12,8 @@ DROP TABLE IF EXISTS t3; select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; + CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); @@ -21,6 +23,10 @@ INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +set global rocksdb_force_flush_memtable_now = true; +set global rocksdb_compact_cf='default'; +select case when VALUE-@keysIn >= 3 then 'true' else 'false' end from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; + CREATE INDEX tindex1 on t1 (i1); --let $start_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test index 837fa746ed7..1f59d5ce204 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test @@ -3,6 +3,5 @@ let $skip_unique_check = 0; --source loaddata.inc let $skip_unique_check = 1; -set session rocksdb_skip_unique_check=1; +set session unique_checks=0; --source loaddata.inc - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test new file mode 100644 index 00000000000..ec00ddee5db --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_cache_file_name= $MYSQLTEST_VARDIR/tmp/persistent_cache +--exec echo "wait" >$_expect_file_name + +# restart server with correct parameters +shutdown_server 10; +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name +--sleep 5 +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + + +# insert values and flush out of memtable +CREATE TABLE t1 (a int primary key) ENGINE=ROCKSDB; +insert into t1 values (1); +set global rocksdb_force_flush_memtable_now=1; + +# pull data through cache +select * from t1 where a = 1; + +# restart server to re-read cache +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name +--sleep 5 +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# pull values from cache again +select * from t1 where a = 1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index 7ec15d157a7..ed26d036e9a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -1768,7 +1768,7 @@ DROP DATABASE test_db; --echo # Issue #143: Split rocksdb_bulk_load option into two --echo # CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; -SET rocksdb_skip_unique_check=1; +SET unique_checks=0; INSERT INTO t1 VALUES(1, 1); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(1, 3); @@ -1779,7 +1779,7 @@ REPLACE INTO t1 VALUES(4, 4); 
INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; TRUNCATE TABLE t1; SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; -SET rocksdb_skip_unique_check=0; +SET unique_checks=1; SET rocksdb_commit_in_the_middle=1; SET rocksdb_bulk_load_size=10; BEGIN; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf index f5b725932e4..f4257d80fdb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf @@ -5,4 +5,4 @@ binlog_format=row [mysqld.2] binlog_format=row slave_parallel_workers=1 -rocksdb_rpl_skip_tx_api=ON +rpl_skip_tx_api=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test index 19499765140..452a7989b0b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test @@ -17,7 +17,7 @@ while ($aa < 1000) { --enable_query_log connection slave; -show variables like 'rocksdb_rpl_skip_tx_api'; +show variables like 'rpl_skip_tx_api'; --source include/start_slave.inc --let $it=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test index 28b52f262cc..4bc6d6262f7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -31,3 +31,21 @@ INSERT INTO t2 VALUES (1,1); --error ER_DUP_ENTRY INSERT INTO t2 VALUES (2,1); DROP TABLE t2; + +--echo # +--echo # Issue #491 (https://github.com/facebook/mysql-5.6/issues/491) +--echo # +CREATE TABLE t (a BLOB, PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; + +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; + +CREATE TABLE t (a 
VARCHAR(255), PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh new file mode 100755 index 00000000000..6174e5d1864 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh @@ -0,0 +1,2 @@ +src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/" +python -c "import socket as s; sock = s.socket(s.AF_UNIX); sock.bind('${src_data_dir}/slocket')" diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh new file mode 100755 index 00000000000..ed0b3cb5c1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh @@ -0,0 +1,43 @@ +set -e + +# Insert 10 batches of 10 records each to a table with following schema: +# create table slocket.t1 ( +# `id` int(10) not null auto_increment, +# `k` int(10), +# `data` varchar(2048), +# primary key (`id`), +# key (`k`) +# ) engine=innodb; + +MAX_INSERTS=10 +MAX_ROWS_PER_INSERT=10 + +insertData() { + for ((i=1; i<=$MAX_INSERTS; i++)); + do + stmt='INSERT INTO slocket.t1 values' + for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++)); + do + k=$RANDOM + data=$(head -c 2048 /dev/urandom|tr -cd 'a-zA-Z0-9') + stmt=$stmt' (NULL, '$k', "'$data'")' + if [ $j -lt $MAX_ROWS_PER_INSERT ]; then + stmt=$stmt',' + fi + done + stmt=$stmt';' + $MYSQL --defaults-group-suffix=.1 -e "$stmt" + done +} + +NUM_PARALLEL_INSERTS=25 +pids=() +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + insertData & + pids+=($!) 
+done +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + wait ${pids[k]} +done diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh new file mode 100755 index 00000000000..0c2c71aad68 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh @@ -0,0 +1,2 @@ +src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/" +rm "${src_data_dir}/slocket" diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc new file mode 100644 index 00000000000..ce889164219 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc @@ -0,0 +1,10 @@ +connection server_1; +create database slocket; + +create table slocket.t1 ( + `id` int(10) not null auto_increment, + `k` int(10), + `data` varchar(2048), + primary key (`id`), + key (`k`) +) engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh index b83b957cff0..ef505e4b888 100755 --- a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh @@ -39,6 +39,11 @@ elif [ "$STREAM_TYPE" == 'xbstream' ]; then --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ $COPY_LOG | xbstream -x \ --directory=$backup_dir" +elif [ "$STREAM_TYPE" == "xbstream_socket" ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --socket=${MASTER_MYSOCK} \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | xbstream -x \ + --directory=$backup_dir" else BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --stream=wdt \ --port=${MASTER_MYPORT} --destination=localhost --backup_dir=$backup_dir \ diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result 
b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result new file mode 100644 index 00000000000..9accd18b294 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result @@ -0,0 +1,41 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +create database slocket; +create table slocket.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +select count(*) from slocket.t1; +count(*) +2500 +drop database slocket; +drop database db1; +drop database slocket; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result new file mode 100644 index 00000000000..d3f2ebc4e6f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) 
+250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test new file mode 100644 index 00000000000..14ad8d23376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test @@ -0,0 +1,46 @@ +source suite/rocksdb_hotbackup/include/setup.inc; +source suite/rocksdb_hotbackup/include/setup_slocket.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--exec suite/rocksdb_hotbackup/include/load_data_slocket.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; +select count(*) from slocket.t1; + +connection server_1; +drop database slocket; +connection server_2; +drop database db1; +drop database slocket; + +--exec sleep 2 +--exec suite/rocksdb_hotbackup/include/create_slocket_socket.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +--exec suite/rocksdb_hotbackup/include/remove_slocket_socket.sh 2>&1 + +source suite/rocksdb_hotbackup/include/cleanup.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test new file mode 100644 index 00000000000..28edff072e7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--let $rpl_server_number= 2 
+--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=xbstream_socket suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result index 71c0d6d5dbf..7a7400f17e1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS t1; -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; 'con1' SET SESSION debug="d,crash_commit_after_log"; @@ -7,11 +7,11 @@ SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go'; insert into t1 values (1, 1, "iamtheogthealphaandomega");; 'con2' insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; SET GLOBAL SYNC_BINLOG = 0; SET DEBUG_SYNC='now WAIT_FOR parked'; -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET GLOBAL ROCKSDB_WRITE_SYNC = ON; SET GLOBAL SYNC_BINLOG = 1; insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush"); diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result index 325df314216..59d1a231327 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result @@ -1,18 +1,18 @@ DROP TABLE IF EXISTS t1; create table t1 
(a int primary key, msg varchar(255)) engine=rocksdb; -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET SESSION debug="d,crash_commit_after_prepare"; insert into t1 values (1, 'dogz'); select * from t1; a msg -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET SESSION debug="d,crash_commit_after_log"; insert into t1 values (2, 'catz'), (3, 'men'); select * from t1; a msg 2 catz 3 men -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET SESSION debug="d,crash_commit_after"; insert into t1 values (4, 'cars'), (5, 'foo'); select * from t1; @@ -21,7 +21,7 @@ a msg 3 men 4 cars 5 foo -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; SET SESSION debug="d,crash_commit_after_log"; insert into t1 values (6, 'shipz'), (7, 'tankz'); select * from t1; @@ -30,7 +30,7 @@ a msg 3 men 4 cars 5 foo -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; SET SESSION debug="d,crash_commit_after"; insert into t1 values (8, 'space'), (9, 'time'); select * from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result new file mode 100644 index 00000000000..e0dbc92cdf5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result @@ -0,0 +1,27 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave"); +set global rpl_skip_tx_api=ON; +set global rocksdb_unsafe_for_binlog=1; +create table t1(a int); +set session binlog_format=STATEMENT; +insert into t1 values(1); +include/wait_for_slave_sql_error.inc [errno=1756] +Last_SQL_Error = 'Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave. rpl_skip_tx_api recovery should only be used when master's binlog format is ROW.' +"Table after error" +select * from t1; +a +set global rpl_skip_tx_api=OFF; +include/start_slave.inc +include/sync_slave_sql_with_master.inc +"Table after error fixed" +select * from t1; +a +1 +drop table t1; +set global rocksdb_unsafe_for_binlog=0; +set global rpl_skip_tx_api=0; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test index 69d2e87e40e..f47f83b0bd2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test @@ -10,7 +10,7 @@ DROP TABLE IF EXISTS t1; --enable_warnings -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; connect (con1, localhost, root,,); @@ -35,7 +35,7 @@ insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); # Disable 2PC and syncing for faster inserting of dummy rows # These rows only purpose is to rotate the binlog -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; SET GLOBAL SYNC_BINLOG = 0; @@ -50,7 +50,7 @@ while ($pk < 1000000) { # re-enable 2PC an syncing then write to trigger a flush # before we trigger the crash to simulate full-durability -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; SET GLOBAL ROCKSDB_WRITE_SYNC = ON; SET 
GLOBAL SYNC_BINLOG = 1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf index 454c9eb887a..71c81a892ed 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf @@ -2,8 +2,10 @@ [mysqld.1] log_slave_updates +rocksdb_enable_2pc=OFF [mysqld.2] relay_log_recovery=1 relay_log_info_repository=TABLE log_slave_updates +rocksdb_enable_2pc=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf index b6e8beb8fcb..c69c987b0d9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf @@ -4,6 +4,7 @@ log_slave_updates gtid_mode=ON enforce_gtid_consistency=ON +rocksdb_enable_2pc=OFF [mysqld.2] sync_relay_log_info=100 @@ -12,3 +13,4 @@ relay_log_info_repository=FILE log_slave_updates gtid_mode=ON enforce_gtid_consistency=ON +rocksdb_enable_2pc=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test index 5f99e1aabd1..ea1fe3e34d6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test @@ -8,7 +8,7 @@ DROP TABLE IF EXISTS t1; create table t1 (a int primary key, msg varchar(255)) engine=rocksdb; -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="d,crash_commit_after_prepare"; --error 0,2013 @@ -17,7 +17,7 @@ insert into t1 values (1, 'dogz'); --source include/wait_until_connected_again.inc select * from t1; -SET 
GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="d,crash_commit_after_log"; --error 0,2013 @@ -26,7 +26,7 @@ insert into t1 values (2, 'catz'), (3, 'men'); --source include/wait_until_connected_again.inc select * from t1; -SET GLOBAL ROCKSDB_DISABLE_2PC = OFF; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="d,crash_commit_after"; --error 0,2013 @@ -35,7 +35,7 @@ insert into t1 values (4, 'cars'), (5, 'foo'); --source include/wait_until_connected_again.inc select * from t1; -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="d,crash_commit_after_log"; --error 0,2013 @@ -44,7 +44,7 @@ insert into t1 values (6, 'shipz'), (7, 'tankz'); --source include/wait_until_connected_again.inc select * from t1; -SET GLOBAL ROCKSDB_DISABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect SET SESSION debug="d,crash_commit_after"; --error 0,2013 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt new file mode 100644 index 00000000000..39bb3238861 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--binlog_format=STATEMENT --default-storage-engine=rocksdb diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt new file mode 100644 index 00000000000..826f1ee9cb6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt @@ -0,0 +1,2 
@@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--sync_binlog=1000 --relay_log_recovery=1 --default-storage-engine=rocksdb diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test new file mode 100644 index 00000000000..22151d14547 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test @@ -0,0 +1,51 @@ +# Checks if the slave stops executing transactions when master's binlog format +# is STATEMENT but rpl_skip_tx_api is enabled +-- source include/master-slave.inc + +call mtr.add_suppression("Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave"); + +connection slave; +let $old_rpl_skip_tx_api= `SELECT @@global.rpl_skip_tx_api`; +set global rpl_skip_tx_api=ON; + +connection master; +let $old_rocksdb_unsafe_for_binlog= `SELECT @@global.rocksdb_unsafe_for_binlog`; +set global rocksdb_unsafe_for_binlog=1; +create table t1(a int); +set session binlog_format=STATEMENT; +insert into t1 values(1); + +# Wait till we hit the binlog format mismatch error +connection slave; +let $slave_sql_errno= convert_error(ER_MTS_INCONSISTENT_DATA); # 1756 +let $show_slave_sql_error= 1; +source include/wait_for_slave_sql_error.inc; + +# Print table +connection slave; +echo "Table after error"; +select * from t1; + +connection slave; +# Turn off rpl_skip_tx_api and start the slave again +set global rpl_skip_tx_api=OFF; +source include/start_slave.inc; + +connection slave; +source include/sync_slave_sql_with_master.inc; + +connection slave; +# Print table again +echo "Table after error fixed"; +select * from t1; + +# Cleanup +connection master; +drop table t1; +eval set global rocksdb_unsafe_for_binlog=$old_rocksdb_unsafe_for_binlog; +sync_slave_with_master; + +connection slave; +eval set global rpl_skip_tx_api=$old_rpl_skip_tx_api; + +-- source include/rpl_end.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result index 159d6a983c8..9f21825d262 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result @@ -9,5 +9,7 @@ There should be *no* long test name listed below: select variable_name as `There should be *no* variables listed below:` from t2 left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; There should be *no* variables listed below: +ROCKSDB_ENABLE_2PC +ROCKSDB_ENABLE_2PC drop table t1; drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result index 708dd462dfe..686f8bcd39a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result @@ -6,70 +6,70 @@ INSERT INTO valid_values VALUES('off'); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); INSERT INTO invalid_values VALUES('\'bbb\''); -SET @start_global_value = @@global.ROCKSDB_DISABLE_2PC; +SET @start_global_value = @@global.ROCKSDB_ENABLE_2PC; SELECT @start_global_value; @start_global_value 1 '# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 1" -SET @@global.ROCKSDB_DISABLE_2PC = 1; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 1" +SET @@global.ROCKSDB_ENABLE_2PC = 1; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 "Setting the global scope variable back to default" -SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; 
+SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 0" -SET @@global.ROCKSDB_DISABLE_2PC = 0; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 0" +SET @@global.ROCKSDB_ENABLE_2PC = 0; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 0 "Setting the global scope variable back to default" -SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to on" -SET @@global.ROCKSDB_DISABLE_2PC = on; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to on" +SET @@global.ROCKSDB_ENABLE_2PC = on; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 "Setting the global scope variable back to default" -SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to off" -SET @@global.ROCKSDB_DISABLE_2PC = off; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to off" +SET @@global.ROCKSDB_ENABLE_2PC = off; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 0 "Setting the global scope variable back to default" -SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -"Trying to set variable @@session.ROCKSDB_DISABLE_2PC to 444. 
It should fail because it is not session." -SET @@session.ROCKSDB_DISABLE_2PC = 444; -ERROR HY000: Variable 'rocksdb_disable_2pc' is a GLOBAL variable and should be set with SET GLOBAL +"Trying to set variable @@session.ROCKSDB_ENABLE_2PC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_ENABLE_2PC = 444; +ERROR HY000: Variable 'rocksdb_enable_2pc' is a GLOBAL variable and should be set with SET GLOBAL '# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'aaa'" -SET @@global.ROCKSDB_DISABLE_2PC = 'aaa'; +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 'aaa'" +SET @@global.ROCKSDB_ENABLE_2PC = 'aaa'; Got one of the listed errors -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'bbb'" -SET @@global.ROCKSDB_DISABLE_2PC = 'bbb'; +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 'bbb'" +SET @@global.ROCKSDB_ENABLE_2PC = 'bbb'; Got one of the listed errors -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 -SET @@global.ROCKSDB_DISABLE_2PC = @start_global_value; -SELECT @@global.ROCKSDB_DISABLE_2PC; -@@global.ROCKSDB_DISABLE_2PC +SET @@global.ROCKSDB_ENABLE_2PC = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC 1 DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result index 903e393d5ea..714f2101127 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result @@ -1,7 +1,46 @@ 
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(64); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'abc\''); SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; SELECT @start_global_value; @start_global_value 1 -"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444; -ERROR HY000: Variable 'rocksdb_max_background_compactions' is a read only variable +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 1" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 1; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 64" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 64; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +64 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Trying to set variable @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_max_background_compactions' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 'abc'" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 'abc'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = @start_global_value; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result new file mode 100644 index 00000000000..10b187d44e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result @@ -0,0 +1,13 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_PATH; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_PATH to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_PERSISTENT_CACHE_PATH = 444; +ERROR HY000: Variable 'rocksdb_persistent_cache_path' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result new file mode 100644 index 00000000000..87440ae0bcb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_persistent_cache_size' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result deleted file mode 100644 index 5f6522e4488..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result +++ /dev/null @@ -1,68 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -SET @start_global_value = @@global.ROCKSDB_RPL_SKIP_TX_API; -SELECT @start_global_value; -@start_global_value -1 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 1" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = 1; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 0" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = 0; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to on" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = on; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Setting the global scope variable back to 
default" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to off" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = off; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -"Trying to set variable @@session.ROCKSDB_RPL_SKIP_TX_API to 444. It should fail because it is not session." -SET @@session.ROCKSDB_RPL_SKIP_TX_API = 444; -ERROR HY000: Variable 'rocksdb_rpl_skip_tx_api' is a GLOBAL variable and should be set with SET GLOBAL -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 'aaa'" -SET @@global.ROCKSDB_RPL_SKIP_TX_API = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -SET @@global.ROCKSDB_RPL_SKIP_TX_API = @start_global_value; -SELECT @@global.ROCKSDB_RPL_SKIP_TX_API; -@@global.ROCKSDB_RPL_SKIP_TX_API -1 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result deleted file mode 100644 index a1244723b05..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result +++ /dev/null @@ -1,163 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); -INSERT INTO valid_values VALUES('true'); -INSERT INTO valid_values VALUES('false'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values 
VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); -SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -SELECT @start_global_value; -@start_global_value -0 -SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -SELECT @start_session_value; -@start_session_value -0 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 0" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 0; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 1" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 1; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to on" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = on; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to off" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = off; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to true" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = true; 
-SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to false" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = false; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -'# Setting to valid values in session scope#' -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 0" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 0; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 1" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 1; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to on" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = on; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to off" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = off; -SELECT 
@@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to true" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = true; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to false" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = false; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'aaa'" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'bbb'" -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'bbb'; -Got one of the listed errors -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = @start_global_value; -SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK; -@@global.ROCKSDB_SKIP_UNIQUE_CHECK -0 -SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = @start_session_value; -SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK; -@@session.ROCKSDB_SKIP_UNIQUE_CHECK -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test index 061a4c902b5..1badcef0347 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test @@ -10,7 +10,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); INSERT INTO invalid_values VALUES('\'bbb\''); ---let $sys_var=ROCKSDB_DISABLE_2PC +--let $sys_var=ROCKSDB_ENABLE_2PC --let $read_only=0 --let $session=0 --let $sticky=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test index 441c0577c10..5fcc4e6ef25 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test @@ -1,7 +1,16 @@ --source include/have_rocksdb.inc +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(64); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'abc\''); + --let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS ---let $read_only=1 +--let $read_only=0 --let $session=0 --source suite/sys_vars/inc/rocksdb_sys_var.inc +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test new file mode 100644 index 00000000000..c0840274253 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test @@ -0,0 +1,16 @@ +--source 
include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_PERSISTENT_CACHE_PATH +--let $read_only=1 +--let $session=0 +--let $sticky=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test similarity index 67% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test index f6c0a219a9f..32fafcaf232 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test @@ -2,15 +2,13 @@ CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES(1024); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); ---let $sys_var=ROCKSDB_RPL_SKIP_TX_API ---let $read_only=0 +--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE +--let $read_only=1 --let $session=0 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test deleted file mode 100644 index fe90a49365b..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test +++ /dev/null @@ -1,21 +0,0 @@ 
---source include/have_rocksdb.inc - -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); -INSERT INTO valid_values VALUES('true'); -INSERT INTO valid_values VALUES('false'); - -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); - ---let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK ---let $read_only=0 ---let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc - -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc index e5b5b25792d..26a967bc088 100644 --- a/storage/rocksdb/properties_collector.cc +++ b/storage/rocksdb/properties_collector.cc @@ -41,21 +41,15 @@ std::atomic rocksdb_num_sst_entry_merge(0); std::atomic rocksdb_num_sst_entry_other(0); my_bool rocksdb_compaction_sequential_deletes_count_sd = false; -Rdb_tbl_prop_coll::Rdb_tbl_prop_coll( - Rdb_ddl_manager* const ddl_manager, - const Rdb_compact_params ¶ms, - const uint32_t &cf_id, - const uint8_t &table_stats_sampling_pct -) : - m_cf_id(cf_id), - m_ddl_manager(ddl_manager), - m_last_stats(nullptr), - m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l), - m_file_size(0), m_params(params), - m_table_stats_sampling_pct(table_stats_sampling_pct), - m_seed(time(nullptr)), - m_card_adj_extra(1.) 
-{ +Rdb_tbl_prop_coll::Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, + const Rdb_compact_params ¶ms, + const uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct) + : m_cf_id(cf_id), m_ddl_manager(ddl_manager), m_last_stats(nullptr), + m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l), + m_file_size(0), m_params(params), + m_table_stats_sampling_pct(table_stats_sampling_pct), + m_seed(time(nullptr)), m_card_adj_extra(1.) { DBUG_ASSERT(ddl_manager != nullptr); // We need to adjust the index cardinality numbers based on the sampling @@ -71,12 +65,11 @@ Rdb_tbl_prop_coll::Rdb_tbl_prop_coll( /* This function is called by RocksDB for every key in the SST file */ -rocksdb::Status -Rdb_tbl_prop_coll::AddUserKey( - const rocksdb::Slice& key, const rocksdb::Slice& value, - rocksdb::EntryType type, rocksdb::SequenceNumber seq, - uint64_t file_size -) { +rocksdb::Status Rdb_tbl_prop_coll::AddUserKey(const rocksdb::Slice &key, + const rocksdb::Slice &value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size) { if (key.size() >= 4) { AdjustDeletedRows(type); @@ -88,10 +81,8 @@ Rdb_tbl_prop_coll::AddUserKey( return rocksdb::Status::OK(); } -void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) -{ - if (m_params.m_window > 0) - { +void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) { + if (m_params.m_window > 0) { // record the "is deleted" flag into the sliding window // the sliding window is implemented as a circular buffer // in m_deleted_rows_window vector @@ -99,42 +90,33 @@ void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) // m_rows % m_deleted_rows_window.size() // m_deleted_rows is the current number of 1's in the vector // --update the counter for the element which will be overridden - const bool is_delete= (type == rocksdb::kEntryDelete || - (type == rocksdb::kEntrySingleDelete && - rocksdb_compaction_sequential_deletes_count_sd)); + const bool is_delete = (type 
== rocksdb::kEntryDelete || + (type == rocksdb::kEntrySingleDelete && + rocksdb_compaction_sequential_deletes_count_sd)); // Only make changes if the value at the current position needs to change - if (is_delete != m_deleted_rows_window[m_window_pos]) - { + if (is_delete != m_deleted_rows_window[m_window_pos]) { // Set or clear the flag at the current position as appropriate - m_deleted_rows_window[m_window_pos]= is_delete; - if (!is_delete) - { + m_deleted_rows_window[m_window_pos] = is_delete; + if (!is_delete) { m_deleted_rows--; - } - else if (++m_deleted_rows > m_max_deleted_rows) - { + } else if (++m_deleted_rows > m_max_deleted_rows) { m_max_deleted_rows = m_deleted_rows; } } - if (++m_window_pos == m_params.m_window) - { + if (++m_window_pos == m_params.m_window) { m_window_pos = 0; } } } -Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( - const rocksdb::Slice& key) -{ - GL_INDEX_ID gl_index_id = { - .cf_id = m_cf_id, - .index_id = rdb_netbuf_to_uint32(reinterpret_cast(key.data())) - }; +Rdb_index_stats *Rdb_tbl_prop_coll::AccessStats(const rocksdb::Slice &key) { + GL_INDEX_ID gl_index_id = {.cf_id = m_cf_id, + .index_id = rdb_netbuf_to_uint32( + reinterpret_cast(key.data()))}; - if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id) - { + if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id) { m_keydef = nullptr; // starting a new table @@ -142,8 +124,7 @@ Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( m_stats.emplace_back(gl_index_id); m_last_stats = &m_stats.back(); - if (m_ddl_manager) - { + if (m_ddl_manager) { // safe_find() returns a std::shared_ptr with the count // incremented (so it can't be deleted out from under us) and with // the mutex locked (if setup has not occurred yet). We must make @@ -152,8 +133,7 @@ Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( // when we are switching to a new Rdb_key_def and when this object // is destructed. 
m_keydef = m_ddl_manager->safe_find(gl_index_id); - if (m_keydef != nullptr) - { + if (m_keydef != nullptr) { // resize the array to the number of columns. // It will be initialized with zeroes m_last_stats->m_distinct_keys_per_prefix.resize( @@ -167,13 +147,13 @@ Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats( return m_last_stats; } -void Rdb_tbl_prop_coll::CollectStatsForRow( - const rocksdb::Slice& key, const rocksdb::Slice& value, - const rocksdb::EntryType &type, const uint64_t &file_size) -{ +void Rdb_tbl_prop_coll::CollectStatsForRow(const rocksdb::Slice &key, + const rocksdb::Slice &value, + const rocksdb::EntryType &type, + const uint64_t &file_size) { const auto stats = AccessStats(key); - stats->m_data_size += key.size()+value.size(); + stats->m_data_size += key.size() + value.size(); // Incrementing per-index entry-type statistics switch (type) { @@ -195,7 +175,8 @@ void Rdb_tbl_prop_coll::CollectStatsForRow( default: // NO_LINT_DEBUG sql_print_error("RocksDB: Unexpected entry type found: %u. 
" - "This should not happen so aborting the system.", type); + "This should not happen so aborting the system.", + type); abort_with_stack_traces(); break; } @@ -203,23 +184,19 @@ void Rdb_tbl_prop_coll::CollectStatsForRow( stats->m_actual_disk_size += file_size - m_file_size; m_file_size = file_size; - if (m_keydef != nullptr && ShouldCollectStats()) - { + if (m_keydef != nullptr && ShouldCollectStats()) { std::size_t column = 0; bool new_key = true; - if (!m_last_key.empty()) - { + if (!m_last_key.empty()) { rocksdb::Slice last(m_last_key.data(), m_last_key.size()); new_key = (m_keydef->compare_keys(&last, &key, &column) == 0); } - if (new_key) - { + if (new_key) { DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size()); - for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) - { + for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) { stats->m_distinct_keys_per_prefix[i]++; } @@ -228,23 +205,20 @@ void Rdb_tbl_prop_coll::CollectStatsForRow( // if one of the first n-1 columns is different // If the n-1 prefix is the same, no sense in storing // the new key - if (column < stats->m_distinct_keys_per_prefix.size()) - { + if (column < stats->m_distinct_keys_per_prefix.size()) { m_last_key.assign(key.data(), key.size()); } } } } -const char* Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__"; +const char *Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__"; /* This function is called by RocksDB to compute properties to store in sst file */ rocksdb::Status -Rdb_tbl_prop_coll::Finish( - rocksdb::UserCollectedProperties* const properties -) { +Rdb_tbl_prop_coll::Finish(rocksdb::UserCollectedProperties *const properties) { uint64_t num_sst_entry_put = 0; uint64_t num_sst_entry_delete = 0; uint64_t num_sst_entry_singledelete = 0; @@ -253,8 +227,7 @@ Rdb_tbl_prop_coll::Finish( DBUG_ASSERT(properties != nullptr); - for (auto it = m_stats.begin(); it != m_stats.end(); it++) - { + for (auto it = m_stats.begin(); it != 
m_stats.end(); it++) { num_sst_entry_put += it->m_rows; num_sst_entry_delete += it->m_entry_deletes; num_sst_entry_singledelete += it->m_entry_single_deletes; @@ -262,42 +235,35 @@ Rdb_tbl_prop_coll::Finish( num_sst_entry_other += it->m_entry_others; } - if (num_sst_entry_put > 0) - { + if (num_sst_entry_put > 0) { rocksdb_num_sst_entry_put += num_sst_entry_put; } - if (num_sst_entry_delete > 0) - { + if (num_sst_entry_delete > 0) { rocksdb_num_sst_entry_delete += num_sst_entry_delete; } - if (num_sst_entry_singledelete > 0) - { + if (num_sst_entry_singledelete > 0) { rocksdb_num_sst_entry_singledelete += num_sst_entry_singledelete; } - if (num_sst_entry_merge > 0) - { + if (num_sst_entry_merge > 0) { rocksdb_num_sst_entry_merge += num_sst_entry_merge; } - if (num_sst_entry_other > 0) - { + if (num_sst_entry_other > 0) { rocksdb_num_sst_entry_other += num_sst_entry_other; } properties->insert({INDEXSTATS_KEY, - Rdb_index_stats::materialize(m_stats, m_card_adj_extra)}); + Rdb_index_stats::materialize(m_stats, m_card_adj_extra)}); return rocksdb::Status::OK(); } bool Rdb_tbl_prop_coll::NeedCompact() const { - return - m_params.m_deletes && - (m_params.m_window > 0) && - (m_file_size > m_params.m_file_size) && - (m_max_deleted_rows > m_params.m_deletes); + return m_params.m_deletes && (m_params.m_window > 0) && + (m_file_size > m_params.m_file_size) && + (m_max_deleted_rows > m_params.m_deletes); } bool Rdb_tbl_prop_coll::ShouldCollectStats() { @@ -307,9 +273,9 @@ bool Rdb_tbl_prop_coll::ShouldCollectStats() { return true; } - const int val = rand_r(&m_seed) % - (RDB_TBL_STATS_SAMPLE_PCT_MAX - RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + - RDB_TBL_STATS_SAMPLE_PCT_MIN; + const int val = rand_r(&m_seed) % (RDB_TBL_STATS_SAMPLE_PCT_MAX - + RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + + RDB_TBL_STATS_SAMPLE_PCT_MIN; DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN); DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX); @@ -337,14 +303,11 @@ Rdb_tbl_prop_coll::GetReadableProperties() const { } 
s.append(GetReadableStats(it)); } - #endif +#endif return rocksdb::UserCollectedProperties{{INDEXSTATS_KEY, s}}; } -std::string -Rdb_tbl_prop_coll::GetReadableStats( - const Rdb_index_stats& it -) { +std::string Rdb_tbl_prop_coll::GetReadableStats(const Rdb_index_stats &it) { std::string s; s.append("("); s.append(std::to_string(it.m_gl_index_id.cf_id)); @@ -380,28 +343,24 @@ Rdb_tbl_prop_coll::GetReadableStats( */ void Rdb_tbl_prop_coll::read_stats_from_tbl_props( - const std::shared_ptr& table_props, - std::vector* const out_stats_vector) -{ + const std::shared_ptr &table_props, + std::vector *const out_stats_vector) { DBUG_ASSERT(out_stats_vector != nullptr); - const auto& user_properties = table_props->user_collected_properties; + const auto &user_properties = table_props->user_collected_properties; const auto it2 = user_properties.find(std::string(INDEXSTATS_KEY)); - if (it2 != user_properties.end()) - { - auto result __attribute__((__unused__)) = + if (it2 != user_properties.end()) { + auto result MY_ATTRIBUTE((__unused__)) = Rdb_index_stats::unmaterialize(it2->second, out_stats_vector); DBUG_ASSERT(result == 0); } } - /* Serializes an array of Rdb_index_stats into a network string. */ -std::string Rdb_index_stats::materialize( - const std::vector& stats, - const float card_adj_extra) -{ +std::string +Rdb_index_stats::materialize(const std::vector &stats, + const float card_adj_extra) { String ret; rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES); for (const auto &i : stats) { @@ -422,105 +381,92 @@ std::string Rdb_index_stats::materialize( } } - return std::string((char*) ret.ptr(), ret.length()); + return std::string((char *)ret.ptr(), ret.length()); } /** @brief Reads an array of Rdb_index_stats from a string. 
- @return 1 if it detects any inconsistency in the input - @return 0 if completes successfully + @return HA_EXIT_FAILURE if it detects any inconsistency in the input + @return HA_EXIT_SUCCESS if completes successfully */ -int Rdb_index_stats::unmaterialize( - const std::string& s, std::vector* const ret) -{ - const uchar* p= rdb_std_str_to_uchar_ptr(s); - const uchar* const p2= p + s.size(); +int Rdb_index_stats::unmaterialize(const std::string &s, + std::vector *const ret) { + const uchar *p = rdb_std_str_to_uchar_ptr(s); + const uchar *const p2 = p + s.size(); DBUG_ASSERT(ret != nullptr); - if (p+2 > p2) - { - return 1; + if (p + 2 > p2) { + return HA_EXIT_FAILURE; } - const int version= rdb_netbuf_read_uint16(&p); + const int version = rdb_netbuf_read_uint16(&p); Rdb_index_stats stats; // Make sure version is within supported range. if (version < INDEX_STATS_VERSION_INITIAL || - version > INDEX_STATS_VERSION_ENTRY_TYPES) - { + version > INDEX_STATS_VERSION_ENTRY_TYPES) { // NO_LINT_DEBUG sql_print_error("Index stats version %d was outside of supported range. 
" - "This should not happen so aborting the system.", version); + "This should not happen so aborting the system.", + version); abort_with_stack_traces(); } - size_t needed = sizeof(stats.m_gl_index_id.cf_id)+ - sizeof(stats.m_gl_index_id.index_id)+ - sizeof(stats.m_data_size)+ - sizeof(stats.m_rows)+ - sizeof(stats.m_actual_disk_size)+ - sizeof(uint64); - if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) - { - needed += sizeof(stats.m_entry_deletes)+ - sizeof(stats.m_entry_single_deletes)+ - sizeof(stats.m_entry_merges)+ - sizeof(stats.m_entry_others); + size_t needed = sizeof(stats.m_gl_index_id.cf_id) + + sizeof(stats.m_gl_index_id.index_id) + + sizeof(stats.m_data_size) + sizeof(stats.m_rows) + + sizeof(stats.m_actual_disk_size) + sizeof(uint64); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) { + needed += sizeof(stats.m_entry_deletes) + + sizeof(stats.m_entry_single_deletes) + + sizeof(stats.m_entry_merges) + sizeof(stats.m_entry_others); } - while (p < p2) - { - if (p+needed > p2) - { - return 1; + while (p < p2) { + if (p + needed > p2) { + return HA_EXIT_FAILURE; } rdb_netbuf_read_gl_index(&p, &stats.m_gl_index_id); - stats.m_data_size= rdb_netbuf_read_uint64(&p); - stats.m_rows= rdb_netbuf_read_uint64(&p); - stats.m_actual_disk_size= rdb_netbuf_read_uint64(&p); + stats.m_data_size = rdb_netbuf_read_uint64(&p); + stats.m_rows = rdb_netbuf_read_uint64(&p); + stats.m_actual_disk_size = rdb_netbuf_read_uint64(&p); stats.m_distinct_keys_per_prefix.resize(rdb_netbuf_read_uint64(&p)); - if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) - { - stats.m_entry_deletes= rdb_netbuf_read_uint64(&p); - stats.m_entry_single_deletes= rdb_netbuf_read_uint64(&p); - stats.m_entry_merges= rdb_netbuf_read_uint64(&p); - stats.m_entry_others= rdb_netbuf_read_uint64(&p); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) { + stats.m_entry_deletes = rdb_netbuf_read_uint64(&p); + stats.m_entry_single_deletes = rdb_netbuf_read_uint64(&p); + stats.m_entry_merges = 
rdb_netbuf_read_uint64(&p); + stats.m_entry_others = rdb_netbuf_read_uint64(&p); } - if (p+stats.m_distinct_keys_per_prefix.size() - *sizeof(stats.m_distinct_keys_per_prefix[0]) > p2) - { - return 1; + if (p + + stats.m_distinct_keys_per_prefix.size() * + sizeof(stats.m_distinct_keys_per_prefix[0]) > + p2) { + return HA_EXIT_FAILURE; } - for (std::size_t i= 0; i < stats.m_distinct_keys_per_prefix.size(); i++) - { - stats.m_distinct_keys_per_prefix[i]= rdb_netbuf_read_uint64(&p); + for (std::size_t i = 0; i < stats.m_distinct_keys_per_prefix.size(); i++) { + stats.m_distinct_keys_per_prefix[i] = rdb_netbuf_read_uint64(&p); } ret->push_back(stats); } - return 0; + return HA_EXIT_SUCCESS; } /* Merges one Rdb_index_stats into another. Can be used to come up with the stats for the index based on stats for each sst */ -void Rdb_index_stats::merge( - const Rdb_index_stats& s, const bool &increment, - const int64_t &estimated_data_len) -{ +void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment, + const int64_t &estimated_data_len) { std::size_t i; DBUG_ASSERT(estimated_data_len >= 0); m_gl_index_id = s.m_gl_index_id; - if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size()) - { + if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size()) { m_distinct_keys_per_prefix.resize(s.m_distinct_keys_per_prefix.size()); } - if (increment) - { + if (increment) { m_rows += s.m_rows; m_data_size += s.m_data_size; @@ -531,32 +477,28 @@ void Rdb_index_stats::merge( we make a reasoned estimate for the data_file_length for the index in the current SST. */ - m_actual_disk_size += s.m_actual_disk_size ? s.m_actual_disk_size : - estimated_data_len * s.m_rows; + m_actual_disk_size += s.m_actual_disk_size ? 
s.m_actual_disk_size + : estimated_data_len * s.m_rows; m_entry_deletes += s.m_entry_deletes; m_entry_single_deletes += s.m_entry_single_deletes; m_entry_merges += s.m_entry_merges; m_entry_others += s.m_entry_others; - for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) - { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i]; } - } - else - { + } else { m_rows -= s.m_rows; m_data_size -= s.m_data_size; - m_actual_disk_size -= s.m_actual_disk_size ? s.m_actual_disk_size : - estimated_data_len * s.m_rows; + m_actual_disk_size -= s.m_actual_disk_size ? s.m_actual_disk_size + : estimated_data_len * s.m_rows; m_entry_deletes -= s.m_entry_deletes; m_entry_single_deletes -= s.m_entry_single_deletes; m_entry_merges -= s.m_entry_merges; m_entry_others -= s.m_entry_others; - for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) - { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i]; } } } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h index b0f8e5deb59..9ae519d95c7 100644 --- a/storage/rocksdb/properties_collector.h +++ b/storage/rocksdb/properties_collector.h @@ -40,102 +40,87 @@ extern std::atomic rocksdb_num_sst_entry_merge; extern std::atomic rocksdb_num_sst_entry_other; extern my_bool rocksdb_compaction_sequential_deletes_count_sd; - -struct Rdb_compact_params -{ +struct Rdb_compact_params { uint64_t m_deletes, m_window, m_file_size; }; - -struct Rdb_index_stats -{ - enum { - INDEX_STATS_VERSION_INITIAL= 1, - INDEX_STATS_VERSION_ENTRY_TYPES= 2, +struct Rdb_index_stats { + enum { + INDEX_STATS_VERSION_INITIAL = 1, + INDEX_STATS_VERSION_ENTRY_TYPES = 2, }; GL_INDEX_ID m_gl_index_id; int64_t m_data_size, m_rows, m_actual_disk_size; int64_t m_entry_deletes, m_entry_single_deletes; int64_t m_entry_merges, m_entry_others; 
std::vector m_distinct_keys_per_prefix; - std::string m_name; // name is not persisted + std::string m_name; // name is not persisted - static std::string materialize(const std::vector& stats, + static std::string materialize(const std::vector &stats, const float card_adj_extra); - static int unmaterialize(const std::string& s, - std::vector* const ret); + static int unmaterialize(const std::string &s, + std::vector *const ret); Rdb_index_stats() : Rdb_index_stats({0, 0}) {} - explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) : - m_gl_index_id(gl_index_id), - m_data_size(0), - m_rows(0), - m_actual_disk_size(0), - m_entry_deletes(0), - m_entry_single_deletes(0), - m_entry_merges(0), - m_entry_others(0) {} + explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) + : m_gl_index_id(gl_index_id), m_data_size(0), m_rows(0), + m_actual_disk_size(0), m_entry_deletes(0), m_entry_single_deletes(0), + m_entry_merges(0), m_entry_others(0) {} - void merge(const Rdb_index_stats& s, const bool &increment = true, + void merge(const Rdb_index_stats &s, const bool &increment = true, const int64_t &estimated_data_len = 0); }; - -class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector -{ - public: - Rdb_tbl_prop_coll( - Rdb_ddl_manager* const ddl_manager, - const Rdb_compact_params ¶ms, - const uint32_t &cf_id, - const uint8_t &table_stats_sampling_pct - ); +class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector { +public: + Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, + const Rdb_compact_params ¶ms, const uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct); /* Override parent class's virtual methods of interest. 
*/ - virtual rocksdb::Status AddUserKey( - const rocksdb::Slice& key, const rocksdb::Slice& value, - rocksdb::EntryType type, rocksdb::SequenceNumber seq, - uint64_t file_size); + virtual rocksdb::Status AddUserKey(const rocksdb::Slice &key, + const rocksdb::Slice &value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size); - virtual rocksdb::Status Finish(rocksdb::UserCollectedProperties* properties) override; + virtual rocksdb::Status + Finish(rocksdb::UserCollectedProperties *properties) override; - virtual const char* Name() const override { - return "Rdb_tbl_prop_coll"; - } + virtual const char *Name() const override { return "Rdb_tbl_prop_coll"; } rocksdb::UserCollectedProperties GetReadableProperties() const override; bool NeedCompact() const override; - public: - uint64_t GetMaxDeletedRows() const { - return m_max_deleted_rows; - } +public: + uint64_t GetMaxDeletedRows() const { return m_max_deleted_rows; } static void read_stats_from_tbl_props( - const std::shared_ptr& table_props, - std::vector* out_stats_vector); + const std::shared_ptr &table_props, + std::vector *out_stats_vector); - private: - static std::string GetReadableStats(const Rdb_index_stats& it); +private: + static std::string GetReadableStats(const Rdb_index_stats &it); bool ShouldCollectStats(); - void CollectStatsForRow(const rocksdb::Slice& key, - const rocksdb::Slice& value, const rocksdb::EntryType &type, - const uint64_t &file_size); - Rdb_index_stats* AccessStats(const rocksdb::Slice& key); + void CollectStatsForRow(const rocksdb::Slice &key, + const rocksdb::Slice &value, + const rocksdb::EntryType &type, + const uint64_t &file_size); + Rdb_index_stats *AccessStats(const rocksdb::Slice &key); void AdjustDeletedRows(rocksdb::EntryType type); - private: +private: uint32_t m_cf_id; std::shared_ptr m_keydef; - Rdb_ddl_manager* m_ddl_manager; + Rdb_ddl_manager *m_ddl_manager; std::vector m_stats; - Rdb_index_stats* m_last_stats; - static const char* 
INDEXSTATS_KEY; + Rdb_index_stats *m_last_stats; + static const char *INDEXSTATS_KEY; // last added key std::string m_last_key; @@ -150,34 +135,33 @@ class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector float m_card_adj_extra; }; - class Rdb_tbl_prop_coll_factory : public rocksdb::TablePropertiesCollectorFactory { - public: - Rdb_tbl_prop_coll_factory(const Rdb_tbl_prop_coll_factory&) = delete; - Rdb_tbl_prop_coll_factory& operator=(const Rdb_tbl_prop_coll_factory&) = delete; +public: + Rdb_tbl_prop_coll_factory(const Rdb_tbl_prop_coll_factory &) = delete; + Rdb_tbl_prop_coll_factory & + operator=(const Rdb_tbl_prop_coll_factory &) = delete; - explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager* ddl_manager) - : m_ddl_manager(ddl_manager) { - } + explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager *ddl_manager) + : m_ddl_manager(ddl_manager) {} /* Override parent class's virtual methods of interest. */ - virtual rocksdb::TablePropertiesCollector* CreateTablePropertiesCollector( + virtual rocksdb::TablePropertiesCollector *CreateTablePropertiesCollector( rocksdb::TablePropertiesCollectorFactory::Context context) override { - return new Rdb_tbl_prop_coll( - m_ddl_manager, m_params, context.column_family_id, - m_table_stats_sampling_pct); + return new Rdb_tbl_prop_coll(m_ddl_manager, m_params, + context.column_family_id, + m_table_stats_sampling_pct); } - virtual const char* Name() const override { + virtual const char *Name() const override { return "Rdb_tbl_prop_coll_factory"; } - public: - void SetCompactionParams(const Rdb_compact_params& params) { +public: + void SetCompactionParams(const Rdb_compact_params ¶ms) { m_params = params; } @@ -185,10 +169,10 @@ class Rdb_tbl_prop_coll_factory m_table_stats_sampling_pct = table_stats_sampling_pct; } - private: - Rdb_ddl_manager* const m_ddl_manager; +private: + Rdb_ddl_manager *const m_ddl_manager; Rdb_compact_params m_params; uint8_t m_table_stats_sampling_pct; }; -} // namespace myrocks +} // namespace 
myrocks diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h index deb718e88c9..f035085cae6 100644 --- a/storage/rocksdb/rdb_buff.h +++ b/storage/rocksdb/rdb_buff.h @@ -27,97 +27,86 @@ namespace myrocks { ("netstr") which stores data in Network Byte Order (Big Endian). */ -inline void rdb_netstr_append_uint64(my_core::String* const out_netstr, - const uint64 &val) -{ +inline void rdb_netstr_append_uint64(my_core::String *const out_netstr, + const uint64 &val) { DBUG_ASSERT(out_netstr != nullptr); // Convert from host machine byte order (usually Little Endian) to network // byte order (Big Endian). - uint64 net_val= htobe64(val); - out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); + uint64 net_val = htobe64(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); } -inline void rdb_netstr_append_uint32(my_core::String* const out_netstr, - const uint32 &val) -{ +inline void rdb_netstr_append_uint32(my_core::String *const out_netstr, + const uint32 &val) { DBUG_ASSERT(out_netstr != nullptr); // Convert from host machine byte order (usually Little Endian) to network // byte order (Big Endian). - uint32 net_val= htobe32(val); - out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); + uint32 net_val = htobe32(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); } -inline void rdb_netstr_append_uint16(my_core::String* const out_netstr, - const uint16 &val) -{ +inline void rdb_netstr_append_uint16(my_core::String *const out_netstr, + const uint16 &val) { DBUG_ASSERT(out_netstr != nullptr); // Convert from host machine byte order (usually Little Endian) to network // byte order (Big Endian). - uint16 net_val= htobe16(val); - out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); + uint16 net_val = htobe16(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); } - /* Basic network buffer ("netbuf") write helper functions. 
*/ -inline void rdb_netbuf_store_uint64(uchar* const dst_netbuf, const uint64 &n) -{ +inline void rdb_netbuf_store_uint64(uchar *const dst_netbuf, const uint64 &n) { DBUG_ASSERT(dst_netbuf != nullptr); // Convert from host byte order (usually Little Endian) to network byte order // (Big Endian). - uint64 net_val= htobe64(n); + uint64 net_val = htobe64(n); memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_uint32(uchar* const dst_netbuf, const uint32 &n) -{ +inline void rdb_netbuf_store_uint32(uchar *const dst_netbuf, const uint32 &n) { DBUG_ASSERT(dst_netbuf != nullptr); // Convert from host byte order (usually Little Endian) to network byte order // (Big Endian). - uint32 net_val= htobe32(n); + uint32 net_val = htobe32(n); memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_uint16(uchar* const dst_netbuf, const uint16 &n) -{ +inline void rdb_netbuf_store_uint16(uchar *const dst_netbuf, const uint16 &n) { DBUG_ASSERT(dst_netbuf != nullptr); // Convert from host byte order (usually Little Endian) to network byte order // (Big Endian). - uint16 net_val= htobe16(n); + uint16 net_val = htobe16(n); memcpy(dst_netbuf, &net_val, sizeof(net_val)); } -inline void rdb_netbuf_store_byte(uchar* const dst_netbuf, const uchar &c) -{ +inline void rdb_netbuf_store_byte(uchar *const dst_netbuf, const uchar &c) { DBUG_ASSERT(dst_netbuf != nullptr); - *dst_netbuf= c; + *dst_netbuf = c; } -inline void rdb_netbuf_store_index(uchar* const dst_netbuf, - const uint32 &number) -{ +inline void rdb_netbuf_store_index(uchar *const dst_netbuf, + const uint32 &number) { DBUG_ASSERT(dst_netbuf != nullptr); rdb_netbuf_store_uint32(dst_netbuf, number); } - /* Basic conversion helper functions from network byte order (Big Endian) to host machine byte order (usually Little Endian). 
*/ -inline uint64 rdb_netbuf_to_uint64(const uchar* const netbuf) -{ +inline uint64 rdb_netbuf_to_uint64(const uchar *const netbuf) { DBUG_ASSERT(netbuf != nullptr); uint64 net_val; @@ -128,8 +117,7 @@ inline uint64 rdb_netbuf_to_uint64(const uchar* const netbuf) return be64toh(net_val); } -inline uint32 rdb_netbuf_to_uint32(const uchar* const netbuf) -{ +inline uint32 rdb_netbuf_to_uint32(const uchar *const netbuf) { DBUG_ASSERT(netbuf != nullptr); uint32 net_val; @@ -140,8 +128,7 @@ inline uint32 rdb_netbuf_to_uint32(const uchar* const netbuf) return be32toh(net_val); } -inline uint16 rdb_netbuf_to_uint16(const uchar* const netbuf) -{ +inline uint16 rdb_netbuf_to_uint16(const uchar *const netbuf) { DBUG_ASSERT(netbuf != nullptr); uint16 net_val; @@ -152,14 +139,12 @@ inline uint16 rdb_netbuf_to_uint16(const uchar* const netbuf) return be16toh(net_val); } -inline uchar rdb_netbuf_to_byte(const uchar* const netbuf) -{ +inline uchar rdb_netbuf_to_byte(const uchar *const netbuf) { DBUG_ASSERT(netbuf != nullptr); - return(uchar)netbuf[0]; + return (uchar)netbuf[0]; } - /* Basic network buffer ("netbuf") read helper functions. Network buffer stores data in Network Byte Order (Big Endian). @@ -167,13 +152,12 @@ inline uchar rdb_netbuf_to_byte(const uchar* const netbuf) the netbuf pointer gets advanced to the following byte. */ -inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) -{ +inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) { DBUG_ASSERT(netbuf_ptr != nullptr); // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - const uint64 host_val= rdb_netbuf_to_uint64(*netbuf_ptr); + const uint64 host_val = rdb_netbuf_to_uint64(*netbuf_ptr); // Advance pointer. 
*netbuf_ptr += sizeof(host_val); @@ -181,13 +165,12 @@ inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) return host_val; } -inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) -{ +inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) { DBUG_ASSERT(netbuf_ptr != nullptr); // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - const uint32 host_val= rdb_netbuf_to_uint32(*netbuf_ptr); + const uint32 host_val = rdb_netbuf_to_uint32(*netbuf_ptr); // Advance pointer. *netbuf_ptr += sizeof(host_val); @@ -195,13 +178,12 @@ inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) return host_val; } -inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) -{ +inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) { DBUG_ASSERT(netbuf_ptr != nullptr); // Convert from network byte order (Big Endian) to host machine byte order // (usually Little Endian). - const uint16 host_val= rdb_netbuf_to_uint16(*netbuf_ptr); + const uint16 host_val = rdb_netbuf_to_uint16(*netbuf_ptr); // Advance pointer. *netbuf_ptr += sizeof(host_val); @@ -210,13 +192,12 @@ inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) } inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr, - GL_INDEX_ID* const gl_index_id) -{ + GL_INDEX_ID *const gl_index_id) { DBUG_ASSERT(gl_index_id != nullptr); DBUG_ASSERT(netbuf_ptr != nullptr); - gl_index_id->cf_id= rdb_netbuf_read_uint32(netbuf_ptr); - gl_index_id->index_id= rdb_netbuf_read_uint32(netbuf_ptr); + gl_index_id->cf_id = rdb_netbuf_read_uint32(netbuf_ptr); + gl_index_id->index_id = rdb_netbuf_read_uint32(netbuf_ptr); } /* @@ -225,17 +206,17 @@ inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr, - it prevents one from reading beyond the end of the string. 
*/ -class Rdb_string_reader -{ - const char* m_ptr; +class Rdb_string_reader { + const char *m_ptr; uint m_len; - private: - Rdb_string_reader& operator=(const Rdb_string_reader&) = default; - public: - Rdb_string_reader(const Rdb_string_reader&) = default; + +private: + Rdb_string_reader &operator=(const Rdb_string_reader &) = default; + +public: + Rdb_string_reader(const Rdb_string_reader &) = default; /* named constructor */ - static Rdb_string_reader read_or_empty(const rocksdb::Slice* const slice) - { + static Rdb_string_reader read_or_empty(const rocksdb::Slice *const slice) { if (!slice) { return Rdb_string_reader(""); } else { @@ -243,72 +224,59 @@ class Rdb_string_reader } } - explicit Rdb_string_reader(const std::string &str) - { - m_len= str.length(); - if (m_len) - { - m_ptr= &str.at(0); - } - else - { + explicit Rdb_string_reader(const std::string &str) { + m_len = str.length(); + if (m_len) { + m_ptr = &str.at(0); + } else { /* One can a create a Rdb_string_reader for reading from an empty string (although attempts to read anything will fail). We must not access str.at(0), since len==0, we can set ptr to any value. */ - m_ptr= nullptr; + m_ptr = nullptr; } } - explicit Rdb_string_reader(const rocksdb::Slice* const slice) - { - m_ptr= slice->data(); - m_len= slice->size(); + explicit Rdb_string_reader(const rocksdb::Slice *const slice) { + m_ptr = slice->data(); + m_len = slice->size(); } /* Read the next @param size bytes. Returns pointer to the bytes read, or nullptr if the remaining string doesn't have that many bytes. 
*/ - const char *read(const uint &size) - { + const char *read(const uint &size) { const char *res; - if (m_len < size) - { - res= nullptr; - } - else - { - res= m_ptr; + if (m_len < size) { + res = nullptr; + } else { + res = m_ptr; m_ptr += size; m_len -= size; } return res; } - bool read_uint8(uint* const res) - { + bool read_uint8(uint *const res) { const uchar *p; - if (!(p= reinterpret_cast(read(1)))) - return true; // error - else - { - *res= *p; - return false; // Ok + if (!(p = reinterpret_cast(read(1)))) + return true; // error + else { + *res = *p; + return false; // Ok } } - bool read_uint16(uint* const res) - { + bool read_uint16(uint *const res) { const uchar *p; - if (!(p= reinterpret_cast(read(2)))) - return true; // error - else - { - *res= rdb_netbuf_to_uint16(p); - return false; // Ok + if (!(p = reinterpret_cast(read(2)))) + return true; // error + else { + *res = rdb_netbuf_to_uint16(p); + return false; // Ok } } @@ -322,7 +290,6 @@ class Rdb_string_reader const char *get_current_ptr() const { return m_ptr; } }; - /* @brief A buffer one can write the data to. 
@@ -338,132 +305,112 @@ class Rdb_string_reader */ -class Rdb_string_writer -{ +class Rdb_string_writer { std::vector m_data; - public: - Rdb_string_writer(const Rdb_string_writer&) = delete; - Rdb_string_writer& operator=(const Rdb_string_writer&) = delete; + +public: + Rdb_string_writer(const Rdb_string_writer &) = delete; + Rdb_string_writer &operator=(const Rdb_string_writer &) = delete; Rdb_string_writer() = default; void clear() { m_data.clear(); } - void write_uint8(const uint &val) - { + void write_uint8(const uint &val) { m_data.push_back(static_cast(val)); } - void write_uint16(const uint &val) - { - const auto size= m_data.size(); + void write_uint16(const uint &val) { + const auto size = m_data.size(); m_data.resize(size + 2); rdb_netbuf_store_uint16(m_data.data() + size, val); } - void write_uint32(const uint &val) - { - const auto size= m_data.size(); + void write_uint32(const uint &val) { + const auto size = m_data.size(); m_data.resize(size + 4); rdb_netbuf_store_uint32(m_data.data() + size, val); } - void write(const uchar* const new_data, const size_t &len) - { + void write(const uchar *const new_data, const size_t &len) { DBUG_ASSERT(new_data != nullptr); m_data.insert(m_data.end(), new_data, new_data + len); } - uchar* ptr() { return m_data.data(); } + uchar *ptr() { return m_data.data(); } size_t get_current_pos() const { return m_data.size(); } - void write_uint8_at(const size_t &pos, const uint &new_val) - { + void write_uint8_at(const size_t &pos, const uint &new_val) { // This function will only overwrite what was written DBUG_ASSERT(pos < get_current_pos()); - m_data.data()[pos]= new_val; + m_data.data()[pos] = new_val; } - void write_uint16_at(const size_t &pos, const uint &new_val) - { + void write_uint16_at(const size_t &pos, const uint &new_val) { // This function will only overwrite what was written DBUG_ASSERT(pos < get_current_pos() && (pos + 1) < get_current_pos()); rdb_netbuf_store_uint16(m_data.data() + pos, new_val); } }; - /* A 
helper class for writing bits into Rdb_string_writer. The class assumes (but doesn't check) that nobody tries to write anything to the Rdb_string_writer that it is writing to. */ -class Rdb_bit_writer -{ +class Rdb_bit_writer { Rdb_string_writer *m_writer; uchar m_offset; - public: - Rdb_bit_writer(const Rdb_bit_writer&) = delete; - Rdb_bit_writer& operator=(const Rdb_bit_writer&) = delete; - explicit Rdb_bit_writer(Rdb_string_writer* writer_arg) - : m_writer(writer_arg), - m_offset(0) - { - } +public: + Rdb_bit_writer(const Rdb_bit_writer &) = delete; + Rdb_bit_writer &operator=(const Rdb_bit_writer &) = delete; - void write(uint size, const uint &value) - { + explicit Rdb_bit_writer(Rdb_string_writer *writer_arg) + : m_writer(writer_arg), m_offset(0) {} + + void write(uint size, const uint &value) { DBUG_ASSERT((value & ((1 << size) - 1)) == value); - while (size > 0) - { - if (m_offset == 0) - { + while (size > 0) { + if (m_offset == 0) { m_writer->write_uint8(0); } // number of bits to put in this byte const uint bits = std::min(size, (uint)(8 - m_offset)); - uchar* const last_byte= m_writer->ptr() + m_writer->get_current_pos() - 1; - *last_byte |= - (uchar) ((value >> (size - bits)) & ((1 << bits) - 1)) << m_offset; + uchar *const last_byte = + m_writer->ptr() + m_writer->get_current_pos() - 1; + *last_byte |= (uchar)((value >> (size - bits)) & ((1 << bits) - 1)) + << m_offset; size -= bits; m_offset = (m_offset + bits) & 0x7; } } }; -class Rdb_bit_reader -{ +class Rdb_bit_reader { const uchar *m_cur; uchar m_offset; uint m_ret; - Rdb_string_reader* const m_reader; - public: - Rdb_bit_reader(const Rdb_bit_reader&) = delete; - Rdb_bit_reader& operator=(const Rdb_bit_reader&) = delete; + Rdb_string_reader *const m_reader; - explicit Rdb_bit_reader(Rdb_string_reader* const reader) - : m_cur(nullptr), - m_offset(0), - m_reader(reader) - { - } +public: + Rdb_bit_reader(const Rdb_bit_reader &) = delete; + Rdb_bit_reader &operator=(const Rdb_bit_reader &) = delete; + 
+ explicit Rdb_bit_reader(Rdb_string_reader *const reader) + : m_cur(nullptr), m_offset(0), m_reader(reader) {} // Returns a pointer to an uint containing the bits read. On subsequent // reads, the value being pointed to will be overwritten. Returns nullptr // on failure. - uint *read(uint size) - { - m_ret= 0; + uint *read(uint size) { + m_ret = 0; DBUG_ASSERT(size <= 32); - while (size > 0) - { - if (m_offset == 0) - { - m_cur= (const uchar *)m_reader->read(1); - if (m_cur == nullptr) - { + while (size > 0) { + if (m_offset == 0) { + m_cur = (const uchar *)m_reader->read(1); + if (m_cur == nullptr) { return nullptr; } } @@ -479,4 +426,4 @@ class Rdb_bit_reader } }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 7ae7d362159..2bd89fffee5 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif /* This C++ files header file */ @@ -28,8 +28,7 @@ namespace myrocks { /* Check if ColumnFamily name says it's a reverse-ordered CF */ -bool Rdb_cf_manager::is_cf_name_reverse(const char* const name) -{ +bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) { /* nullptr means the default CF is used.. (TODO: can the default CF be * reverse?) 
*/ if (name && !strncmp(name, "rev:", 4)) @@ -43,9 +42,8 @@ static PSI_mutex_key ex_key_cfm; #endif void Rdb_cf_manager::init( - Rdb_cf_options* const cf_options, - std::vector* const handles) -{ + Rdb_cf_options *const cf_options, + std::vector *const handles) { mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST); DBUG_ASSERT(cf_options != nullptr); @@ -61,33 +59,28 @@ void Rdb_cf_manager::init( } } - -void Rdb_cf_manager::cleanup() -{ +void Rdb_cf_manager::cleanup() { for (auto it : m_cf_name_map) { delete it.second; } mysql_mutex_destroy(&m_mutex); } - /** Generate Column Family name for per-index column families @param res OUT Column Family name */ -void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name, - const char* const index_name, - std::string* const res) -{ +void Rdb_cf_manager::get_per_index_cf_name(const std::string &db_table_name, + const char *const index_name, + std::string *const res) { DBUG_ASSERT(index_name != nullptr); DBUG_ASSERT(res != nullptr); *res = db_table_name + "." + index_name; } - /* @brief Find column family by name. 
If it doesn't exist, create it @@ -95,53 +88,50 @@ void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name, @detail See Rdb_cf_manager::get_cf */ -rocksdb::ColumnFamilyHandle* -Rdb_cf_manager::get_or_create_cf(rocksdb::DB* const rdb, - const char *cf_name, - const std::string& db_table_name, - const char* const index_name, - bool* const is_automatic) -{ +rocksdb::ColumnFamilyHandle * +Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, + const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) { DBUG_ASSERT(rdb != nullptr); DBUG_ASSERT(is_automatic != nullptr); - rocksdb::ColumnFamilyHandle* cf_handle; + rocksdb::ColumnFamilyHandle *cf_handle; mysql_mutex_lock(&m_mutex); - *is_automatic= false; + *is_automatic = false; if (cf_name == nullptr) - cf_name= DEFAULT_CF_NAME; + cf_name = DEFAULT_CF_NAME; std::string per_index_name; - if (!strcmp(cf_name, PER_INDEX_CF_NAME)) - { + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { get_per_index_cf_name(db_table_name, index_name, &per_index_name); - cf_name= per_index_name.c_str(); - *is_automatic= true; + cf_name = per_index_name.c_str(); + *is_automatic = true; } const auto it = m_cf_name_map.find(cf_name); if (it != m_cf_name_map.end()) - cf_handle= it->second; - else - { + cf_handle = it->second; + else { /* Create a Column Family. 
*/ const std::string cf_name_str(cf_name); rocksdb::ColumnFamilyOptions opts; m_cf_options->get_cf_options(cf_name_str, &opts); - sql_print_information("RocksDB: creating column family %s", cf_name_str.c_str()); - sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); + sql_print_information("RocksDB: creating column family %s", + cf_name_str.c_str()); + sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); sql_print_information(" target_file_size_base=%" PRIu64, opts.target_file_size_base); - const rocksdb::Status s= - rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + const rocksdb::Status s = + rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); if (s.ok()) { m_cf_name_map[cf_handle->GetName()] = cf_handle; m_cf_id_map[cf_handle->GetID()] = cf_handle; } else { - cf_handle= nullptr; + cf_handle = nullptr; } } mysql_mutex_unlock(&m_mutex); @@ -149,7 +139,6 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB* const rdb, return cf_handle; } - /* Find column family by its cf_name. @@ -162,27 +151,24 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB* const rdb, db_table_name and index_name. 
*/ -rocksdb::ColumnFamilyHandle* -Rdb_cf_manager::get_cf(const char *cf_name, - const std::string& db_table_name, - const char* const index_name, - bool* const is_automatic) const -{ +rocksdb::ColumnFamilyHandle * +Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) const { DBUG_ASSERT(is_automatic != nullptr); - rocksdb::ColumnFamilyHandle* cf_handle; + rocksdb::ColumnFamilyHandle *cf_handle; - *is_automatic= false; + *is_automatic = false; mysql_mutex_lock(&m_mutex); if (cf_name == nullptr) - cf_name= DEFAULT_CF_NAME; + cf_name = DEFAULT_CF_NAME; std::string per_index_name; - if (!strcmp(cf_name, PER_INDEX_CF_NAME)) - { + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { get_per_index_cf_name(db_table_name, index_name, &per_index_name); - cf_name= per_index_name.c_str(); - *is_automatic= true; + cf_name = per_index_name.c_str(); + *is_automatic = true; } const auto it = m_cf_name_map.find(cf_name); @@ -193,9 +179,8 @@ Rdb_cf_manager::get_cf(const char *cf_name, return cf_handle; } -rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t &id) const -{ - rocksdb::ColumnFamilyHandle* cf_handle = nullptr; +rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const { + rocksdb::ColumnFamilyHandle *cf_handle = nullptr; mysql_mutex_lock(&m_mutex); const auto it = m_cf_id_map.find(id); @@ -206,9 +191,7 @@ rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t &id) const return cf_handle; } -std::vector -Rdb_cf_manager::get_cf_names(void) const -{ +std::vector Rdb_cf_manager::get_cf_names(void) const { std::vector names; mysql_mutex_lock(&m_mutex); @@ -219,10 +202,9 @@ Rdb_cf_manager::get_cf_names(void) const return names; } -std::vector -Rdb_cf_manager::get_all_cf(void) const -{ - std::vector list; +std::vector +Rdb_cf_manager::get_all_cf(void) const { + std::vector list; mysql_mutex_lock(&m_mutex); for (auto it : m_cf_id_map) { @@ -233,4 +215,4 @@ 
Rdb_cf_manager::get_all_cf(void) const return list; } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h index 4fb5f7437e8..7b9654f3537 100644 --- a/storage/rocksdb/rdb_cf_manager.h +++ b/storage/rocksdb/rdb_cf_manager.h @@ -46,33 +46,32 @@ namespace myrocks { - CFs are created in a synchronized way. We can't remove them, yet. */ -class Rdb_cf_manager -{ - std::map m_cf_name_map; - std::map m_cf_id_map; +class Rdb_cf_manager { + std::map m_cf_name_map; + std::map m_cf_id_map; mutable mysql_mutex_t m_mutex; - static - void get_per_index_cf_name(const std::string& db_table_name, - const char* const index_name, - std::string* const res); + static void get_per_index_cf_name(const std::string &db_table_name, + const char *const index_name, + std::string *const res); - Rdb_cf_options* m_cf_options= nullptr; + Rdb_cf_options *m_cf_options = nullptr; public: - Rdb_cf_manager(const Rdb_cf_manager&) = delete; - Rdb_cf_manager& operator=(const Rdb_cf_manager&) = delete; + Rdb_cf_manager(const Rdb_cf_manager &) = delete; + Rdb_cf_manager &operator=(const Rdb_cf_manager &) = delete; Rdb_cf_manager() = default; - static bool is_cf_name_reverse(const char* const name); + static bool is_cf_name_reverse(const char *const name); /* - This is called right after the DB::Open() call. The parameters describe column + This is called right after the DB::Open() call. The parameters describe + column families that are present in the database. The first CF is the default CF. 
*/ - void init(Rdb_cf_options* cf_options, - std::vector* const handles); + void init(Rdb_cf_options *cf_options, + std::vector *const handles); void cleanup(); /* @@ -80,33 +79,33 @@ public: - cf_name=nullptr means use default column family - cf_name=_auto_ means use 'dbname.tablename.indexname' */ - rocksdb::ColumnFamilyHandle* get_or_create_cf( - rocksdb::DB* const rdb, const char *cf_name, - const std::string& db_table_name, const char* const index_name, - bool* const is_automatic); + rocksdb::ColumnFamilyHandle * + get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, + const std::string &db_table_name, + const char *const index_name, bool *const is_automatic); /* Used by table open */ - rocksdb::ColumnFamilyHandle* get_cf(const char *cf_name, - const std::string& db_table_name, - const char* const index_name, - bool* const is_automatic) const; + rocksdb::ColumnFamilyHandle *get_cf(const char *cf_name, + const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) const; /* Look up cf by id; used by datadic */ - rocksdb::ColumnFamilyHandle* get_cf(const uint32_t &id) const; + rocksdb::ColumnFamilyHandle *get_cf(const uint32_t &id) const; /* Used to iterate over column families for show status */ std::vector get_cf_names(void) const; /* Used to iterate over column families */ - std::vector get_all_cf(void) const; + std::vector get_all_cf(void) const; // void drop_cf(); -- not implemented so far. 
- void get_cf_options( - const std::string &cf_name, - rocksdb::ColumnFamilyOptions* const opts) __attribute__((__nonnull__)) { - m_cf_options->get_cf_options(cf_name, opts); + void get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) + MY_ATTRIBUTE((__nonnull__)) { + m_cf_options->get_cf_options(cf_name, opts); } }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index bd4d78d0796..97dc16fe4e6 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif /* This C++ files header file */ @@ -41,24 +41,23 @@ Rdb_pk_comparator Rdb_cf_options::s_pk_comparator; Rdb_rev_comparator Rdb_cf_options::s_rev_pk_comparator; bool Rdb_cf_options::init( - const rocksdb::BlockBasedTableOptions& table_options, - std::shared_ptr prop_coll_factory, - const char* const default_cf_options, - const char* const override_cf_options) -{ + const rocksdb::BlockBasedTableOptions &table_options, + std::shared_ptr prop_coll_factory, + const char *const default_cf_options, + const char *const override_cf_options) { DBUG_ASSERT(default_cf_options != nullptr); DBUG_ASSERT(override_cf_options != nullptr); m_default_cf_opts.comparator = &s_pk_comparator; m_default_cf_opts.compaction_filter_factory.reset( - new Rdb_compact_filter_factory); + new Rdb_compact_filter_factory); m_default_cf_opts.table_factory.reset( - rocksdb::NewBlockBasedTableFactory(table_options)); + rocksdb::NewBlockBasedTableFactory(table_options)); if (prop_coll_factory) { m_default_cf_opts.table_properties_collector_factories.push_back( - prop_coll_factory); + prop_coll_factory); } if (!set_default(std::string(default_cf_options)) || @@ -70,34 +69,27 @@ 
bool Rdb_cf_options::init( } void Rdb_cf_options::get(const std::string &cf_name, - rocksdb::ColumnFamilyOptions* const opts) -{ + rocksdb::ColumnFamilyOptions *const opts) { DBUG_ASSERT(opts != nullptr); // set defaults - rocksdb::GetColumnFamilyOptionsFromString(*opts, - m_default_config, - opts); + rocksdb::GetColumnFamilyOptionsFromString(*opts, m_default_config, opts); // set per-cf config if we have one Name_to_config_t::iterator it = m_name_map.find(cf_name); if (it != m_name_map.end()) { - rocksdb::GetColumnFamilyOptionsFromString(*opts, - it->second, - opts); + rocksdb::GetColumnFamilyOptionsFromString(*opts, it->second, opts); } } -bool Rdb_cf_options::set_default(const std::string &default_config) -{ +bool Rdb_cf_options::set_default(const std::string &default_config) { rocksdb::ColumnFamilyOptions options; if (!default_config.empty() && - !rocksdb::GetColumnFamilyOptionsFromString(options, - default_config, - &options).ok()) { - fprintf(stderr, - "Invalid default column family config: %s\n", + !rocksdb::GetColumnFamilyOptionsFromString(options, default_config, + &options) + .ok()) { + fprintf(stderr, "Invalid default column family config: %s\n", default_config.c_str()); return false; } @@ -107,8 +99,7 @@ bool Rdb_cf_options::set_default(const std::string &default_config) } // Skip over any spaces in the input string. -void Rdb_cf_options::skip_spaces(const std::string& input, size_t* const pos) -{ +void Rdb_cf_options::skip_spaces(const std::string &input, size_t *const pos) { DBUG_ASSERT(pos != nullptr); while (*pos < input.size() && isspace(input[*pos])) @@ -118,10 +109,9 @@ void Rdb_cf_options::skip_spaces(const std::string& input, size_t* const pos) // Find a valid column family name. Note that all characters except a // semicolon are valid (should this change?) and all spaces are trimmed from // the beginning and end but are not removed between other characters. 
-bool Rdb_cf_options::find_column_family(const std::string& input, - size_t* const pos, - std::string* const key) -{ +bool Rdb_cf_options::find_column_family(const std::string &input, + size_t *const pos, + std::string *const key) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(key != nullptr); @@ -129,15 +119,13 @@ bool Rdb_cf_options::find_column_family(const std::string& input, size_t end_pos = *pos - 1; // Loop through the characters in the string until we see a '='. - for ( ; *pos < input.size() && input[*pos] != '='; ++(*pos)) - { + for (; *pos < input.size() && input[*pos] != '='; ++(*pos)) { // If this is not a space, move the end position to the current position. if (input[*pos] != ' ') end_pos = *pos; } - if (end_pos == beg_pos - 1) - { + if (end_pos == beg_pos - 1) { // NO_LINT_DEBUG sql_print_warning("No column family found (options: %s)", input.c_str()); return false; @@ -150,18 +138,16 @@ bool Rdb_cf_options::find_column_family(const std::string& input, // Find a valid options portion. Everything is deemed valid within the options // portion until we hit as many close curly braces as we have seen open curly // braces. -bool Rdb_cf_options::find_options(const std::string& input, size_t* const pos, - std::string* const options) -{ +bool Rdb_cf_options::find_options(const std::string &input, size_t *const pos, + std::string *const options) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(options != nullptr); // Make sure we have an open curly brace at the current position. - if (*pos < input.size() && input[*pos] != '{') - { + if (*pos < input.size() && input[*pos] != '{') { // NO_LINT_DEBUG sql_print_warning("Invalid cf options, '{' expected (options: %s)", - input.c_str()); + input.c_str()); return false; } @@ -175,29 +161,26 @@ bool Rdb_cf_options::find_options(const std::string& input, size_t* const pos, // Loop through the characters in the string until we find the appropriate // number of closing curly braces. 
- while (*pos < input.size()) - { - switch (input[*pos]) - { - case '}': - // If this is a closing curly brace and we bring the count down to zero - // we can exit the loop with a valid options string. - if (--brace_count == 0) - { - *options = input.substr(beg_pos, *pos - beg_pos); - ++(*pos); // Move past the last closing curly brace - return true; - } + while (*pos < input.size()) { + switch (input[*pos]) { + case '}': + // If this is a closing curly brace and we bring the count down to zero + // we can exit the loop with a valid options string. + if (--brace_count == 0) { + *options = input.substr(beg_pos, *pos - beg_pos); + ++(*pos); // Move past the last closing curly brace + return true; + } - break; + break; - case '{': - // If this is an open curly brace increment the count. - ++brace_count; - break; + case '{': + // If this is an open curly brace increment the count. + ++brace_count; + break; - default: - break; + default: + break; } // Move to the next character. @@ -208,15 +191,14 @@ bool Rdb_cf_options::find_options(const std::string& input, size_t* const pos, // Generate an error. // NO_LINT_DEBUG sql_print_warning("Mismatched cf options, '}' expected (options: %s)", - input.c_str()); + input.c_str()); return false; } -bool Rdb_cf_options::find_cf_options_pair(const std::string& input, - size_t* const pos, - std::string* const cf, - std::string* const opt_str) -{ +bool Rdb_cf_options::find_cf_options_pair(const std::string &input, + size_t *const pos, + std::string *const cf, + std::string *const opt_str) { DBUG_ASSERT(pos != nullptr); DBUG_ASSERT(cf != nullptr); DBUG_ASSERT(opt_str != nullptr); @@ -229,11 +211,10 @@ bool Rdb_cf_options::find_cf_options_pair(const std::string& input, return false; // If we are at the end of the input then we generate an error. 
- if (*pos == input.size()) - { + if (*pos == input.size()) { // NO_LINT_DEBUG sql_print_warning("Invalid cf options, '=' expected (options: %s)", - input.c_str()); + input.c_str()); return false; } @@ -250,13 +231,11 @@ bool Rdb_cf_options::find_cf_options_pair(const std::string& input, skip_spaces(input, pos); // We should either be at the end of the input string or at a semicolon. - if (*pos < input.size()) - { - if (input[*pos] != ';') - { + if (*pos < input.size()) { + if (input[*pos] != ';') { // NO_LINT_DEBUG sql_print_warning("Invalid cf options, ';' expected (options: %s)", - input.c_str()); + input.c_str()); return false; } @@ -266,8 +245,7 @@ bool Rdb_cf_options::find_cf_options_pair(const std::string& input, return true; } -bool Rdb_cf_options::set_override(const std::string &override_config) -{ +bool Rdb_cf_options::set_override(const std::string &override_config) { // TODO(???): support updates? std::string cf; @@ -277,15 +255,13 @@ bool Rdb_cf_options::set_override(const std::string &override_config) // Loop through the characters of the string until we reach the end. size_t pos = 0; - while (pos < override_config.size()) - { + while (pos < override_config.size()) { // Attempt to find ={}. if (!find_cf_options_pair(override_config, &pos, &cf, &opt_str)) return false; // Generate an error if we have already seen this column family. - if (configs.find(cf) != configs.end()) - { + if (configs.find(cf) != configs.end()) { // NO_LINT_DEBUG sql_print_warning( "Duplicate entry for %s in override options (options: %s)", @@ -294,9 +270,8 @@ bool Rdb_cf_options::set_override(const std::string &override_config) } // Generate an error if the is not valid according to RocksDB. 
- if (!rocksdb::GetColumnFamilyOptionsFromString( - options, opt_str, &options).ok()) - { + if (!rocksdb::GetColumnFamilyOptionsFromString(options, opt_str, &options) + .ok()) { // NO_LINT_DEBUG sql_print_warning( "Invalid cf config for %s in override options (options: %s)", @@ -314,29 +289,24 @@ bool Rdb_cf_options::set_override(const std::string &override_config) return true; } -const rocksdb::Comparator* Rdb_cf_options::get_cf_comparator( - const std::string& cf_name) -{ - if (Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str())) - { +const rocksdb::Comparator * +Rdb_cf_options::get_cf_comparator(const std::string &cf_name) { + if (Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str())) { return &s_rev_pk_comparator; - } - else - { + } else { return &s_pk_comparator; } } void Rdb_cf_options::get_cf_options(const std::string &cf_name, - rocksdb::ColumnFamilyOptions* const opts) -{ + rocksdb::ColumnFamilyOptions *const opts) { DBUG_ASSERT(opts != nullptr); *opts = m_default_cf_opts; get(cf_name, opts); // Set the comparator according to 'rev:' - opts->comparator= get_cf_comparator(cf_name); + opts->comparator = get_cf_comparator(cf_name); } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h index 8151d907eb7..1cd80a131ad 100644 --- a/storage/rocksdb/rdb_cf_options.h +++ b/storage/rocksdb/rdb_cf_options.h @@ -38,47 +38,47 @@ namespace myrocks { and also there is a default value which applies to column families not found in the map. 
*/ -class Rdb_cf_options -{ - public: - Rdb_cf_options(const Rdb_cf_options&) = delete; - Rdb_cf_options& operator=(const Rdb_cf_options&) = delete; +class Rdb_cf_options { +public: + Rdb_cf_options(const Rdb_cf_options &) = delete; + Rdb_cf_options &operator=(const Rdb_cf_options &) = delete; Rdb_cf_options() = default; - void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions* const opts); + void get(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts); - bool init( - const rocksdb::BlockBasedTableOptions& table_options, - std::shared_ptr prop_coll_factory, - const char* const default_cf_options, - const char* const override_cf_options); + bool init(const rocksdb::BlockBasedTableOptions &table_options, + std::shared_ptr + prop_coll_factory, + const char *const default_cf_options, + const char *const override_cf_options); - const rocksdb::ColumnFamilyOptions& get_defaults() const { + const rocksdb::ColumnFamilyOptions &get_defaults() const { return m_default_cf_opts; } - static const rocksdb::Comparator* get_cf_comparator( - const std::string& cf_name); + static const rocksdb::Comparator * + get_cf_comparator(const std::string &cf_name); - void get_cf_options( - const std::string &cf_name, - rocksdb::ColumnFamilyOptions* const opts) __attribute__((__nonnull__)); + void get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) + MY_ATTRIBUTE((__nonnull__)); - private: +private: bool set_default(const std::string &default_config); bool set_override(const std::string &overide_config); /* Helper string manipulation functions */ - static void skip_spaces(const std::string& input, size_t* const pos); - static bool find_column_family(const std::string& input, size_t* const pos, - std::string* const key); - static bool find_options(const std::string& input, size_t* const pos, - std::string* const options); - static bool find_cf_options_pair(const std::string& input, size_t* const pos, - std::string* const cf, - 
std::string* const opt_str); + static void skip_spaces(const std::string &input, size_t *const pos); + static bool find_column_family(const std::string &input, size_t *const pos, + std::string *const key); + static bool find_options(const std::string &input, size_t *const pos, + std::string *const options); + static bool find_cf_options_pair(const std::string &input, size_t *const pos, + std::string *const cf, + std::string *const opt_str); - private: +private: static Rdb_pk_comparator s_pk_comparator; static Rdb_rev_comparator s_rev_pk_comparator; @@ -93,4 +93,4 @@ class Rdb_cf_options rocksdb::ColumnFamilyOptions m_default_cf_opts; }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h index ca634f74d43..9e0d69597ff 100644 --- a/storage/rocksdb/rdb_compact_filter.h +++ b/storage/rocksdb/rdb_compact_filter.h @@ -17,7 +17,7 @@ #pragma once #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif /* C++ system header files */ @@ -32,11 +32,10 @@ namespace myrocks { -class Rdb_compact_filter : public rocksdb::CompactionFilter -{ - public: - Rdb_compact_filter(const Rdb_compact_filter&) = delete; - Rdb_compact_filter& operator=(const Rdb_compact_filter&) = delete; +class Rdb_compact_filter : public rocksdb::CompactionFilter { +public: + Rdb_compact_filter(const Rdb_compact_filter &) = delete; + Rdb_compact_filter &operator=(const Rdb_compact_filter &) = delete; explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {} ~Rdb_compact_filter() {} @@ -45,32 +44,28 @@ class Rdb_compact_filter : public rocksdb::CompactionFilter // V1 Filter is thread safe on our usage (creating from Factory). // Make sure to protect instance variables when switching to thread // unsafe in the future. 
- virtual bool Filter(int level, - const rocksdb::Slice& key, - const rocksdb::Slice& existing_value, - std::string* new_value, - bool* value_changed) const override - { + virtual bool Filter(int level, const rocksdb::Slice &key, + const rocksdb::Slice &existing_value, + std::string *new_value, + bool *value_changed) const override { DBUG_ASSERT(key.size() >= sizeof(uint32)); GL_INDEX_ID gl_index_id; - gl_index_id.cf_id= m_cf_id; - gl_index_id.index_id= rdb_netbuf_to_uint32((const uchar*)key.data()); + gl_index_id.cf_id = m_cf_id; + gl_index_id.index_id = rdb_netbuf_to_uint32((const uchar *)key.data()); DBUG_ASSERT(gl_index_id.index_id >= 1); - if (gl_index_id != m_prev_index) // processing new index id + if (gl_index_id != m_prev_index) // processing new index id { - if (m_num_deleted > 0) - { - m_num_deleted= 0; + if (m_num_deleted > 0) { + m_num_deleted = 0; } - m_should_delete= + m_should_delete = rdb_get_dict_manager()->is_drop_index_ongoing(gl_index_id); - m_prev_index= gl_index_id; + m_prev_index = gl_index_id; } - if (m_should_delete) - { + if (m_should_delete) { m_num_deleted++; } @@ -79,42 +74,35 @@ class Rdb_compact_filter : public rocksdb::CompactionFilter virtual bool IgnoreSnapshots() const override { return true; } - virtual const char* Name() const override - { - return "Rdb_compact_filter"; - } + virtual const char *Name() const override { return "Rdb_compact_filter"; } - private: +private: // Column family for this compaction filter const uint32_t m_cf_id; // Index id of the previous record - mutable GL_INDEX_ID m_prev_index= {0, 0}; + mutable GL_INDEX_ID m_prev_index = {0, 0}; // Number of rows deleted for the same index id - mutable uint64 m_num_deleted= 0; + mutable uint64 m_num_deleted = 0; // Current index id should be deleted or not (should be deleted if true) - mutable bool m_should_delete= false; + mutable bool m_should_delete = false; }; -class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory -{ - public: - 
Rdb_compact_filter_factory(const Rdb_compact_filter_factory&) = delete; - Rdb_compact_filter_factory& operator=(const Rdb_compact_filter_factory&) = delete; +class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory { +public: + Rdb_compact_filter_factory(const Rdb_compact_filter_factory &) = delete; + Rdb_compact_filter_factory & + operator=(const Rdb_compact_filter_factory &) = delete; Rdb_compact_filter_factory() {} ~Rdb_compact_filter_factory() {} - const char* Name() const override - { - return "Rdb_compact_filter_factory"; - } + const char *Name() const override { return "Rdb_compact_filter_factory"; } std::unique_ptr CreateCompactionFilter( - const rocksdb::CompactionFilter::Context& context) override - { + const rocksdb::CompactionFilter::Context &context) override { return std::unique_ptr( - new Rdb_compact_filter(context.column_family_id)); + new Rdb_compact_filter(context.column_family_id)); } }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_comparator.h b/storage/rocksdb/rdb_comparator.h index 0e47556a778..47b83abc70a 100644 --- a/storage/rocksdb/rdb_comparator.h +++ b/storage/rocksdb/rdb_comparator.h @@ -24,6 +24,9 @@ /* RocksDB header files */ #include "rocksdb/comparator.h" +/* MyRocks header files */ +#include "./rdb_utils.h" + namespace myrocks { /* @@ -32,70 +35,65 @@ namespace myrocks { (todo: knowledge about this format is shared between this class and Rdb_key_def) */ -class Rdb_pk_comparator : public rocksdb::Comparator -{ - public: - Rdb_pk_comparator(const Rdb_pk_comparator&) = delete; - Rdb_pk_comparator& operator=(const Rdb_pk_comparator&) = delete; +class Rdb_pk_comparator : public rocksdb::Comparator { +public: + Rdb_pk_comparator(const Rdb_pk_comparator &) = delete; + Rdb_pk_comparator &operator=(const Rdb_pk_comparator &) = delete; Rdb_pk_comparator() = default; - static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) - { - const size_t a_size= a.size(); - const 
size_t b_size= b.size(); - const size_t len= (a_size < b_size) ? a_size : b_size; + static int bytewise_compare(const rocksdb::Slice &a, + const rocksdb::Slice &b) { + const size_t a_size = a.size(); + const size_t b_size = b.size(); + const size_t len = (a_size < b_size) ? a_size : b_size; int res; - if ((res= memcmp(a.data(), b.data(), len))) + if ((res = memcmp(a.data(), b.data(), len))) return res; /* Ok, res== 0 */ - if (a_size != b_size) - { - return a_size < b_size? -1 : 1; + if (a_size != b_size) { + return a_size < b_size ? -1 : 1; } - return 0; + return HA_EXIT_SUCCESS; } /* Override virtual methods of interest */ - int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override - { - return bytewise_compare(a,b); + int Compare(const rocksdb::Slice &a, const rocksdb::Slice &b) const override { + return bytewise_compare(a, b); } - const char* Name() const override { return "RocksDB_SE_v3.10"; } + const char *Name() const override { return "RocksDB_SE_v3.10"; } - //TODO: advanced funcs: + // TODO: advanced funcs: // - FindShortestSeparator // - FindShortSuccessor // for now, do-nothing implementations: - void FindShortestSeparator(std::string* start, - const rocksdb::Slice& limit) const override {} - void FindShortSuccessor(std::string* key) const override {} + void FindShortestSeparator(std::string *start, + const rocksdb::Slice &limit) const override {} + void FindShortSuccessor(std::string *key) const override {} }; -class Rdb_rev_comparator : public rocksdb::Comparator -{ - public: - Rdb_rev_comparator(const Rdb_rev_comparator&) = delete; - Rdb_rev_comparator& operator=(const Rdb_rev_comparator&) = delete; +class Rdb_rev_comparator : public rocksdb::Comparator { +public: + Rdb_rev_comparator(const Rdb_rev_comparator &) = delete; + Rdb_rev_comparator &operator=(const Rdb_rev_comparator &) = delete; Rdb_rev_comparator() = default; - static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b) - { + static int 
bytewise_compare(const rocksdb::Slice &a, + const rocksdb::Slice &b) { return -Rdb_pk_comparator::bytewise_compare(a, b); } - int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override - { + int Compare(const rocksdb::Slice &a, const rocksdb::Slice &b) const override { return -Rdb_pk_comparator::bytewise_compare(a, b); } - const char* Name() const override { return "rev:RocksDB_SE_v3.10"; } - void FindShortestSeparator(std::string* start, - const rocksdb::Slice& limit) const override {} - void FindShortSuccessor(std::string* key) const override {} + const char *Name() const override { return "rev:RocksDB_SE_v3.10"; } + void FindShortestSeparator(std::string *start, + const rocksdb::Slice &limit) const override {} + void FindShortSuccessor(std::string *key) const override {} }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 3fa5f07844b..bd6bd2a0834 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -15,7 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif /* This C++ file's header file */ @@ -44,76 +44,56 @@ namespace myrocks { void get_mem_comparable_space(const CHARSET_INFO *cs, - const std::vector **xfrm, - size_t *xfrm_len, + const std::vector **xfrm, size_t *xfrm_len, size_t *mb_len); /* Rdb_key_def class implementation */ -Rdb_key_def::Rdb_key_def( - uint indexnr_arg, uint keyno_arg, - rocksdb::ColumnFamilyHandle* cf_handle_arg, - uint16_t index_dict_version_arg, - uchar index_type_arg, - uint16_t kv_format_version_arg, - bool is_reverse_cf_arg, bool is_auto_cf_arg, - const char* _name, - Rdb_index_stats _stats -) : - m_index_number(indexnr_arg), - m_cf_handle(cf_handle_arg), - m_index_dict_version(index_dict_version_arg), - 
m_index_type(index_type_arg), - m_kv_format_version(kv_format_version_arg), - m_is_reverse_cf(is_reverse_cf_arg), - m_is_auto_cf(is_auto_cf_arg), - m_name(_name), - m_stats(_stats), - m_pk_part_no(nullptr), - m_pack_info(nullptr), - m_keyno(keyno_arg), - m_key_parts(0), - m_maxlength(0) // means 'not intialized' +Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, + rocksdb::ColumnFamilyHandle *cf_handle_arg, + uint16_t index_dict_version_arg, uchar index_type_arg, + uint16_t kv_format_version_arg, bool is_reverse_cf_arg, + bool is_auto_cf_arg, const char *_name, + Rdb_index_stats _stats) + : m_index_number(indexnr_arg), m_cf_handle(cf_handle_arg), + m_index_dict_version(index_dict_version_arg), + m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg), + m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg), + m_name(_name), m_stats(_stats), m_pk_part_no(nullptr), + m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0), + m_prefix_extractor(nullptr), m_maxlength(0) // means 'not intialized' { mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); DBUG_ASSERT(m_cf_handle != nullptr); } -Rdb_key_def::Rdb_key_def(const Rdb_key_def& k) : - m_index_number(k.m_index_number), - m_cf_handle(k.m_cf_handle), - m_is_reverse_cf(k.m_is_reverse_cf), - m_is_auto_cf(k.m_is_auto_cf), - m_name(k.m_name), - m_stats(k.m_stats), - m_pk_part_no(k.m_pk_part_no), - m_pack_info(k.m_pack_info), - m_keyno(k.m_keyno), - m_key_parts(k.m_key_parts), - m_maxlength(k.m_maxlength) -{ +Rdb_key_def::Rdb_key_def(const Rdb_key_def &k) + : m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle), + m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf), + m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no), + m_pack_info(k.m_pack_info), m_keyno(k.m_keyno), + m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor), + m_maxlength(k.m_maxlength) { 
mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); - if (k.m_pack_info) - { - const size_t size= sizeof(Rdb_field_packing) * k.m_key_parts; - m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); + if (k.m_pack_info) { + const size_t size = sizeof(Rdb_field_packing) * k.m_key_parts; + m_pack_info = + reinterpret_cast(my_malloc(size, MYF(0))); memcpy(m_pack_info, k.m_pack_info, size); } - if (k.m_pk_part_no) - { - const size_t size = sizeof(uint)*m_key_parts; - m_pk_part_no= reinterpret_cast(my_malloc(size, MYF(0))); + if (k.m_pk_part_no) { + const size_t size = sizeof(uint) * m_key_parts; + m_pk_part_no = reinterpret_cast(my_malloc(size, MYF(0))); memcpy(m_pk_part_no, k.m_pk_part_no, size); } } -Rdb_key_def::~Rdb_key_def() -{ +Rdb_key_def::~Rdb_key_def() { mysql_mutex_destroy(&m_mutex); my_free(m_pk_part_no); @@ -123,9 +103,8 @@ Rdb_key_def::~Rdb_key_def() m_pack_info = nullptr; } -void Rdb_key_def::setup(const TABLE* const tbl, - const Rdb_tbl_def* const tbl_def) -{ +void Rdb_key_def::setup(const TABLE *const tbl, + const Rdb_tbl_def *const tbl_def) { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(tbl_def != nullptr); @@ -133,45 +112,38 @@ void Rdb_key_def::setup(const TABLE* const tbl, Set max_length based on the table. This can be called concurrently from multiple threads, so there is a mutex to protect this code. 
*/ - const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); - const bool hidden_pk_exists= table_has_hidden_pk(tbl); - const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY); - if (!m_maxlength) - { + const bool is_hidden_pk = (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); + const bool hidden_pk_exists = table_has_hidden_pk(tbl); + const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY); + if (!m_maxlength) { mysql_mutex_lock(&m_mutex); - if (m_maxlength != 0) - { + if (m_maxlength != 0) { mysql_mutex_unlock(&m_mutex); return; } - KEY *key_info= nullptr; - KEY *pk_info= nullptr; - if (!is_hidden_pk) - { - key_info= &tbl->key_info[m_keyno]; + KEY *key_info = nullptr; + KEY *pk_info = nullptr; + if (!is_hidden_pk) { + key_info = &tbl->key_info[m_keyno]; if (!hidden_pk_exists) - pk_info= &tbl->key_info[tbl->s->primary_key]; - m_name= std::string(key_info->name); - } - else - { - m_name= HIDDEN_PK_NAME; + pk_info = &tbl->key_info[tbl->s->primary_key]; + m_name = std::string(key_info->name); + } else { + m_name = HIDDEN_PK_NAME; } if (secondary_key) - m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->actual_key_parts; - else - { - pk_info= nullptr; - m_pk_key_parts= 0; + m_pk_key_parts = hidden_pk_exists ? 1 : pk_info->actual_key_parts; + else { + pk_info = nullptr; + m_pk_key_parts = 0; } // "unique" secondary keys support: - m_key_parts= is_hidden_pk ? 1 : key_info->actual_key_parts; + m_key_parts = is_hidden_pk ? 1 : key_info->actual_key_parts; - if (secondary_key) - { + if (secondary_key) { /* In most cases, SQL layer puts PK columns as invisible suffix at the end of secondary key. 
There are cases where this doesn't happen: @@ -190,115 +162,102 @@ void Rdb_key_def::setup(const TABLE* const tbl, } if (secondary_key) - m_pk_part_no= reinterpret_cast(my_malloc(sizeof(uint)*m_key_parts, - MYF(0))); + m_pk_part_no = reinterpret_cast( + my_malloc(sizeof(uint) * m_key_parts, MYF(0))); else - m_pk_part_no= nullptr; + m_pk_part_no = nullptr; - const size_t size= sizeof(Rdb_field_packing) * m_key_parts; - m_pack_info= reinterpret_cast(my_malloc(size, MYF(0))); + const size_t size = sizeof(Rdb_field_packing) * m_key_parts; + m_pack_info = + reinterpret_cast(my_malloc(size, MYF(0))); - size_t max_len= INDEX_NUMBER_SIZE; - int unpack_len= 0; - int max_part_len= 0; - bool simulating_extkey= false; - uint dst_i= 0; + size_t max_len = INDEX_NUMBER_SIZE; + int unpack_len = 0; + int max_part_len = 0; + bool simulating_extkey = false; + uint dst_i = 0; - uint keyno_to_set= m_keyno; - uint keypart_to_set= 0; + uint keyno_to_set = m_keyno; + uint keypart_to_set = 0; - if (is_hidden_pk) - { - Field *field= nullptr; + if (is_hidden_pk) { + Field *field = nullptr; m_pack_info[dst_i].setup(this, field, keyno_to_set, 0, 0); - m_pack_info[dst_i].m_unpack_data_offset= unpack_len; - max_len += m_pack_info[dst_i].m_max_image_len; - max_part_len= std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); + m_pack_info[dst_i].m_unpack_data_offset = unpack_len; + max_len += m_pack_info[dst_i].m_max_image_len; + max_part_len = std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); dst_i++; - } - else - { - KEY_PART_INFO *key_part= key_info->key_part; + } else { + KEY_PART_INFO *key_part = key_info->key_part; /* this loop also loops over the 'extended key' tail */ - for (uint src_i= 0; src_i < m_key_parts; src_i++, keypart_to_set++) - { - Field* const field= key_part ? key_part->field : nullptr; + for (uint src_i = 0; src_i < m_key_parts; src_i++, keypart_to_set++) { + Field *const field = key_part ? 
key_part->field : nullptr; - if (simulating_extkey && !hidden_pk_exists) - { + if (simulating_extkey && !hidden_pk_exists) { + DBUG_ASSERT(secondary_key); /* Check if this field is already present in the key definition */ - bool found= false; - for (uint j= 0; j < key_info->actual_key_parts; j++) - { - if (field->field_index == key_info->key_part[j].field->field_index) - { - found= true; + bool found = false; + for (uint j = 0; j < key_info->actual_key_parts; j++) { + if (field->field_index == + key_info->key_part[j].field->field_index && + key_part->length == key_info->key_part[j].length) { + found = true; break; } } - if (found) - { + if (found) { key_part++; continue; } } if (field && field->real_maybe_null()) - max_len +=1; // NULL-byte + max_len += 1; // NULL-byte m_pack_info[dst_i].setup(this, field, keyno_to_set, keypart_to_set, key_part ? key_part->length : 0); - m_pack_info[dst_i].m_unpack_data_offset= unpack_len; + m_pack_info[dst_i].m_unpack_data_offset = unpack_len; - if (pk_info) - { - m_pk_part_no[dst_i]= -1; - for (uint j= 0; j < m_pk_key_parts; j++) - { - if (field->field_index == pk_info->key_part[j].field->field_index) - { - m_pk_part_no[dst_i]= j; + if (pk_info) { + m_pk_part_no[dst_i] = -1; + for (uint j = 0; j < m_pk_key_parts; j++) { + if (field->field_index == pk_info->key_part[j].field->field_index) { + m_pk_part_no[dst_i] = j; break; } } - } - else if (secondary_key && hidden_pk_exists) - { + } else if (secondary_key && hidden_pk_exists) { /* The hidden pk can never be part of the sk. So it is always appended to the end of the sk. 
*/ - m_pk_part_no[dst_i]= -1; + m_pk_part_no[dst_i] = -1; if (simulating_extkey) - m_pk_part_no[dst_i]= 0; + m_pk_part_no[dst_i] = 0; } - max_len += m_pack_info[dst_i].m_max_image_len; + max_len += m_pack_info[dst_i].m_max_image_len; - max_part_len= std::max(max_part_len, - m_pack_info[dst_i].m_max_image_len); + max_part_len = + std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); key_part++; /* For "unique" secondary indexes, pretend they have "index extensions" */ - if (secondary_key && src_i+1 == key_info->actual_key_parts) - { - simulating_extkey= true; - if (!hidden_pk_exists) - { - keyno_to_set= tbl->s->primary_key; - key_part= pk_info->key_part; - keypart_to_set= (uint)-1; - } - else - { - keyno_to_set= tbl_def->m_key_count - 1; - key_part= nullptr; - keypart_to_set= 0; + if (secondary_key && src_i + 1 == key_info->actual_key_parts) { + simulating_extkey = true; + if (!hidden_pk_exists) { + keyno_to_set = tbl->s->primary_key; + key_part = pk_info->key_part; + keypart_to_set = (uint)-1; + } else { + keyno_to_set = tbl_def->m_key_count - 1; + key_part = nullptr; + keypart_to_set = 0; } } @@ -306,21 +265,61 @@ void Rdb_key_def::setup(const TABLE* const tbl, } } - m_key_parts= dst_i; + m_key_parts = dst_i; /* Initialize the memory needed by the stats structure */ m_stats.m_distinct_keys_per_prefix.resize(get_key_parts()); + /* Cache prefix extractor for bloom filter usage later */ + rocksdb::Options opt = rdb_get_rocksdb_db()->GetOptions(get_cf()); + m_prefix_extractor = opt.prefix_extractor; + /* This should be the last member variable set before releasing the mutex so that other threads can't see the object partially set up. */ - m_maxlength= max_len; + m_maxlength = max_len; mysql_mutex_unlock(&m_mutex); } } +/** + Read a memcmp key part from a slice using the passed in reader. + + Returns -1 if field was null, 1 if error, 0 otherwise. 
+*/ +int Rdb_key_def::read_memcmp_key_part(const TABLE *table_arg, + Rdb_string_reader *reader, + const uint part_num) const { + /* It is impossible to unpack the column. Skip it. */ + if (m_pack_info[part_num].m_maybe_null) { + const char *nullp; + if (!(nullp = reader->read(1))) + return 1; + if (*nullp == 0) { + /* This is a NULL value */ + return -1; + } else { + /* If NULL marker is not '0', it can be only '1' */ + if (*nullp != 1) + return 1; + } + } + + Rdb_field_packing *fpi = &m_pack_info[part_num]; + DBUG_ASSERT(table_arg->s != nullptr); + + bool is_hidden_pk_part = (part_num + 1 == m_key_parts) && + (table_arg->s->primary_key == MAX_INDEXES); + Field *field = nullptr; + if (!is_hidden_pk_part) + field = fpi->get_field_in_table(table_arg); + if (fpi->m_skip_func(fpi, field, reader)) + return 1; + + return 0; +} /** Get a mem-comparable form of Primary Key from mem-comparable form of this key @@ -347,17 +346,16 @@ void Rdb_key_def::setup(const TABLE* const tbl, set of queries for which we would check the checksum twice. 
*/ -uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, - const Rdb_key_def& pk_descr, - const rocksdb::Slice* const key, - uchar* const pk_buffer) const -{ +uint Rdb_key_def::get_primary_key_tuple(const TABLE *const table, + const Rdb_key_def &pk_descr, + const rocksdb::Slice *const key, + uchar *const pk_buffer) const { DBUG_ASSERT(table != nullptr); DBUG_ASSERT(key != nullptr); DBUG_ASSERT(pk_buffer); - uint size= 0; - uchar *buf= pk_buffer; + uint size = 0; + uchar *buf = pk_buffer; DBUG_ASSERT(m_pk_key_parts); /* Put the PK number */ @@ -365,8 +363,8 @@ uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, buf += INDEX_NUMBER_SIZE; size += INDEX_NUMBER_SIZE; - const char* start_offs[MAX_REF_PARTS]; - const char* end_offs[MAX_REF_PARTS]; + const char *start_offs[MAX_REF_PARTS]; + const char *end_offs[MAX_REF_PARTS]; int pk_key_part; uint i; Rdb_string_reader reader(key); @@ -375,56 +373,22 @@ uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, if ((!reader.read(INDEX_NUMBER_SIZE))) return RDB_INVALID_KEY_LEN; - for (i= 0; i < m_key_parts; i++) - { - if ((pk_key_part= m_pk_part_no[i]) != -1) - { - start_offs[pk_key_part]= reader.get_current_ptr(); + for (i = 0; i < m_key_parts; i++) { + if ((pk_key_part = m_pk_part_no[i]) != -1) { + start_offs[pk_key_part] = reader.get_current_ptr(); } - bool have_value= true; - /* It is impossible to unpack the column. Skip it. 
*/ - if (m_pack_info[i].m_maybe_null) - { - const char* nullp; - if (!(nullp= reader.read(1))) - return RDB_INVALID_KEY_LEN; - if (*nullp == 0) - { - /* This is a NULL value */ - have_value= false; - } - else - { - /* If NULL marker is not '0', it can be only '1' */ - if (*nullp != 1) - return RDB_INVALID_KEY_LEN; - } + if (read_memcmp_key_part(table, &reader, i) > 0) { + return RDB_INVALID_KEY_LEN; } - if (have_value) - { - Rdb_field_packing* const fpi= &m_pack_info[i]; - - DBUG_ASSERT(table->s != nullptr); - const bool is_hidden_pk_part= (i + 1 == m_key_parts) && - (table->s->primary_key == MAX_INDEXES); - Field *field= nullptr; - if (!is_hidden_pk_part) - field= fpi->get_field_in_table(table); - if (fpi->m_skip_func(fpi, field, &reader)) - return RDB_INVALID_KEY_LEN; - } - - if (pk_key_part != -1) - { - end_offs[pk_key_part]= reader.get_current_ptr(); + if (pk_key_part != -1) { + end_offs[pk_key_part] = reader.get_current_ptr(); } } - for (i= 0; i < m_pk_key_parts; i++) - { - const uint part_size= end_offs[i] - start_offs[i]; + for (i = 0; i < m_pk_key_parts; i++) { + const uint part_size = end_offs[i] - start_offs[i]; memcpy(buf, start_offs[i], end_offs[i] - start_offs[i]); buf += part_size; size += part_size; @@ -433,6 +397,46 @@ uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, return size; } +/** + Get a mem-comparable form of Secondary Key from mem-comparable form of this + key, without the extended primary key tail. + + @param + key Index tuple from this key in mem-comparable form + sk_buffer OUT Put here mem-comparable form of the Secondary Key. 
+ n_null_fields OUT Put number of null fields contained within sk entry +*/ +uint Rdb_key_def::get_memcmp_sk_parts(const TABLE *table, + const rocksdb::Slice &key, + uchar *sk_buffer, + uint *n_null_fields) const { + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(sk_buffer != nullptr); + DBUG_ASSERT(n_null_fields != nullptr); + DBUG_ASSERT(m_keyno != table->s->primary_key && !table_has_hidden_pk(table)); + + uchar *buf = sk_buffer; + + int res; + Rdb_string_reader reader(&key); + const char *start = reader.get_current_ptr(); + + // Skip the index number + if ((!reader.read(INDEX_NUMBER_SIZE))) + return RDB_INVALID_KEY_LEN; + + for (uint i = 0; i < table->key_info[m_keyno].user_defined_key_parts; i++) { + if ((res = read_memcmp_key_part(table, &reader, i)) > 0) { + return RDB_INVALID_KEY_LEN; + } else if (res == -1) { + (*n_null_fields)++; + } + } + + uint sk_memcmp_len = reader.get_current_ptr() - start; + memcpy(buf, start, sk_memcmp_len); + return sk_memcmp_len; +} /** Convert index tuple into storage (i.e. mem-comparable) format @@ -445,30 +449,28 @@ uint Rdb_key_def::get_primary_key_tuple(const TABLE* const table, size is at least max_storage_fmt_length() bytes. */ -uint Rdb_key_def::pack_index_tuple(TABLE* const tbl, uchar* const pack_buffer, - uchar* const packed_tuple, - const uchar* const key_tuple, - const key_part_map &keypart_map) const -{ +uint Rdb_key_def::pack_index_tuple(TABLE *const tbl, uchar *const pack_buffer, + uchar *const packed_tuple, + const uchar *const key_tuple, + const key_part_map &keypart_map) const { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(pack_buffer != nullptr); DBUG_ASSERT(packed_tuple != nullptr); DBUG_ASSERT(key_tuple != nullptr); /* We were given a record in KeyTupleFormat. 
First, save it to record */ - const uint key_len= calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); + const uint key_len = calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); key_restore(tbl->record[0], key_tuple, &tbl->key_info[m_keyno], key_len); - uint n_used_parts= my_count_bits(keypart_map); + uint n_used_parts = my_count_bits(keypart_map); if (keypart_map == HA_WHOLE_KEY) - n_used_parts= 0; // Full key is used + n_used_parts = 0; // Full key is used /* Then, convert the record into a mem-comparable form */ return pack_record(tbl, pack_buffer, tbl->record[0], packed_tuple, nullptr, false, 0, n_used_parts); } - /** @brief Check if "unpack info" data includes checksum. @@ -478,16 +480,13 @@ uint Rdb_key_def::pack_index_tuple(TABLE* const tbl, uchar* const pack_buffer, checksums. */ -bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) -{ - const uchar* ptr= (const uchar*)unpack_info.data(); - size_t size= unpack_info.size(); +bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) { + const uchar *ptr = (const uchar *)unpack_info.data(); + size_t size = unpack_info.size(); // Skip unpack info if present. 
- if (size >= RDB_UNPACK_HEADER_SIZE && - ptr[0] == RDB_UNPACK_DATA_TAG) - { - const uint16 skip_len= rdb_netbuf_to_uint16(ptr + 1); + if (size >= RDB_UNPACK_HEADER_SIZE && ptr[0] == RDB_UNPACK_DATA_TAG) { + const uint16 skip_len = rdb_netbuf_to_uint16(ptr + 1); SHIP_ASSERT(size >= skip_len); size -= skip_len; @@ -500,26 +499,22 @@ bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) /* @return Number of bytes that were changed */ -int Rdb_key_def::successor(uchar* const packed_tuple, const uint &len) -{ +int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) { DBUG_ASSERT(packed_tuple != nullptr); - int changed= 0; - uchar *p= packed_tuple + len - 1; - for (; p > packed_tuple; p--) - { + int changed = 0; + uchar *p = packed_tuple + len - 1; + for (; p > packed_tuple; p--) { changed++; - if (*p != uchar(0xFF)) - { - *p= *p + 1; + if (*p != uchar(0xFF)) { + *p = *p + 1; break; } - *p='\0'; + *p = '\0'; } return changed; } - /** Get index columns from the record and pack them into mem-comparable form. 
@@ -542,15 +537,13 @@ int Rdb_key_def::successor(uchar* const packed_tuple, const uint &len) Length of the packed tuple */ -uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, - const uchar* const record, - uchar* const packed_tuple, - Rdb_string_writer* const unpack_info, +uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer, + const uchar *const record, + uchar *const packed_tuple, + Rdb_string_writer *const unpack_info, const bool &should_store_row_debug_checksums, - const longlong &hidden_pk_id, - uint n_key_parts, - uint* const n_null_fields) const -{ + const longlong &hidden_pk_id, uint n_key_parts, + uint *const n_null_fields) const { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(pack_buffer != nullptr); DBUG_ASSERT(record != nullptr); @@ -560,9 +553,9 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, DBUG_ASSERT_IMP(should_store_row_debug_checksums, (m_index_type == INDEX_TYPE_SECONDARY)); - uchar *tuple= packed_tuple; - size_t unpack_len_pos= size_t(-1); - const bool hidden_pk_exists= table_has_hidden_pk(tbl); + uchar *tuple = packed_tuple; + size_t unpack_len_pos = size_t(-1); + const bool hidden_pk_exists = table_has_hidden_pk(tbl); rdb_netbuf_store_index(tuple, m_index_number); tuple += INDEX_NUMBER_SIZE; @@ -576,62 +569,55 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, // If hidden pk exists, but hidden pk wasnt passed in, we can't pack the // hidden key part. So we skip it (its always 1 part). 
if (hidden_pk_exists && !hidden_pk_id && use_all_columns) - n_key_parts= m_key_parts - 1; + n_key_parts = m_key_parts - 1; else if (use_all_columns) - n_key_parts= m_key_parts; + n_key_parts = m_key_parts; if (n_null_fields) *n_null_fields = 0; - if (unpack_info) - { + if (unpack_info) { unpack_info->clear(); unpack_info->write_uint8(RDB_UNPACK_DATA_TAG); - unpack_len_pos= unpack_info->get_current_pos(); + unpack_len_pos = unpack_info->get_current_pos(); // we don't know the total length yet, so write a zero unpack_info->write_uint16(0); } - for (uint i=0; i < n_key_parts; i++) - { + for (uint i = 0; i < n_key_parts; i++) { // Fill hidden pk id into the last key part for secondary keys for tables // with no pk - if (hidden_pk_exists && hidden_pk_id && i + 1 == n_key_parts) - { + if (hidden_pk_exists && hidden_pk_id && i + 1 == n_key_parts) { m_pack_info[i].fill_hidden_pk_val(&tuple, hidden_pk_id); break; } - Field* const field= m_pack_info[i].get_field_in_table(tbl); + Field *const field = m_pack_info[i].get_field_in_table(tbl); DBUG_ASSERT(field != nullptr); // Old Field methods expected the record pointer to be at tbl->record[0]. // The quick and easy way to fix this was to pass along the offset // for the pointer. - const my_ptrdiff_t ptr_diff= record - tbl->record[0]; + const my_ptrdiff_t ptr_diff = record - tbl->record[0]; - if (field->real_maybe_null()) - { + if (field->real_maybe_null()) { DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); - if (field->is_real_null(ptr_diff)) - { + if (field->is_real_null(ptr_diff)) { /* NULL value. store '\0' so that it sorts before non-NULL values */ *tuple++ = 0; /* That's it, don't store anything else */ if (n_null_fields) (*n_null_fields)++; continue; - } - else - { + } else { /* Not a NULL value. 
Store '1' */ *tuple++ = 1; } } - const bool create_unpack_info= - (unpack_info && // we were requested to generate unpack_info - m_pack_info[i].uses_unpack_info()); // and this keypart uses it + const bool create_unpack_info = + (unpack_info && // we were requested to generate unpack_info + m_pack_info[i].uses_unpack_info()); // and this keypart uses it Rdb_pack_field_context pack_ctx(unpack_info); // Set the offset for methods which do not take an offset as an argument @@ -639,21 +625,19 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, m_pack_info[i].m_max_image_len)); field->move_field_offset(ptr_diff); - m_pack_info[i].m_pack_func(&m_pack_info[i], field, - pack_buffer, &tuple, &pack_ctx); + m_pack_info[i].m_pack_func(&m_pack_info[i], field, pack_buffer, &tuple, + &pack_ctx); /* Make "unpack info" to be stored in the value */ - if (create_unpack_info) - { + if (create_unpack_info) { m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec, field, &pack_ctx); } field->move_field_offset(-ptr_diff); } - if (unpack_info) - { - const size_t len= unpack_info->get_current_pos(); + if (unpack_info) { + const size_t len = unpack_info->get_current_pos(); DBUG_ASSERT(len <= std::numeric_limits::max()); // Don't store the unpack_info if it has only the header (that is, there's @@ -662,12 +646,9 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, // empty (provided m_maybe_unpack_info==true, see // ha_rocksdb::convert_record_to_storage_format) if (len == RDB_UNPACK_HEADER_SIZE && - m_index_type != Rdb_key_def::INDEX_TYPE_PRIMARY) - { + m_index_type != Rdb_key_def::INDEX_TYPE_PRIMARY) { unpack_info->clear(); - } - else - { + } else { unpack_info->write_uint16_at(unpack_len_pos, len); } @@ -677,11 +658,10 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, // so the checksums are computed and stored by // ha_rocksdb::convert_record_to_storage_format // - if 
(should_store_row_debug_checksums) - { - const uint32_t key_crc32= crc32(0, packed_tuple, tuple - packed_tuple); - const uint32_t val_crc32= crc32(0, unpack_info->ptr(), - unpack_info->get_current_pos()); + if (should_store_row_debug_checksums) { + const uint32_t key_crc32 = crc32(0, packed_tuple, tuple - packed_tuple); + const uint32_t val_crc32 = + crc32(0, unpack_info->ptr(), unpack_info->get_current_pos()); unpack_info->write_uint8(RDB_CHECKSUM_DATA_TAG); unpack_info->write_uint32(key_crc32); @@ -707,11 +687,10 @@ uint Rdb_key_def::pack_record(const TABLE* const tbl, uchar* const pack_buffer, */ uint Rdb_key_def::pack_hidden_pk(const longlong &hidden_pk_id, - uchar* const packed_tuple) const -{ + uchar *const packed_tuple) const { DBUG_ASSERT(packed_tuple != nullptr); - uchar *tuple= packed_tuple; + uchar *tuple = packed_tuple; rdb_netbuf_store_index(tuple, m_index_number); tuple += INDEX_NUMBER_SIZE; DBUG_ASSERT(m_key_parts == 1); @@ -724,24 +703,20 @@ uint Rdb_key_def::pack_hidden_pk(const longlong &hidden_pk_id, return tuple - packed_tuple; } - /* Function of type rdb_index_field_pack_t */ -void rdb_pack_with_make_sort_key(Rdb_field_packing* const fpi, - Field* const field, - uchar* const buf __attribute__((__unused__)), - uchar **dst, - Rdb_pack_field_context* const pack_ctx - __attribute__((__unused__))) -{ +void rdb_pack_with_make_sort_key( + Rdb_field_packing *const fpi, Field *const field, + uchar *const buf MY_ATTRIBUTE((__unused__)), uchar **dst, + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) { DBUG_ASSERT(fpi != nullptr); DBUG_ASSERT(field != nullptr); DBUG_ASSERT(dst != nullptr); DBUG_ASSERT(*dst != nullptr); - const int max_len= fpi->m_max_image_len; + const int max_len = fpi->m_max_image_len; field->make_sort_key(*dst, max_len); *dst += max_len; } @@ -755,12 +730,9 @@ void rdb_pack_with_make_sort_key(Rdb_field_packing* const fpi, -1 if two kes are equal 1 - Data format error. 
*/ -int Rdb_key_def::compare_keys( - const rocksdb::Slice *key1, - const rocksdb::Slice *key2, - std::size_t* const column_index -) const -{ +int Rdb_key_def::compare_keys(const rocksdb::Slice *key1, + const rocksdb::Slice *key2, + std::size_t *const column_index) const { DBUG_ASSERT(key1 != nullptr); DBUG_ASSERT(key2 != nullptr); DBUG_ASSERT(column_index != nullptr); @@ -774,29 +746,27 @@ int Rdb_key_def::compare_keys( // Skip the index number if ((!reader1.read(INDEX_NUMBER_SIZE))) - return 1; + return HA_EXIT_FAILURE; if ((!reader2.read(INDEX_NUMBER_SIZE))) - return 1; + return HA_EXIT_FAILURE; - for (uint i= 0; i < m_key_parts ; i++) - { - const Rdb_field_packing* const fpi= &m_pack_info[i]; - if (fpi->m_maybe_null) - { - const auto nullp1= reader1.read(1); - const auto nullp2= reader2.read(1); - if (nullp1 == nullptr || nullp2 == nullptr) - return 1; //error + for (uint i = 0; i < m_key_parts; i++) { + const Rdb_field_packing *const fpi = &m_pack_info[i]; + if (fpi->m_maybe_null) { + const auto nullp1 = reader1.read(1); + const auto nullp2 = reader2.read(1); - if (*nullp1 != *nullp2) - { - *column_index = i; - return 0; + if (nullp1 == nullptr || nullp2 == nullptr) { + return HA_EXIT_FAILURE; } - if (*nullp1 == 0) - { + if (*nullp1 != *nullp2) { + *column_index = i; + return HA_EXIT_SUCCESS; + } + + if (*nullp1 == 0) { /* This is a NULL value */ continue; } @@ -806,29 +776,26 @@ int Rdb_key_def::compare_keys( const auto before_skip2 = reader2.get_current_ptr(); DBUG_ASSERT(fpi->m_skip_func); if (fpi->m_skip_func(fpi, nullptr, &reader1)) - return 1; + return HA_EXIT_FAILURE; if (fpi->m_skip_func(fpi, nullptr, &reader2)) - return 1; + return HA_EXIT_FAILURE; const auto size1 = reader1.get_current_ptr() - before_skip1; const auto size2 = reader2.get_current_ptr() - before_skip2; - if (size1 != size2) - { + if (size1 != size2) { *column_index = i; - return 0; + return HA_EXIT_SUCCESS; } if (memcmp(before_skip1, before_skip2, size1) != 0) { *column_index = i; - 
return 0; + return HA_EXIT_SUCCESS; } } *column_index = m_key_parts; - return 0; - + return HA_EXIT_SUCCESS; } - /* @brief Given a zero-padded key, determine its real key length @@ -837,9 +804,8 @@ int Rdb_key_def::compare_keys( Fixed-size skip functions just read. */ -size_t Rdb_key_def::key_length(const TABLE* const table, - const rocksdb::Slice &key) const -{ +size_t Rdb_key_def::key_length(const TABLE *const table, + const rocksdb::Slice &key) const { DBUG_ASSERT(table != nullptr); Rdb_string_reader reader(&key); @@ -847,19 +813,17 @@ size_t Rdb_key_def::key_length(const TABLE* const table, if ((!reader.read(INDEX_NUMBER_SIZE))) return size_t(-1); - for (uint i= 0; i < m_key_parts ; i++) - { - const Rdb_field_packing *fpi= &m_pack_info[i]; - const Field *field= nullptr; + for (uint i = 0; i < m_key_parts; i++) { + const Rdb_field_packing *fpi = &m_pack_info[i]; + const Field *field = nullptr; if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY) - field= fpi->get_field_in_table(table); + field = fpi->get_field_in_table(table); if (fpi->m_skip_func(fpi, field, &reader)) return size_t(-1); } return key.size() - reader.remaining_bytes(); } - /* Take mem-comparable form and unpack_info and unpack it to Table->record @@ -869,21 +833,18 @@ size_t Rdb_key_def::key_length(const TABLE* const table, @return UNPACK_SUCCESS - Ok UNPACK_FAILURE - Data format error. - UNPACK_INFO_MISSING - Unpack info was unavailable and was required for - unpacking. 
*/ -int Rdb_key_def::unpack_record(TABLE* const table, uchar* const buf, - const rocksdb::Slice* const packed_key, - const rocksdb::Slice* const unpack_info, - const bool &verify_row_debug_checksums) const -{ +int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, + const rocksdb::Slice *const packed_key, + const rocksdb::Slice *const unpack_info, + const bool &verify_row_debug_checksums) const { Rdb_string_reader reader(packed_key); - Rdb_string_reader unp_reader= Rdb_string_reader::read_or_empty(unpack_info); + Rdb_string_reader unp_reader = Rdb_string_reader::read_or_empty(unpack_info); - const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); - const bool hidden_pk_exists= table_has_hidden_pk(table); - const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY); + const bool is_hidden_pk = (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); + const bool hidden_pk_exists = table_has_hidden_pk(table); + const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY); // There is no checksuming data after unpack_info for primary keys, because // the layout there is different. The checksum is verified in // ha_rocksdb::convert_record_from_storage_format instead. @@ -892,166 +853,143 @@ int Rdb_key_def::unpack_record(TABLE* const table, uchar* const buf, // Old Field methods expected the record pointer to be at tbl->record[0]. // The quick and easy way to fix this was to pass along the offset // for the pointer. - const my_ptrdiff_t ptr_diff= buf - table->record[0]; + const my_ptrdiff_t ptr_diff = buf - table->record[0]; // Skip the index number - if ((!reader.read(INDEX_NUMBER_SIZE))) - { - return 1; + if ((!reader.read(INDEX_NUMBER_SIZE))) { + return HA_EXIT_FAILURE; } // For secondary keys, we expect the value field to contain unpack data and // checksum data in that order. One or both can be missing, but they cannot // be reordered. 
- const bool has_unpack_info= unp_reader.remaining_bytes() && - *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG; - if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE)) - { - return 1; + const bool has_unpack_info = + unp_reader.remaining_bytes() && + *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG; + if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE)) { + return HA_EXIT_FAILURE; } - for (uint i= 0; i < m_key_parts ; i++) - { - Rdb_field_packing* const fpi= &m_pack_info[i]; + for (uint i = 0; i < m_key_parts; i++) { + Rdb_field_packing *const fpi = &m_pack_info[i]; /* Hidden pk field is packed at the end of the secondary keys, but the SQL layer does not know about it. Skip retrieving field if hidden pk. */ if ((secondary_key && hidden_pk_exists && i + 1 == m_key_parts) || - is_hidden_pk) - { + is_hidden_pk) { DBUG_ASSERT(fpi->m_unpack_func); - if (fpi->m_skip_func(fpi, nullptr, &reader)) - { - return 1; + if (fpi->m_skip_func(fpi, nullptr, &reader)) { + return HA_EXIT_FAILURE; } continue; } - Field* const field= fpi->get_field_in_table(table); + Field *const field = fpi->get_field_in_table(table); - if (fpi->m_unpack_func) - { + if (fpi->m_unpack_func) { /* It is possible to unpack this column. Do it. 
*/ - if (fpi->m_maybe_null) - { - const char* nullp; - if (!(nullp= reader.read(1))) - return 1; - if (*nullp == 0) - { + if (fpi->m_maybe_null) { + const char *nullp; + if (!(nullp = reader.read(1))) + return HA_EXIT_FAILURE; + if (*nullp == 0) { /* Set the NULL-bit of this field */ field->set_null(ptr_diff); /* Also set the field to its default value */ - uint field_offset= field->ptr - table->record[0]; - memcpy(buf + field_offset, - table->s->default_values + field_offset, + uint field_offset = field->ptr - table->record[0]; + memcpy(buf + field_offset, table->s->default_values + field_offset, field->pack_length()); continue; - } - else if (*nullp == 1) + } else if (*nullp == 1) field->set_notnull(ptr_diff); else - return 1; + return HA_EXIT_FAILURE; } // If we need unpack info, but there is none, tell the unpack function // this by passing unp_reader as nullptr. If we never read unpack_info // during unpacking anyway, then there won't an error. - const bool maybe_missing_unpack= - !has_unpack_info && fpi->uses_unpack_info(); - const int res= fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, - &reader, - maybe_missing_unpack ? nullptr : &unp_reader); + const bool maybe_missing_unpack = + !has_unpack_info && fpi->uses_unpack_info(); + const int res = + fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, &reader, + maybe_missing_unpack ? nullptr : &unp_reader); if (res) return res; - } - else - { + } else { /* It is impossible to unpack the column. Skip it. 
*/ - if (fpi->m_maybe_null) - { - const char* nullp; - if (!(nullp= reader.read(1))) - return 1; - if (*nullp == 0) - { + if (fpi->m_maybe_null) { + const char *nullp; + if (!(nullp = reader.read(1))) + return HA_EXIT_FAILURE; + if (*nullp == 0) { /* This is a NULL value */ continue; } /* If NULL marker is not '0', it can be only '1' */ if (*nullp != 1) - return 1; + return HA_EXIT_FAILURE; } if (fpi->m_skip_func(fpi, field, &reader)) - return 1; + return HA_EXIT_FAILURE; } } /* Check checksum values if present */ - const char* ptr; - if ((ptr= unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG) - { - if (verify_row_debug_checksums) - { - uint32_t stored_key_chksum= rdb_netbuf_to_uint32( - (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE)); - const uint32_t stored_val_chksum= rdb_netbuf_to_uint32( - (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE)); + const char *ptr; + if ((ptr = unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG) { + if (verify_row_debug_checksums) { + uint32_t stored_key_chksum = rdb_netbuf_to_uint32( + (const uchar *)unp_reader.read(RDB_CHECKSUM_SIZE)); + const uint32_t stored_val_chksum = rdb_netbuf_to_uint32( + (const uchar *)unp_reader.read(RDB_CHECKSUM_SIZE)); - const uint32_t computed_key_chksum= - crc32(0, (const uchar*)packed_key->data(), packed_key->size()); - const uint32_t computed_val_chksum= - crc32(0, (const uchar*) unpack_info->data(), - unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); + const uint32_t computed_key_chksum = + crc32(0, (const uchar *)packed_key->data(), packed_key->size()); + const uint32_t computed_val_chksum = + crc32(0, (const uchar *)unpack_info->data(), + unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); DBUG_EXECUTE_IF("myrocks_simulate_bad_key_checksum1", stored_key_chksum++;); - if (stored_key_chksum != computed_key_chksum) - { - report_checksum_mismatch(true, packed_key->data(), - packed_key->size()); - return 1; + if (stored_key_chksum != computed_key_chksum) { + report_checksum_mismatch(true, 
packed_key->data(), packed_key->size()); + return HA_EXIT_FAILURE; } - if (stored_val_chksum != computed_val_chksum) - { - report_checksum_mismatch( - false, unpack_info->data(), - unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); - return 1; + if (stored_val_chksum != computed_val_chksum) { + report_checksum_mismatch(false, unpack_info->data(), + unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); + return HA_EXIT_FAILURE; } - } - else - { + } else { /* The checksums are present but we are not checking checksums */ } } if (reader.remaining_bytes()) - return 1; + return HA_EXIT_FAILURE; - return 0; + return HA_EXIT_SUCCESS; } -bool Rdb_key_def::table_has_hidden_pk(const TABLE* const table) -{ +bool Rdb_key_def::table_has_hidden_pk(const TABLE *const table) { return table->s->primary_key == MAX_INDEXES; } void Rdb_key_def::report_checksum_mismatch(const bool &is_key, - const char* const data, - const size_t data_size) const -{ + const char *const data, + const size_t data_size) const { // NO_LINT_DEBUG sql_print_error("Checksum mismatch in %s of key-value pair for index 0x%x", - is_key? "key" : "value", get_index_number()); + is_key ? 
"key" : "value", get_index_number()); const std::string buf = rdb_hexdump(data, data_size, RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG @@ -1062,18 +1000,16 @@ void Rdb_key_def::report_checksum_mismatch(const bool &is_key, } bool Rdb_key_def::index_format_min_check(const int &pk_min, - const int &sk_min) const -{ - switch (m_index_type) - { - case INDEX_TYPE_PRIMARY: - case INDEX_TYPE_HIDDEN_PRIMARY: - return (m_kv_format_version >= pk_min); - case INDEX_TYPE_SECONDARY: - return (m_kv_format_version >= sk_min); - default: - DBUG_ASSERT(0); - return false; + const int &sk_min) const { + switch (m_index_type) { + case INDEX_TYPE_PRIMARY: + case INDEX_TYPE_HIDDEN_PRIMARY: + return (m_kv_format_version >= pk_min); + case INDEX_TYPE_SECONDARY: + return (m_kv_format_version >= sk_min); + default: + DBUG_ASSERT(0); + return false; } } @@ -1085,13 +1021,12 @@ bool Rdb_key_def::index_format_min_check(const int &pk_min, Function of type rdb_index_field_skip_t */ -int rdb_skip_max_length(const Rdb_field_packing* const fpi, - const Field* const field __attribute__((__unused__)), - Rdb_string_reader* const reader) -{ +int rdb_skip_max_length(const Rdb_field_packing *const fpi, + const Field *const field MY_ATTRIBUTE((__unused__)), + Rdb_string_reader *const reader) { if (!reader->read(fpi->m_max_image_len)) - return 1; - return 0; + return HA_EXIT_FAILURE; + return HA_EXIT_SUCCESS; } /* @@ -1100,7 +1035,7 @@ int rdb_skip_max_length(const Rdb_field_packing* const fpi, rdb_unpack_binary_or_utf8_varchar. 
*/ -const uint RDB_ESCAPE_LENGTH= 9; +const uint RDB_ESCAPE_LENGTH = 9; static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0, "RDB_ESCAPE_LENGTH-1 must be even."); @@ -1109,50 +1044,43 @@ static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0, */ static int rdb_skip_variable_length( - const Rdb_field_packing* const fpi __attribute__((__unused__)), - const Field* const field, Rdb_string_reader* const reader) -{ + const Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_string_reader *const reader) { const uchar *ptr; - bool finished= false; + bool finished = false; size_t dst_len; /* How much data can be there */ - if (field) - { - const Field_varstring* const field_var= - static_cast(field); - dst_len= field_var->pack_length() - field_var->length_bytes; - } - else - { - dst_len= UINT_MAX; + if (field) { + const Field_varstring *const field_var = + static_cast(field); + dst_len = field_var->pack_length() - field_var->length_bytes; + } else { + dst_len = UINT_MAX; } /* Decode the length-emitted encoding here */ - while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) - { + while ((ptr = (const uchar *)reader->read(RDB_ESCAPE_LENGTH))) { /* See rdb_pack_with_varchar_encoding. 
*/ - const uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes - const uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; + const uchar pad = + 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + const uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad; - if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) - { - return 1; /* cannot store that much, invalid data */ + if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) { + return HA_EXIT_FAILURE; /* cannot store that much, invalid data */ } - if (used_bytes < RDB_ESCAPE_LENGTH - 1) - { - finished= true; + if (used_bytes < RDB_ESCAPE_LENGTH - 1) { + finished = true; break; } dst_len -= used_bytes; } - if (!finished) - { - return 1; + if (!finished) { + return HA_EXIT_FAILURE; } - return 0; + return HA_EXIT_SUCCESS; } const int VARCHAR_CMP_LESS_THAN_SPACES = 1; @@ -1163,136 +1091,135 @@ const int VARCHAR_CMP_GREATER_THAN_SPACES = 3; Skip a keypart that uses Variable-Length Space-Padded encoding */ -static int rdb_skip_variable_space_pad( - const Rdb_field_packing* const fpi, - const Field* const field, Rdb_string_reader* const reader) -{ +static int rdb_skip_variable_space_pad(const Rdb_field_packing *const fpi, + const Field *const field, + Rdb_string_reader *const reader) { const uchar *ptr; - bool finished= false; + bool finished = false; - size_t dst_len= UINT_MAX; /* How much data can be there */ + size_t dst_len = UINT_MAX; /* How much data can be there */ - if (field) - { - const Field_varstring* const field_var= - static_cast(field); - dst_len= field_var->pack_length() - field_var->length_bytes; + if (field) { + const Field_varstring *const field_var = + static_cast(field); + dst_len = field_var->pack_length() - field_var->length_bytes; } /* Decode the length-emitted encoding here */ - while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) - { + while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) { // See rdb_pack_with_varchar_space_pad - const 
uchar c= ptr[fpi->m_segment_size-1]; - if (c == VARCHAR_CMP_EQUAL_TO_SPACES) - { + const uchar c = ptr[fpi->m_segment_size - 1]; + if (c == VARCHAR_CMP_EQUAL_TO_SPACES) { // This is the last segment - finished= true; + finished = true; break; - } - else if (c == VARCHAR_CMP_LESS_THAN_SPACES || - c == VARCHAR_CMP_GREATER_THAN_SPACES) - { + } else if (c == VARCHAR_CMP_LESS_THAN_SPACES || + c == VARCHAR_CMP_GREATER_THAN_SPACES) { // This is not the last segment - if ((fpi->m_segment_size-1) > dst_len) - { + if ((fpi->m_segment_size - 1) > dst_len) { // The segment is full of data but the table field can't hold that // much! This must be data corruption. - return 1; + return HA_EXIT_FAILURE; } - dst_len -= (fpi->m_segment_size-1); - } - else - { + dst_len -= (fpi->m_segment_size - 1); + } else { // Encountered a value that's none of the VARCHAR_CMP* constants // It's data corruption. - return 1; + return HA_EXIT_FAILURE; } } - return finished? 0: 1; + return finished ? HA_EXIT_SUCCESS : HA_EXIT_FAILURE; } - /* Function of type rdb_index_field_unpack_t */ -int rdb_unpack_integer( - Rdb_field_packing* const fpi, Field* const field, uchar* const to, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - const int length= fpi->m_max_image_len; +int rdb_unpack_integer(Rdb_field_packing *const fpi, Field *const field, + uchar *const to, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) { + const int length = fpi->m_max_image_len; const uchar *from; - if (!(from= (const uchar*)reader->read(length))) + if (!(from = (const uchar *)reader->read(length))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ #ifdef WORDS_BIGENDIAN { - if (((Field_num*)field)->unsigned_flag) - to[0]= from[0]; + if (((Field_num *)field)->unsigned_flag) + to[0] = from[0]; else - to[0]= (char)(from[0] ^ 128); // Reverse the sign bit. 
+ to[0] = (char)(from[0] ^ 128); // Reverse the sign bit. memcpy(to + 1, from + 1, length - 1); } #else { - const int sign_byte= from[0]; - if (((Field_num*)field)->unsigned_flag) - to[length - 1]= sign_byte; + const int sign_byte = from[0]; + if (((Field_num *)field)->unsigned_flag) + to[length - 1] = sign_byte; else - to[length - 1]= static_cast(sign_byte ^ 128); // Reverse the sign bit. - for (int i= 0, j= length - 1; i < length-1; ++i, --j) - to[i]= from[j]; + to[length - 1] = + static_cast(sign_byte ^ 128); // Reverse the sign bit. + for (int i = 0, j = length - 1; i < length - 1; ++i, --j) + to[i] = from[j]; } #endif return UNPACK_SUCCESS; } #if !defined(WORDS_BIGENDIAN) -static void rdb_swap_double_bytes(uchar* const dst, const uchar* const src) -{ +static void rdb_swap_double_bytes(uchar *const dst, const uchar *const src) { #if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) // A few systems store the most-significant _word_ first on little-endian - dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; - dst[4] = src[7]; dst[5] = src[6]; dst[6] = src[5]; dst[7] = src[4]; + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; + dst[4] = src[7]; + dst[5] = src[6]; + dst[6] = src[5]; + dst[7] = src[4]; #else - dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; - dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; + dst[0] = src[7]; + dst[1] = src[6]; + dst[2] = src[5]; + dst[3] = src[4]; + dst[4] = src[3]; + dst[5] = src[2]; + dst[6] = src[1]; + dst[7] = src[0]; #endif } -static void rdb_swap_float_bytes(uchar* const dst, const uchar* const src) -{ - dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; +static void rdb_swap_float_bytes(uchar *const dst, const uchar *const src) { + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; } #else #define rdb_swap_double_bytes nullptr -#define rdb_swap_float_bytes nullptr +#define 
rdb_swap_float_bytes nullptr #endif static int rdb_unpack_floating_point( - uchar* const dst, Rdb_string_reader* const reader, const size_t &size, - const int &exp_digit, - const uchar* const zero_pattern, - const uchar* const zero_val, - void (*swap_func)(uchar *, const uchar *)) -{ - const uchar* const from = (const uchar*) reader->read(size); + uchar *const dst, Rdb_string_reader *const reader, const size_t &size, + const int &exp_digit, const uchar *const zero_pattern, + const uchar *const zero_val, void (*swap_func)(uchar *, const uchar *)) { + const uchar *const from = (const uchar *)reader->read(size); if (from == nullptr) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ /* Check to see if the value is zero */ - if (memcmp(from, zero_pattern, size) == 0) - { + if (memcmp(from, zero_pattern, size) == 0) { memcpy(dst, zero_val, size); return UNPACK_SUCCESS; } #if defined(WORDS_BIGENDIAN) // On big-endian, output can go directly into result - uchar* const tmp = dst; + uchar *const tmp = dst; #else // Otherwise use a temporary buffer to make byte-swapping easier later uchar tmp[8]; @@ -1300,18 +1227,15 @@ static int rdb_unpack_floating_point( memcpy(tmp, from, size); - if (tmp[0] & 0x80) - { + if (tmp[0] & 0x80) { // If the high bit is set the original value was positive so // remove the high bit and subtract one from the exponent. - ushort exp_part= ((ushort) tmp[0] << 8) | (ushort) tmp[1]; - exp_part &= 0x7FFF; // clear high bit; - exp_part -= (ushort) 1 << (16 - 1 - exp_digit); // subtract from exponent - tmp[0] = (uchar) (exp_part >> 8); - tmp[1] = (uchar) exp_part; - } - else - { + ushort exp_part = ((ushort)tmp[0] << 8) | (ushort)tmp[1]; + exp_part &= 0x7FFF; // clear high bit; + exp_part -= (ushort)1 << (16 - 1 - exp_digit); // subtract from exponent + tmp[0] = (uchar)(exp_part >> 8); + tmp[1] = (uchar)exp_part; + } else { // Otherwise the original value was negative and all bytes have been // negated. 
for (size_t ii = 0; ii < size; ii++) @@ -1332,7 +1256,6 @@ static int rdb_unpack_floating_point( #define DBL_EXP_DIG (sizeof(double) * 8 - DBL_MANT_DIG) #endif - /* Function of type rdb_index_field_unpack_t @@ -1342,19 +1265,16 @@ static int rdb_unpack_floating_point( allowed in the database. */ static int rdb_unpack_double( - Rdb_field_packing* const fpi __attribute__((__unused__)), - Field* const field __attribute__((__unused__)), - uchar* const field_ptr, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - static double zero_val = 0.0; - static const uchar zero_pattern[8] = { 128, 0, 0, 0, 0, 0, 0, 0 }; + Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), + Field *const field MY_ATTRIBUTE((__unused__)), uchar *const field_ptr, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + static double zero_val = 0.0; + static const uchar zero_pattern[8] = {128, 0, 0, 0, 0, 0, 0, 0}; - return rdb_unpack_floating_point(field_ptr, reader, - sizeof(double), - DBL_EXP_DIG, zero_pattern, (const uchar *) &zero_val, - rdb_swap_double_bytes); + return rdb_unpack_floating_point( + field_ptr, reader, sizeof(double), DBL_EXP_DIG, zero_pattern, + (const uchar *)&zero_val, rdb_swap_double_bytes); } #if !defined(FLT_EXP_DIG) @@ -1370,18 +1290,15 @@ static int rdb_unpack_double( allowed in the database. 
*/ static int rdb_unpack_float( - Rdb_field_packing* const, Field* const field __attribute__((__unused__)), - uchar* const field_ptr, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - static float zero_val = 0.0; - static const uchar zero_pattern[4] = { 128, 0, 0, 0 }; + Rdb_field_packing *const, Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + static float zero_val = 0.0; + static const uchar zero_pattern[4] = {128, 0, 0, 0}; - return rdb_unpack_floating_point(field_ptr, reader, - sizeof(float), - FLT_EXP_DIG, zero_pattern, (const uchar *) &zero_val, - rdb_swap_float_bytes); + return rdb_unpack_floating_point( + field_ptr, reader, sizeof(float), FLT_EXP_DIG, zero_pattern, + (const uchar *)&zero_val, rdb_swap_float_bytes); } /* @@ -1389,25 +1306,22 @@ static int rdb_unpack_float( Unpack by doing the reverse action to Field_newdate::make_sort_key. */ -int rdb_unpack_newdate( - Rdb_field_packing* const fpi, Field* constfield, - uchar* const field_ptr, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - const char* from; +int rdb_unpack_newdate(Rdb_field_packing *const fpi, Field *constfield, + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) { + const char *from; DBUG_ASSERT(fpi->m_max_image_len == 3); - if (!(from= reader->read(3))) + if (!(from = reader->read(3))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ - field_ptr[0]= from[2]; - field_ptr[1]= from[1]; - field_ptr[2]= from[0]; + field_ptr[0] = from[2]; + field_ptr[1] = from[1]; + field_ptr[2] = from[0]; return UNPACK_SUCCESS; } - /* Function of type rdb_index_field_unpack_t, used to Unpack the string by copying it over. 
@@ -1415,64 +1329,57 @@ int rdb_unpack_newdate( */ static int rdb_unpack_binary_str( - Rdb_field_packing* const fpi, Field* const field, uchar* const to, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - const char* from; - if (!(from= reader->read(fpi->m_max_image_len))) + Rdb_field_packing *const fpi, Field *const field, uchar *const to, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + const char *from; + if (!(from = reader->read(fpi->m_max_image_len))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ memcpy(to, from, fpi->m_max_image_len); return UNPACK_SUCCESS; } - /* Function of type rdb_index_field_unpack_t. For UTF-8, we need to convert 2-byte wide-character entities back into UTF8 sequences. */ -static int rdb_unpack_utf8_str( - Rdb_field_packing* const fpi, Field* const field, - uchar *dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ - my_core::CHARSET_INFO* const cset= (my_core::CHARSET_INFO*)field->charset(); +static int rdb_unpack_utf8_str(Rdb_field_packing *const fpi, Field *const field, + uchar *dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) { + my_core::CHARSET_INFO *const cset = (my_core::CHARSET_INFO *)field->charset(); const uchar *src; - if (!(src= (const uchar*)reader->read(fpi->m_max_image_len))) + if (!(src = (const uchar *)reader->read(fpi->m_max_image_len))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ - const uchar* const src_end= src + fpi->m_max_image_len; - uchar* const dst_end= dst + field->pack_length(); + const uchar *const src_end = src + fpi->m_max_image_len; + uchar *const dst_end = dst + field->pack_length(); - while (src < src_end) - { - my_wc_t wc= (src[0] <<8) | src[1]; + while (src < src_end) { + my_wc_t wc = (src[0] << 8) | src[1]; src += 
2; - int res= cset->cset->wc_mb(cset, wc, dst, dst_end); - DBUG_ASSERT(res > 0 && res <=3); + int res = cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res > 0 && res <= 3); if (res < 0) return UNPACK_FAILURE; dst += res; } - cset->cset->fill(cset, reinterpret_cast(dst), - dst_end - dst, cset->pad_char); + cset->cset->fill(cset, reinterpret_cast(dst), dst_end - dst, + cset->pad_char); return UNPACK_SUCCESS; } - /* Function of type rdb_index_field_pack_t */ static void rdb_pack_with_varchar_encoding( - Rdb_field_packing* const fpi, Field* const field, uchar *buf, uchar **dst, - Rdb_pack_field_context* const pack_ctx __attribute__((__unused__))) -{ + Rdb_field_packing *const fpi, Field *const field, uchar *buf, uchar **dst, + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) { /* Use a flag byte every Nth byte. Set it to (255 - #pad) where #pad is 0 when the var length field filled all N-1 previous bytes and #pad is @@ -1483,66 +1390,58 @@ static void rdb_pack_with_varchar_encoding( * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252 And the 4 byte string compares as greater than the 3 byte string */ - const CHARSET_INFO* const charset= field->charset(); - Field_varstring* const field_var= (Field_varstring*)field; + const CHARSET_INFO *const charset = field->charset(); + Field_varstring *const field_var = (Field_varstring *)field; - const size_t value_length= (field_var->length_bytes == 1) ? - (uint) *field->ptr : - uint2korr(field->ptr); - size_t xfrm_len= charset->coll->strnxfrm( - charset, - buf, fpi->m_max_image_len, - field_var->char_length(), - field_var->ptr + field_var->length_bytes, - value_length, - 0); + const size_t value_length = (field_var->length_bytes == 1) + ? 
(uint)*field->ptr + : uint2korr(field->ptr); + size_t xfrm_len = charset->coll->strnxfrm( + charset, buf, fpi->m_max_image_len, field_var->char_length(), + field_var->ptr + field_var->length_bytes, value_length, 0); /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */ - size_t encoded_size= 0; - uchar* ptr= *dst; - while (1) - { - const size_t copy_len= std::min((size_t)RDB_ESCAPE_LENGTH-1, xfrm_len); - const size_t padding_bytes= RDB_ESCAPE_LENGTH - 1 - copy_len; + size_t encoded_size = 0; + uchar *ptr = *dst; + while (1) { + const size_t copy_len = std::min((size_t)RDB_ESCAPE_LENGTH - 1, xfrm_len); + const size_t padding_bytes = RDB_ESCAPE_LENGTH - 1 - copy_len; memcpy(ptr, buf, copy_len); ptr += copy_len; buf += copy_len; // pad with zeros if necessary; - for (size_t idx= 0; idx < padding_bytes; idx++) - *(ptr++)= 0; + for (size_t idx = 0; idx < padding_bytes; idx++) + *(ptr++) = 0; *(ptr++) = 255 - padding_bytes; - xfrm_len -= copy_len; + xfrm_len -= copy_len; encoded_size += RDB_ESCAPE_LENGTH; - if (padding_bytes !=0) + if (padding_bytes != 0) break; } *dst += encoded_size; } - /* Compare the string in [buf..buf_end) with a string that is an infinite sequence of strings in space_xfrm */ -static -int rdb_compare_string_with_spaces(const uchar *buf, const uchar* const buf_end, - const std::vector* const space_xfrm) -{ - int cmp= 0; - while (buf < buf_end) - { - size_t bytes = std::min((size_t) (buf_end - buf), space_xfrm->size()); - if ((cmp= memcmp(buf, space_xfrm->data(), bytes)) != 0) +static int +rdb_compare_string_with_spaces(const uchar *buf, const uchar *const buf_end, + const std::vector *const space_xfrm) { + int cmp = 0; + while (buf < buf_end) { + size_t bytes = std::min((size_t)(buf_end - buf), space_xfrm->size()); + if ((cmp = memcmp(buf, space_xfrm->data(), bytes)) != 0) break; buf += bytes; } return cmp; } -static const int RDB_TRIMMED_CHARS_OFFSET= 8; +static const int RDB_TRIMMED_CHARS_OFFSET = 8; /* Pack the data with 
Variable-Length Space-Padded Encoding. @@ -1614,68 +1513,57 @@ static const int RDB_TRIMMED_CHARS_OFFSET= 8; rdb_skip_variable_space_pad */ -static void rdb_pack_with_varchar_space_pad( - Rdb_field_packing* const fpi, Field* const field, uchar* buf, - uchar **dst, Rdb_pack_field_context* const pack_ctx) -{ - Rdb_string_writer* const unpack_info= pack_ctx->writer; - const CHARSET_INFO* const charset= field->charset(); - const auto field_var= static_cast(field); +static void +rdb_pack_with_varchar_space_pad(Rdb_field_packing *const fpi, + Field *const field, uchar *buf, uchar **dst, + Rdb_pack_field_context *const pack_ctx) { + Rdb_string_writer *const unpack_info = pack_ctx->writer; + const CHARSET_INFO *const charset = field->charset(); + const auto field_var = static_cast(field); - const size_t value_length= (field_var->length_bytes == 1) ? - (uint) *field->ptr : - uint2korr(field->ptr); + const size_t value_length = (field_var->length_bytes == 1) + ? (uint)*field->ptr + : uint2korr(field->ptr); - const size_t trimmed_len= - charset->cset->lengthsp(charset, - (const char*)field_var->ptr + - field_var->length_bytes, - value_length); + const size_t trimmed_len = charset->cset->lengthsp( + charset, (const char *)field_var->ptr + field_var->length_bytes, + value_length); const size_t xfrm_len = charset->coll->strnxfrm( - charset, - buf, fpi->m_max_image_len, - field_var->char_length(), - field_var->ptr + field_var->length_bytes, - trimmed_len, - 0); + charset, buf, fpi->m_max_image_len, field_var->char_length(), + field_var->ptr + field_var->length_bytes, trimmed_len, 0); /* Got a mem-comparable image in 'buf'. 
Now, produce varlength encoding */ - uchar* const buf_end= buf + xfrm_len; + uchar *const buf_end = buf + xfrm_len; - size_t encoded_size= 0; - uchar *ptr= *dst; + size_t encoded_size = 0; + uchar *ptr = *dst; size_t padding_bytes; - while (true) - { - const size_t copy_len= - std::min(fpi->m_segment_size-1, buf_end - buf); - padding_bytes= fpi->m_segment_size - 1 - copy_len; + while (true) { + const size_t copy_len = + std::min(fpi->m_segment_size - 1, buf_end - buf); + padding_bytes = fpi->m_segment_size - 1 - copy_len; memcpy(ptr, buf, copy_len); ptr += copy_len; buf += copy_len; - if (padding_bytes) - { + if (padding_bytes) { memcpy(ptr, fpi->space_xfrm->data(), padding_bytes); - ptr+= padding_bytes; - *ptr= VARCHAR_CMP_EQUAL_TO_SPACES; // last segment - } - else - { + ptr += padding_bytes; + *ptr = VARCHAR_CMP_EQUAL_TO_SPACES; // last segment + } else { // Compare the string suffix with a hypothetical infinite string of // spaces. It could be that the first difference is beyond the end of // current chunk. - const int cmp= - rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); + const int cmp = + rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); if (cmp < 0) - *ptr= VARCHAR_CMP_LESS_THAN_SPACES; + *ptr = VARCHAR_CMP_LESS_THAN_SPACES; else if (cmp > 0) - *ptr= VARCHAR_CMP_GREATER_THAN_SPACES; - else - { + *ptr = VARCHAR_CMP_GREATER_THAN_SPACES; + else { // It turns out all the rest are spaces. - *ptr= VARCHAR_CMP_EQUAL_TO_SPACES; + *ptr = VARCHAR_CMP_EQUAL_TO_SPACES; } } encoded_size += fpi->m_segment_size; @@ -1687,24 +1575,20 @@ static void rdb_pack_with_varchar_space_pad( // m_unpack_info_stores_value means unpack_info stores the whole original // value. There is no need to store the number of trimmed/padded endspaces // in that case. 
- if (unpack_info && !fpi->m_unpack_info_stores_value) - { + if (unpack_info && !fpi->m_unpack_info_stores_value) { // (value_length - trimmed_len) is the number of trimmed space *characters* // then, padding_bytes is the number of *bytes* added as padding // then, we add 8, because we don't store negative values. DBUG_ASSERT(padding_bytes % fpi->space_xfrm_len == 0); - DBUG_ASSERT((value_length - trimmed_len)% fpi->space_mb_len == 0); - const size_t removed_chars= - RDB_TRIMMED_CHARS_OFFSET + - (value_length - trimmed_len) / fpi->space_mb_len - - padding_bytes/fpi->space_xfrm_len; + DBUG_ASSERT((value_length - trimmed_len) % fpi->space_mb_len == 0); + const size_t removed_chars = + RDB_TRIMMED_CHARS_OFFSET + + (value_length - trimmed_len) / fpi->space_mb_len - + padding_bytes / fpi->space_xfrm_len; - if (fpi->m_unpack_info_uses_two_bytes) - { + if (fpi->m_unpack_info_uses_two_bytes) { unpack_info->write_uint16(removed_chars); - } - else - { + } else { DBUG_ASSERT(removed_chars < 0x100); unpack_info->write_uint8(removed_chars); } @@ -1718,35 +1602,30 @@ static void rdb_pack_with_varchar_space_pad( */ static int rdb_unpack_binary_or_utf8_varchar( - Rdb_field_packing* const fpi, Field* const field, - uchar* dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader __attribute__((__unused__))) -{ + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { const uchar *ptr; - size_t len= 0; - bool finished= false; - uchar *d0= dst; - Field_varstring* const field_var= (Field_varstring*)field; + size_t len = 0; + bool finished = false; + uchar *d0 = dst; + Field_varstring *const field_var = (Field_varstring *)field; dst += field_var->length_bytes; // How much we can unpack - size_t dst_len= field_var->pack_length() - field_var->length_bytes; - uchar* const dst_end= dst + dst_len; + size_t dst_len = field_var->pack_length() - 
field_var->length_bytes; + uchar *const dst_end = dst + dst_len; /* Decode the length-emitted encoding here */ - while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH))) - { + while ((ptr = (const uchar *)reader->read(RDB_ESCAPE_LENGTH))) { /* See rdb_pack_with_varchar_encoding. */ - uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes - uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad; + uchar pad = 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad; - if (used_bytes > RDB_ESCAPE_LENGTH - 1) - { + if (used_bytes > RDB_ESCAPE_LENGTH - 1) { return UNPACK_FAILURE; /* cannot store that much, invalid data */ } - if (dst_len < used_bytes) - { + if (dst_len < used_bytes) { /* Encoded index tuple is longer than the size in the record buffer? */ return UNPACK_FAILURE; } @@ -1754,10 +1633,8 @@ static int rdb_unpack_binary_or_utf8_varchar( /* Now, we need to decode used_bytes of data and append them to the value. */ - if (fpi->m_varchar_charset == &my_charset_utf8_bin) - { - if (used_bytes & 1) - { + if (fpi->m_varchar_charset == &my_charset_utf8_bin) { + if (used_bytes & 1) { /* UTF-8 characters are encoded into two-byte entities. There is no way we can have an odd number of bytes after encoding. 
@@ -1765,33 +1642,29 @@ static int rdb_unpack_binary_or_utf8_varchar( return UNPACK_FAILURE; } - const uchar *src= ptr; - const uchar *src_end= ptr + used_bytes; - while (src < src_end) - { - my_wc_t wc= (src[0] <<8) | src[1]; + const uchar *src = ptr; + const uchar *src_end = ptr + used_bytes; + while (src < src_end) { + my_wc_t wc = (src[0] << 8) | src[1]; src += 2; - const CHARSET_INFO *cset= fpi->m_varchar_charset; - int res= cset->cset->wc_mb(cset, wc, dst, dst_end); - DBUG_ASSERT(res > 0 && res <=3); + const CHARSET_INFO *cset = fpi->m_varchar_charset; + int res = cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res > 0 && res <= 3); if (res < 0) return UNPACK_FAILURE; dst += res; len += res; dst_len -= res; } - } - else - { + } else { memcpy(dst, ptr, used_bytes); dst += used_bytes; dst_len -= used_bytes; len += used_bytes; } - if (used_bytes < RDB_ESCAPE_LENGTH - 1) - { - finished= true; + if (used_bytes < RDB_ESCAPE_LENGTH - 1) { + finished = true; break; } } @@ -1800,12 +1673,9 @@ static int rdb_unpack_binary_or_utf8_varchar( return UNPACK_FAILURE; /* Save the length */ - if (field_var->length_bytes == 1) - { - d0[0]= len; - } - else - { + if (field_var->length_bytes == 1) { + d0[0] = len; + } else { DBUG_ASSERT(field_var->length_bytes == 2); int2store(d0, len); } @@ -1820,66 +1690,54 @@ static int rdb_unpack_binary_or_utf8_varchar( rdb_skip_variable_space_pad - skip function */ static int rdb_unpack_binary_or_utf8_varchar_space_pad( - Rdb_field_packing* const fpi, Field* const field, - uchar* dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader) -{ + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, Rdb_string_reader *const unp_reader) { const uchar *ptr; - size_t len= 0; - bool finished= false; - Field_varstring* const field_var= static_cast(field); - uchar *d0= dst; - uchar *dst_end= dst + field_var->pack_length(); + size_t len = 0; + bool finished = false; + 
Field_varstring *const field_var = static_cast(field); + uchar *d0 = dst; + uchar *dst_end = dst + field_var->pack_length(); dst += field_var->length_bytes; - uint space_padding_bytes= 0; + uint space_padding_bytes = 0; uint extra_spaces; - if ((fpi->m_unpack_info_uses_two_bytes? - unp_reader->read_uint16(&extra_spaces): - unp_reader->read_uint8(&extra_spaces))) - { + if ((fpi->m_unpack_info_uses_two_bytes + ? unp_reader->read_uint16(&extra_spaces) + : unp_reader->read_uint8(&extra_spaces))) { return UNPACK_FAILURE; } - if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET) - { - space_padding_bytes= -(static_cast(extra_spaces) - - RDB_TRIMMED_CHARS_OFFSET); - extra_spaces= 0; - } - else + if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET) { + space_padding_bytes = + -(static_cast(extra_spaces) - RDB_TRIMMED_CHARS_OFFSET); + extra_spaces = 0; + } else extra_spaces -= RDB_TRIMMED_CHARS_OFFSET; space_padding_bytes *= fpi->space_xfrm_len; /* Decode the length-emitted encoding here */ - while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) - { - const char last_byte= ptr[fpi->m_segment_size - 1]; + while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) { + const char last_byte = ptr[fpi->m_segment_size - 1]; size_t used_bytes; - if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment - { - if (space_padding_bytes > (fpi->m_segment_size-1)) - return UNPACK_FAILURE; // Cannot happen, corrupted data - used_bytes= (fpi->m_segment_size-1) - space_padding_bytes; - finished= true; - } - else + if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment { + if (space_padding_bytes > (fpi->m_segment_size - 1)) + return UNPACK_FAILURE; // Cannot happen, corrupted data + used_bytes = (fpi->m_segment_size - 1) - space_padding_bytes; + finished = true; + } else { if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES && - last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) - { - return UNPACK_FAILURE; // Invalid value + last_byte != 
VARCHAR_CMP_GREATER_THAN_SPACES) { + return UNPACK_FAILURE; // Invalid value } - used_bytes= fpi->m_segment_size-1; + used_bytes = fpi->m_segment_size - 1; } // Now, need to decode used_bytes of data and append them to the value. - if (fpi->m_varchar_charset == &my_charset_utf8_bin) - { - if (used_bytes & 1) - { + if (fpi->m_varchar_charset == &my_charset_utf8_bin) { + if (used_bytes & 1) { /* UTF-8 characters are encoded into two-byte entities. There is no way we can have an odd number of bytes after encoding. @@ -1887,23 +1745,20 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( return UNPACK_FAILURE; } - const uchar *src= ptr; - const uchar* const src_end= ptr + used_bytes; - while (src < src_end) - { - my_wc_t wc= (src[0] <<8) | src[1]; + const uchar *src = ptr; + const uchar *const src_end = ptr + used_bytes; + while (src < src_end) { + my_wc_t wc = (src[0] << 8) | src[1]; src += 2; - const CHARSET_INFO *cset= fpi->m_varchar_charset; - int res= cset->cset->wc_mb(cset, wc, dst, dst_end); - DBUG_ASSERT(res <=3); + const CHARSET_INFO *cset = fpi->m_varchar_charset; + int res = cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res <= 3); if (res <= 0) return UNPACK_FAILURE; dst += res; len += res; } - } - else - { + } else { if (dst + used_bytes > dst_end) return UNPACK_FAILURE; memcpy(dst, ptr, used_bytes); @@ -1911,10 +1766,8 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( len += used_bytes; } - if (finished) - { - if (extra_spaces) - { + if (finished) { + if (extra_spaces) { // Both binary and UTF-8 charset store space as ' ', // so the following is ok: if (dst + extra_spaces > dst_end) @@ -1930,12 +1783,9 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( return UNPACK_FAILURE; /* Save the length */ - if (field_var->length_bytes == 1) - { - d0[0]= len; - } - else - { + if (field_var->length_bytes == 1) { + d0[0] = len; + } else { DBUG_ASSERT(field_var->length_bytes == 2); int2store(d0, len); } @@ -1949,13 +1799,11 @@ static 
int rdb_unpack_binary_or_utf8_varchar_space_pad( */ static void rdb_make_unpack_unknown( - const Rdb_collation_codec *codec __attribute__((__unused__)), - const Field* const field, Rdb_pack_field_context* const pack_ctx) -{ + const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_pack_field_context *const pack_ctx) { pack_ctx->writer->write(field->ptr, field->pack_length()); } - /* This point of this function is only to indicate that unpack_info is available. @@ -1965,35 +1813,27 @@ static void rdb_make_unpack_unknown( */ static void rdb_dummy_make_unpack_info( - const Rdb_collation_codec *codec __attribute__((__unused__)), - const Field *field __attribute__((__unused__)), - Rdb_pack_field_context *pack_ctx __attribute__((__unused__))) -{ -} + const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), + const Field *field MY_ATTRIBUTE((__unused__)), + Rdb_pack_field_context *pack_ctx MY_ATTRIBUTE((__unused__))) {} /* Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_unknown(Rdb_field_packing* const fpi, Field* const field, - uchar* const dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader) -{ +static int rdb_unpack_unknown(Rdb_field_packing *const fpi, Field *const field, + uchar *const dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { const uchar *ptr; const uint len = fpi->m_unpack_data_len; // We don't use anything from the key, so skip over it. - if (rdb_skip_max_length(fpi, field, reader)) - { + if (rdb_skip_max_length(fpi, field, reader)) { return UNPACK_FAILURE; } - // Unpack info is needed but none available. 
- if (len > 0 && unp_reader == nullptr) - { - return UNPACK_INFO_MISSING; - } - if ((ptr= (const uchar*)unp_reader->read(len))) - { + + DBUG_ASSERT_IMP(len > 0, unp_reader != nullptr); + + if ((ptr = (const uchar *)unp_reader->read(len))) { memcpy(dst, ptr, len); return UNPACK_SUCCESS; } @@ -2005,16 +1845,14 @@ static int rdb_unpack_unknown(Rdb_field_packing* const fpi, Field* const field, */ static void rdb_make_unpack_unknown_varchar( - const Rdb_collation_codec* const codec __attribute__((__unused__)), - const Field* const field, Rdb_pack_field_context* const pack_ctx) -{ - const auto f= static_cast(field); - uint len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); - len+= f->length_bytes; + const Rdb_collation_codec *const codec MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_pack_field_context *const pack_ctx) { + const auto f = static_cast(field); + uint len = f->length_bytes == 1 ? (uint)*f->ptr : uint2korr(f->ptr); + len += f->length_bytes; pack_ctx->writer->write(field->ptr, len); } - /* Function of type rdb_index_field_unpack_t @@ -2029,34 +1867,27 @@ static void rdb_make_unpack_unknown_varchar( rdb_make_unpack_unknown, rdb_unpack_unknown */ -static int rdb_unpack_unknown_varchar(Rdb_field_packing* const fpi, - Field* const field, - uchar *dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader) -{ +static int rdb_unpack_unknown_varchar(Rdb_field_packing *const fpi, + Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { const uchar *ptr; - uchar* const d0= dst; - const auto f= static_cast(field); + uchar *const d0 = dst; + const auto f = static_cast(field); dst += f->length_bytes; - const uint len_bytes= f->length_bytes; + const uint len_bytes = f->length_bytes; // We don't use anything from the key, so skip over it. 
- if (fpi->m_skip_func(fpi, field, reader)) - { + if (fpi->m_skip_func(fpi, field, reader)) { return UNPACK_FAILURE; } - // Unpack info is needed but none available. + DBUG_ASSERT(len_bytes > 0); - if (unp_reader == nullptr) - { - return UNPACK_INFO_MISSING; - } - if ((ptr= (const uchar*)unp_reader->read(len_bytes))) - { + DBUG_ASSERT(unp_reader != nullptr); + + if ((ptr = (const uchar *)unp_reader->read(len_bytes))) { memcpy(d0, ptr, len_bytes); - const uint len= len_bytes == 1 ? (uint) *ptr : uint2korr(ptr); - if ((ptr= (const uchar*)unp_reader->read(len))) - { + const uint len = len_bytes == 1 ? (uint)*ptr : uint2korr(ptr); + if ((ptr = (const uchar *)unp_reader->read(len))) { memcpy(dst, ptr, len); return UNPACK_SUCCESS; } @@ -2064,47 +1895,33 @@ static int rdb_unpack_unknown_varchar(Rdb_field_packing* const fpi, return UNPACK_FAILURE; } - /* Write unpack_data for a "simple" collation */ -static void rdb_write_unpack_simple(Rdb_bit_writer* const writer, - const Rdb_collation_codec* const codec, - const uchar* const src, - const size_t src_len) -{ - for (uint i= 0; i < src_len; i++) - { +static void rdb_write_unpack_simple(Rdb_bit_writer *const writer, + const Rdb_collation_codec *const codec, + const uchar *const src, + const size_t src_len) { + for (uint i = 0; i < src_len; i++) { writer->write(codec->m_enc_size[src[i]], codec->m_enc_idx[src[i]]); } } - -static uint rdb_read_unpack_simple(Rdb_bit_reader* const reader, - const Rdb_collation_codec* const codec, - const uchar* const src, - const size_t &src_len, uchar* const dst) -{ - for (uint i= 0; i < src_len; i++) - { - if (codec->m_dec_size[src[i]] > 0) - { +static uint rdb_read_unpack_simple(Rdb_bit_reader *const reader, + const Rdb_collation_codec *const codec, + const uchar *const src, + const size_t &src_len, uchar *const dst) { + for (uint i = 0; i < src_len; i++) { + if (codec->m_dec_size[src[i]] > 0) { uint *ret; - // Unpack info is needed but none available. 
- if (reader == nullptr) - { - return UNPACK_INFO_MISSING; - } + DBUG_ASSERT(reader != nullptr); - if ((ret= reader->read(codec->m_dec_size[src[i]])) == nullptr) - { + if ((ret = reader->read(codec->m_dec_size[src[i]])) == nullptr) { return UNPACK_FAILURE; } - dst[i]= codec->m_dec_idx[*ret][src[i]]; - } - else - { - dst[i]= codec->m_dec_idx[0][src[i]]; + dst[i] = codec->m_dec_idx[*ret][src[i]]; + } else { + dst[i] = codec->m_dec_idx[0][src[i]]; } } @@ -2119,14 +1936,13 @@ static uint rdb_read_unpack_simple(Rdb_bit_reader* const reader, */ static void -rdb_make_unpack_simple_varchar(const Rdb_collation_codec* const codec, - const Field* const field, - Rdb_pack_field_context* const pack_ctx) -{ - const auto f= static_cast(field); - uchar* const src= f->ptr + f->length_bytes; - const size_t src_len= - f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr); +rdb_make_unpack_simple_varchar(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) { + const auto f = static_cast(field); + uchar *const src = f->ptr + f->length_bytes; + const size_t src_len = + f->length_bytes == 1 ? (uint)*f->ptr : uint2korr(f->ptr); Rdb_bit_writer bit_writer(pack_ctx->writer); // The std::min compares characters with bytes, but for simple collations, // mbmaxlen = 1. 
@@ -2142,92 +1958,75 @@ rdb_make_unpack_simple_varchar(const Rdb_collation_codec* const codec, rdb_unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function */ -int -rdb_unpack_simple_varchar_space_pad(Rdb_field_packing* const fpi, - Field* const field, - uchar* dst, - Rdb_string_reader* const reader, - Rdb_string_reader * const unp_reader) -{ +int rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *const fpi, + Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { const uchar *ptr; - size_t len= 0; - bool finished= false; - uchar *d0= dst; - const Field_varstring* const field_var= static_cast(field); + size_t len = 0; + bool finished = false; + uchar *d0 = dst; + const Field_varstring *const field_var = + static_cast(field); // For simple collations, char_length is also number of bytes. DBUG_ASSERT((size_t)fpi->m_max_image_len >= field_var->char_length()); - uchar *dst_end= dst + field_var->pack_length(); + uchar *dst_end = dst + field_var->pack_length(); dst += field_var->length_bytes; Rdb_bit_reader bit_reader(unp_reader); - uint space_padding_bytes= 0; + uint space_padding_bytes = 0; uint extra_spaces; - if (!unp_reader) - { - return UNPACK_INFO_MISSING; - } + DBUG_ASSERT(unp_reader != nullptr); - if ((fpi->m_unpack_info_uses_two_bytes? - unp_reader->read_uint16(&extra_spaces): - unp_reader->read_uint8(&extra_spaces))) - { + if ((fpi->m_unpack_info_uses_two_bytes + ? 
unp_reader->read_uint16(&extra_spaces) + : unp_reader->read_uint8(&extra_spaces))) { return UNPACK_FAILURE; } - if (extra_spaces <= 8) - { - space_padding_bytes= -(static_cast(extra_spaces) - 8); - extra_spaces= 0; - } - else + if (extra_spaces <= 8) { + space_padding_bytes = -(static_cast(extra_spaces) - 8); + extra_spaces = 0; + } else extra_spaces -= 8; space_padding_bytes *= fpi->space_xfrm_len; /* Decode the length-emitted encoding here */ - while ((ptr= (const uchar*)reader->read(fpi->m_segment_size))) - { - const char last_byte= ptr[fpi->m_segment_size - 1]; // number of padding bytes + while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) { + const char last_byte = + ptr[fpi->m_segment_size - 1]; // number of padding bytes size_t used_bytes; - if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) - { + if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) { // this is the last one - if (space_padding_bytes > (fpi->m_segment_size-1)) - return UNPACK_FAILURE; // Cannot happen, corrupted data - used_bytes= (fpi->m_segment_size-1) - space_padding_bytes; - finished= true; - } - else - { + if (space_padding_bytes > (fpi->m_segment_size - 1)) + return UNPACK_FAILURE; // Cannot happen, corrupted data + used_bytes = (fpi->m_segment_size - 1) - space_padding_bytes; + finished = true; + } else { if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES && - last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) - { + last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) { return UNPACK_FAILURE; } - used_bytes= fpi->m_segment_size-1; + used_bytes = fpi->m_segment_size - 1; } - if (dst + used_bytes > dst_end) - { + if (dst + used_bytes > dst_end) { // The value on disk is longer than the field definition allows? 
return UNPACK_FAILURE; } uint ret; - if ((ret= rdb_read_unpack_simple(&bit_reader, - fpi->m_charset_codec, ptr, used_bytes, - dst)) != UNPACK_SUCCESS) - { + if ((ret = rdb_read_unpack_simple(&bit_reader, fpi->m_charset_codec, ptr, + used_bytes, dst)) != UNPACK_SUCCESS) { return ret; } dst += used_bytes; len += used_bytes; - if (finished) - { - if (extra_spaces) - { + if (finished) { + if (extra_spaces) { if (dst + extra_spaces > dst_end) return UNPACK_FAILURE; // pad_char has a 1-byte form in all charsets that @@ -2243,19 +2042,15 @@ rdb_unpack_simple_varchar_space_pad(Rdb_field_packing* const fpi, return UNPACK_FAILURE; /* Save the length */ - if (field_var->length_bytes == 1) - { - d0[0]= len; - } - else - { + if (field_var->length_bytes == 1) { + d0[0] = len; + } else { DBUG_ASSERT(field_var->length_bytes == 2); int2store(d0, len); } return UNPACK_SUCCESS; } - /* Function of type rdb_make_unpack_info_t @@ -2267,11 +2062,10 @@ rdb_unpack_simple_varchar_space_pad(Rdb_field_packing* const fpi, The VARCHAR variant is in rdb_make_unpack_simple_varchar */ -static void rdb_make_unpack_simple(const Rdb_collation_codec* const codec, - const Field* const field, - Rdb_pack_field_context* const pack_ctx) -{ - const uchar* const src= field->ptr; +static void rdb_make_unpack_simple(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) { + const uchar *const src = field->ptr; Rdb_bit_writer bit_writer(pack_ctx->writer); rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length()); } @@ -2280,18 +2074,15 @@ static void rdb_make_unpack_simple(const Rdb_collation_codec* const codec, Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_simple(Rdb_field_packing* const fpi, - Field* const field __attribute__((__unused__)), - uchar* const dst, - Rdb_string_reader* const reader, - Rdb_string_reader* const unp_reader) -{ +static int rdb_unpack_simple(Rdb_field_packing *const fpi, + Field *const field 
MY_ATTRIBUTE((__unused__)), + uchar *const dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { const uchar *ptr; const uint len = fpi->m_max_image_len; Rdb_bit_reader bit_reader(unp_reader); - if (!(ptr= (const uchar*)reader->read(len))) - { + if (!(ptr = (const uchar *)reader->read(len))) { return UNPACK_FAILURE; } @@ -2299,17 +2090,15 @@ static int rdb_unpack_simple(Rdb_field_packing* const fpi, fpi->m_charset_codec, ptr, len, dst); } - // See Rdb_charset_space_info::spaces_xfrm -const int RDB_SPACE_XFRM_SIZE= 32; +const int RDB_SPACE_XFRM_SIZE = 32; // A class holding information about how space character is represented in a // charset. -class Rdb_charset_space_info -{ - public: - Rdb_charset_space_info(const Rdb_charset_space_info&) = delete; - Rdb_charset_space_info& operator=(const Rdb_charset_space_info&) = delete; +class Rdb_charset_space_info { +public: + Rdb_charset_space_info(const Rdb_charset_space_info &) = delete; + Rdb_charset_space_info &operator=(const Rdb_charset_space_info &) = delete; Rdb_charset_space_info() = default; // A few strxfrm'ed space characters, at least RDB_SPACE_XFRM_SIZE bytes @@ -2325,8 +2114,7 @@ class Rdb_charset_space_info }; static std::array, MY_ALL_CHARSETS_SIZE> -rdb_mem_comparable_space; - + rdb_mem_comparable_space; /* @brief @@ -2347,39 +2135,33 @@ rdb_mem_comparable_space; uses the charset). 
*/ -static -void rdb_get_mem_comparable_space(const CHARSET_INFO* const cs, - const std::vector **xfrm, - size_t* const xfrm_len, - size_t* const mb_len) -{ +static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs, + const std::vector **xfrm, + size_t *const xfrm_len, + size_t *const mb_len) { DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE); - if (!rdb_mem_comparable_space[cs->number].get()) - { + if (!rdb_mem_comparable_space[cs->number].get()) { mysql_mutex_lock(&rdb_mem_cmp_space_mutex); - if (!rdb_mem_comparable_space[cs->number].get()) - { + if (!rdb_mem_comparable_space[cs->number].get()) { // Upper bound of how many bytes can be occupied by multi-byte form of a // character in any charset. - const int MAX_MULTI_BYTE_CHAR_SIZE= 4; + const int MAX_MULTI_BYTE_CHAR_SIZE = 4; DBUG_ASSERT(cs->mbmaxlen <= MAX_MULTI_BYTE_CHAR_SIZE); // multi-byte form of the ' ' (space) character uchar space_mb[MAX_MULTI_BYTE_CHAR_SIZE]; - const size_t space_mb_len= cs->cset->wc_mb(cs, (my_wc_t) cs->pad_char, - space_mb, - space_mb + sizeof(space_mb)); + const size_t space_mb_len = cs->cset->wc_mb( + cs, (my_wc_t)cs->pad_char, space_mb, space_mb + sizeof(space_mb)); - uchar space[20]; // mem-comparable image of the space character + uchar space[20]; // mem-comparable image of the space character - const size_t space_len= cs->coll->strnxfrm(cs, space, sizeof(space), 1, - space_mb, space_mb_len, 0); - Rdb_charset_space_info* const info= new Rdb_charset_space_info; - info->space_xfrm_len= space_len; - info->space_mb_len= space_mb_len; - while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE) - { + const size_t space_len = cs->coll->strnxfrm(cs, space, sizeof(space), 1, + space_mb, space_mb_len, 0); + Rdb_charset_space_info *const info = new Rdb_charset_space_info; + info->space_xfrm_len = space_len; + info->space_mb_len = space_mb_len; + while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE) { info->spaces_xfrm.insert(info->spaces_xfrm.end(), space, space + space_len); } @@ 
-2388,79 +2170,69 @@ void rdb_get_mem_comparable_space(const CHARSET_INFO* const cs, mysql_mutex_unlock(&rdb_mem_cmp_space_mutex); } - *xfrm= &rdb_mem_comparable_space[cs->number]->spaces_xfrm; - *xfrm_len= rdb_mem_comparable_space[cs->number]->space_xfrm_len; - *mb_len= rdb_mem_comparable_space[cs->number]->space_mb_len; + *xfrm = &rdb_mem_comparable_space[cs->number]->spaces_xfrm; + *xfrm_len = rdb_mem_comparable_space[cs->number]->space_xfrm_len; + *mb_len = rdb_mem_comparable_space[cs->number]->space_mb_len; } mysql_mutex_t rdb_mem_cmp_space_mutex; -std::array - rdb_collation_data; +std::array + rdb_collation_data; mysql_mutex_t rdb_collation_data_mutex; -static bool rdb_is_collation_supported(const my_core::CHARSET_INFO* const cs) -{ +static bool rdb_is_collation_supported(const my_core::CHARSET_INFO *const cs) { return (cs->coll == &my_collation_8bit_simple_ci_handler); } -static const Rdb_collation_codec *rdb_init_collation_mapping( - const my_core::CHARSET_INFO* const cs) -{ +static const Rdb_collation_codec * +rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { DBUG_ASSERT(cs && cs->state & MY_CS_AVAILABLE); - const Rdb_collation_codec* codec= rdb_collation_data[cs->number]; + const Rdb_collation_codec *codec = rdb_collation_data[cs->number]; - if (codec == nullptr && rdb_is_collation_supported(cs)) - { + if (codec == nullptr && rdb_is_collation_supported(cs)) { mysql_mutex_lock(&rdb_collation_data_mutex); - codec= rdb_collation_data[cs->number]; - if (codec == nullptr) - { - Rdb_collation_codec *cur= nullptr; + codec = rdb_collation_data[cs->number]; + if (codec == nullptr) { + Rdb_collation_codec *cur = nullptr; // Compute reverse mapping for simple collations. 
- if (cs->coll == &my_collation_8bit_simple_ci_handler) - { - cur= new Rdb_collation_codec; + if (cs->coll == &my_collation_8bit_simple_ci_handler) { + cur = new Rdb_collation_codec; std::map> rev_map; - size_t max_conflict_size= 0; - for (int src = 0; src < 256; src++) - { - uchar dst= cs->sort_order[src]; + size_t max_conflict_size = 0; + for (int src = 0; src < 256; src++) { + uchar dst = cs->sort_order[src]; rev_map[dst].push_back(src); - max_conflict_size= std::max(max_conflict_size, rev_map[dst].size()); + max_conflict_size = std::max(max_conflict_size, rev_map[dst].size()); } cur->m_dec_idx.resize(max_conflict_size); - for (auto const &p : rev_map) - { - uchar dst= p.first; - for (uint idx = 0; idx < p.second.size(); idx++) - { - uchar src= p.second[idx]; - uchar bits= my_bit_log2(my_round_up_to_next_power(p.second.size())); - cur->m_enc_idx[src]= idx; - cur->m_enc_size[src]= bits; - cur->m_dec_size[dst]= bits; - cur->m_dec_idx[idx][dst]= src; + for (auto const &p : rev_map) { + uchar dst = p.first; + for (uint idx = 0; idx < p.second.size(); idx++) { + uchar src = p.second[idx]; + uchar bits = + my_bit_log2(my_round_up_to_next_power(p.second.size())); + cur->m_enc_idx[src] = idx; + cur->m_enc_size[src] = bits; + cur->m_dec_size[dst] = bits; + cur->m_dec_idx[idx][dst] = src; } } - cur->m_make_unpack_info_func= - {{ rdb_make_unpack_simple_varchar, rdb_make_unpack_simple }}; - cur->m_unpack_func= - {{ rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple }}; - } - else - { + cur->m_make_unpack_info_func = { + {rdb_make_unpack_simple_varchar, rdb_make_unpack_simple}}; + cur->m_unpack_func = { + {rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple}}; + } else { // Out of luck for now. 
} - if (cur != nullptr) - { - codec= cur; - cur->m_cs= cs; - rdb_collation_data[cs->number]= cur; + if (cur != nullptr) { + codec = cur; + cur->m_cs = cs; + rdb_collation_data[cs->number] = cur; } } mysql_mutex_unlock(&rdb_collation_data_mutex); @@ -2469,24 +2241,17 @@ static const Rdb_collation_codec *rdb_init_collation_mapping( return codec; } - -static int get_segment_size_from_collation(const CHARSET_INFO* const cs) -{ +static int get_segment_size_from_collation(const CHARSET_INFO *const cs) { int ret; - if (cs == &my_charset_utf8mb4_bin || - cs == &my_charset_utf16_bin || - cs == &my_charset_utf16le_bin || - cs == &my_charset_utf32_bin) - { + if (cs == &my_charset_utf8mb4_bin || cs == &my_charset_utf16_bin || + cs == &my_charset_utf16le_bin || cs == &my_charset_utf32_bin) { /* In these collations, a character produces one weight, which is 3 bytes. Segment has 3 characters, add one byte for VARCHAR_CMP_* marker, and we get 3*3+1=10 */ - ret= 10; - } - else - { + ret = 10; + } else { /* All other collations. There are two classes: - Unicode-based, except for collations mentioned in the if-condition. @@ -2500,13 +2265,12 @@ static int get_segment_size_from_collation(const CHARSET_INFO* const cs) In both cases, take 8 bytes payload + 1 byte for VARCHAR_CMP* marker. */ - ret= 9; + ret = 9; } DBUG_ASSERT(ret < RDB_SPACE_XFRM_SIZE); return ret; } - /* @brief Setup packing of index field into its mem-comparable form @@ -2527,187 +2291,173 @@ static int get_segment_size_from_collation(const CHARSET_INFO* const cs) FALSE - Otherwise */ -bool Rdb_field_packing::setup(const Rdb_key_def* const key_descr, - const Field* const field, - const uint &keynr_arg, const uint &key_part_arg, - const uint16 &key_length) -{ - int res= false; - enum_field_types type= field ? 
field->real_type() : MYSQL_TYPE_LONGLONG; +bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, + const Field *const field, const uint &keynr_arg, + const uint &key_part_arg, + const uint16 &key_length) { + int res = false; + enum_field_types type = field ? field->real_type() : MYSQL_TYPE_LONGLONG; - m_keynr= keynr_arg; - m_key_part= key_part_arg; + m_keynr = keynr_arg; + m_key_part = key_part_arg; - m_maybe_null= field ? field->real_maybe_null() : false; - m_unpack_func= nullptr; - m_make_unpack_info_func= nullptr; - m_unpack_data_len= 0; - space_xfrm= nullptr; // safety + m_maybe_null = field ? field->real_maybe_null() : false; + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_data_len = 0; + space_xfrm = nullptr; // safety /* Calculate image length. By default, is is pack_length() */ - m_max_image_len= field ? field->pack_length() : - ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN; - m_skip_func= rdb_skip_max_length; - m_pack_func= rdb_pack_with_make_sort_key; + m_max_image_len = + field ? field->pack_length() : ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN; + m_skip_func = rdb_skip_max_length; + m_pack_func = rdb_pack_with_make_sort_key; switch (type) { - case MYSQL_TYPE_LONGLONG: - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_INT24: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_TINY: - m_unpack_func= rdb_unpack_integer; - return true; + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_TINY: + m_unpack_func = rdb_unpack_integer; + return true; - case MYSQL_TYPE_DOUBLE: - m_unpack_func= rdb_unpack_double; - return true; + case MYSQL_TYPE_DOUBLE: + m_unpack_func = rdb_unpack_double; + return true; - case MYSQL_TYPE_FLOAT: - m_unpack_func= rdb_unpack_float; - return true; + case MYSQL_TYPE_FLOAT: + m_unpack_func = rdb_unpack_float; + return true; - case MYSQL_TYPE_NEWDECIMAL: - /* - Decimal is packed with Field_new_decimal::make_sort_key, which just - does memcpy. 
- Unpacking decimal values was supported only after fix for issue#253, - because of that ha_rocksdb::get_storage_type() handles decimal values - in a special way. - */ - case MYSQL_TYPE_DATETIME2: - case MYSQL_TYPE_TIMESTAMP2: - /* These are packed with Field_temporal_with_date_and_timef::make_sort_key */ - case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */ - case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */ - /* Everything that comes here is packed with just a memcpy(). */ - m_unpack_func= rdb_unpack_binary_str; - return true; + case MYSQL_TYPE_NEWDECIMAL: + /* + Decimal is packed with Field_new_decimal::make_sort_key, which just + does memcpy. + Unpacking decimal values was supported only after fix for issue#253, + because of that ha_rocksdb::get_storage_type() handles decimal values + in a special way. + */ + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP2: + /* These are packed with Field_temporal_with_date_and_timef::make_sort_key */ + case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */ + case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */ + /* Everything that comes here is packed with just a memcpy(). */ + m_unpack_func = rdb_unpack_binary_str; + return true; - case MYSQL_TYPE_NEWDATE: - /* - This is packed by Field_newdate::make_sort_key. It assumes the data is - 3 bytes, and packing is done by swapping the byte order (for both big- - and little-endian) - */ - m_unpack_func= rdb_unpack_newdate; - return true; - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_BLOB: - { - if (key_descr) - { - // The my_charset_bin collation is special in that it will consider - // shorter strings sorting as less than longer strings. - // - // See Field_blob::make_sort_key for details. - m_max_image_len= key_length + - (field->charset() == &my_charset_bin - ? 
reinterpret_cast(field)->pack_length_no_ptr() - : 0); - // Return false because indexes on text/blob will always require - // a prefix. With a prefix, the optimizer will not be able to do an - // index-only scan since there may be content occuring after the prefix - // length. - return false; - } + case MYSQL_TYPE_NEWDATE: + /* + This is packed by Field_newdate::make_sort_key. It assumes the data is + 3 bytes, and packing is done by swapping the byte order (for both big- + and little-endian) + */ + m_unpack_func = rdb_unpack_newdate; + return true; + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: { + if (key_descr) { + // The my_charset_bin collation is special in that it will consider + // shorter strings sorting as less than longer strings. + // + // See Field_blob::make_sort_key for details. + m_max_image_len = + key_length + (field->charset() == &my_charset_bin + ? reinterpret_cast(field) + ->pack_length_no_ptr() + : 0); + // Return false because indexes on text/blob will always require + // a prefix. With a prefix, the optimizer will not be able to do an + // index-only scan since there may be content occuring after the prefix + // length. + return false; } - default: - break; + } + default: + break; } - m_unpack_info_stores_value= false; + m_unpack_info_stores_value = false; /* Handle [VAR](CHAR|BINARY) */ - if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) - { + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) { /* For CHAR-based columns, check how strxfrm image will take. field->field_length = field->char_length() * cs->mbmaxlen. 
*/ - const CHARSET_INFO *cs= field->charset(); - m_max_image_len= cs->coll->strnxfrmlen(cs, field->field_length); + const CHARSET_INFO *cs = field->charset(); + m_max_image_len = cs->coll->strnxfrmlen(cs, field->field_length); } - const bool is_varchar= (type == MYSQL_TYPE_VARCHAR); - const CHARSET_INFO *cs= field->charset(); + const bool is_varchar = (type == MYSQL_TYPE_VARCHAR); + const CHARSET_INFO *cs = field->charset(); // max_image_len before chunking is taken into account - const int max_image_len_before_chunks= m_max_image_len; + const int max_image_len_before_chunks = m_max_image_len; - if (is_varchar) - { + if (is_varchar) { // The default for varchar is variable-length, without space-padding for // comparisons - m_varchar_charset= cs; - m_skip_func= rdb_skip_variable_length; - m_pack_func= rdb_pack_with_varchar_encoding; - m_max_image_len= - (m_max_image_len/(RDB_ESCAPE_LENGTH-1) + 1) * RDB_ESCAPE_LENGTH; + m_varchar_charset = cs; + m_skip_func = rdb_skip_variable_length; + m_pack_func = rdb_pack_with_varchar_encoding; + m_max_image_len = + (m_max_image_len / (RDB_ESCAPE_LENGTH - 1) + 1) * RDB_ESCAPE_LENGTH; - const auto field_var= static_cast(field); - m_unpack_info_uses_two_bytes= (field_var->field_length + 8 >= 0x100); + const auto field_var = static_cast(field); + m_unpack_info_uses_two_bytes = (field_var->field_length + 8 >= 0x100); } - if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) - { + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) { // See http://dev.mysql.com/doc/refman/5.7/en/string-types.html for // information about character-based datatypes are compared. - bool use_unknown_collation= false; + bool use_unknown_collation = false; DBUG_EXECUTE_IF("myrocks_enable_unknown_collation_index_only_scans", - use_unknown_collation= true;); + use_unknown_collation = true;); - if (cs == &my_charset_bin) - { + if (cs == &my_charset_bin) { // - SQL layer pads BINARY(N) so that it always is N bytes long. 
// - For VARBINARY(N), values may have different lengths, so we're using // variable-length encoding. This is also the only charset where the // values are not space-padded for comparison. - m_unpack_func= is_varchar? rdb_unpack_binary_or_utf8_varchar : - rdb_unpack_binary_str; - res= true; - } - else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin) - { + m_unpack_func = is_varchar ? rdb_unpack_binary_or_utf8_varchar + : rdb_unpack_binary_str; + res = true; + } else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin) { // For _bin collations, mem-comparable form of the string is the string // itself. - if (is_varchar) - { + if (is_varchar) { // VARCHARs - are compared as if they were space-padded - but are // not actually space-padded (reading the value back produces the // original value, without the padding) - m_unpack_func= rdb_unpack_binary_or_utf8_varchar_space_pad; - m_skip_func= rdb_skip_variable_space_pad; - m_pack_func= rdb_pack_with_varchar_space_pad; - m_make_unpack_info_func= rdb_dummy_make_unpack_info; - m_segment_size= get_segment_size_from_collation(cs); - m_max_image_len= - (max_image_len_before_chunks/(m_segment_size-1) + 1) * - m_segment_size; + m_unpack_func = rdb_unpack_binary_or_utf8_varchar_space_pad; + m_skip_func = rdb_skip_variable_space_pad; + m_pack_func = rdb_pack_with_varchar_space_pad; + m_make_unpack_info_func = rdb_dummy_make_unpack_info; + m_segment_size = get_segment_size_from_collation(cs); + m_max_image_len = + (max_image_len_before_chunks / (m_segment_size - 1) + 1) * + m_segment_size; rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, &space_mb_len); - } - else - { + } else { // SQL layer pads CHAR(N) values to their maximum length. // We just store that and restore it back. - m_unpack_func= (cs == &my_charset_latin1_bin)? rdb_unpack_binary_str: - rdb_unpack_utf8_str; + m_unpack_func = (cs == &my_charset_latin1_bin) ? 
rdb_unpack_binary_str + : rdb_unpack_utf8_str; } - res= true; - } - else - { + res = true; + } else { // This is [VAR]CHAR(n) and the collation is not $(charset_name)_bin - res= true; // index-only scans are possible - m_unpack_data_len= is_varchar ? 0 : field->field_length; - const uint idx= is_varchar ? 0 : 1; - const Rdb_collation_codec *codec= nullptr; + res = true; // index-only scans are possible + m_unpack_data_len = is_varchar ? 0 : field->field_length; + const uint idx = is_varchar ? 0 : 1; + const Rdb_collation_codec *codec = nullptr; - if (is_varchar) - { + if (is_varchar) { // VARCHAR requires space-padding for doing comparisons // // The check for cs->levels_for_order is to catch @@ -2717,100 +2467,84 @@ bool Rdb_field_packing::setup(const Rdb_key_def* const key_descr, // either. // Currently we handle these collations as NO_PAD, even if they have // PAD_SPACE attribute. - if (cs->levels_for_order == 1) - { - m_pack_func= rdb_pack_with_varchar_space_pad; - m_skip_func= rdb_skip_variable_space_pad; - m_segment_size= get_segment_size_from_collation(cs); - m_max_image_len= - (max_image_len_before_chunks/(m_segment_size-1) + 1) * + if (cs->levels_for_order == 1) { + m_pack_func = rdb_pack_with_varchar_space_pad; + m_skip_func = rdb_skip_variable_space_pad; + m_segment_size = get_segment_size_from_collation(cs); + m_max_image_len = + (max_image_len_before_chunks / (m_segment_size - 1) + 1) * m_segment_size; rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, &space_mb_len); - } - else - { + } else { // NO_LINT_DEBUG sql_print_warning("RocksDB: you're trying to create an index " - "with a multi-level collation %s", cs->name); + "with a multi-level collation %s", + cs->name); // NO_LINT_DEBUG sql_print_warning("MyRocks will handle this collation internally " " as if it had a NO_PAD attribute."); - m_pack_func= rdb_pack_with_varchar_encoding; - m_skip_func= rdb_skip_variable_length; + m_pack_func = rdb_pack_with_varchar_encoding; + m_skip_func = 
rdb_skip_variable_length; } } - if ((codec= rdb_init_collation_mapping(cs)) != nullptr) - { + if ((codec = rdb_init_collation_mapping(cs)) != nullptr) { // The collation allows to store extra information in the unpack_info // which can be used to restore the original value from the // mem-comparable form. - m_make_unpack_info_func= codec->m_make_unpack_info_func[idx]; - m_unpack_func= codec->m_unpack_func[idx]; - m_charset_codec= codec; - } - else if (use_unknown_collation) - { + m_make_unpack_info_func = codec->m_make_unpack_info_func[idx]; + m_unpack_func = codec->m_unpack_func[idx]; + m_charset_codec = codec; + } else if (use_unknown_collation) { // We have no clue about how this collation produces mem-comparable // form. Our way of restoring the original value is to keep a copy of // the original value in unpack_info. - m_unpack_info_stores_value= true; - m_make_unpack_info_func= is_varchar ? rdb_make_unpack_unknown_varchar - : rdb_make_unpack_unknown; - m_unpack_func= is_varchar ? rdb_unpack_unknown_varchar - : rdb_unpack_unknown; - } - else - { + m_unpack_info_stores_value = true; + m_make_unpack_info_func = is_varchar ? rdb_make_unpack_unknown_varchar + : rdb_make_unpack_unknown; + m_unpack_func = + is_varchar ? rdb_unpack_unknown_varchar : rdb_unpack_unknown; + } else { // Same as above: we don't know how to restore the value from its // mem-comparable form. // Here, we just indicate to the SQL layer we can't do it. DBUG_ASSERT(m_unpack_func == nullptr); - m_unpack_info_stores_value= false; - res= false; // Indicate that index-only reads are not possible + m_unpack_info_stores_value = false; + res = false; // Indicate that index-only reads are not possible } } // Make an adjustment: unpacking partially covered columns is not // possible. field->table is populated when called through // Rdb_key_def::setup, but not during ha_rocksdb::index_flags. - if (field->table) - { + if (field->table) { // Get the original Field object and compare lengths. 
If this key part is // a prefix of a column, then we can't do index-only scans. - if (field->table->field[field->field_index]->field_length != key_length) - { - m_unpack_func= nullptr; - m_make_unpack_info_func= nullptr; - m_unpack_info_stores_value= true; - res= false; + if (field->table->field[field->field_index]->field_length != key_length) { + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_info_stores_value = true; + res = false; } - } - else - { - if (field->field_length != key_length) - { - m_unpack_func= nullptr; - m_make_unpack_info_func= nullptr; - m_unpack_info_stores_value= true; - res= false; + } else { + if (field->field_length != key_length) { + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_info_stores_value = true; + res = false; } } } return res; } - -Field *Rdb_field_packing::get_field_in_table(const TABLE* const tbl) const -{ +Field *Rdb_field_packing::get_field_in_table(const TABLE *const tbl) const { return tbl->key_info[m_keynr].key_part[m_key_part].field; } - void Rdb_field_packing::fill_hidden_pk_val(uchar **dst, - const longlong &hidden_pk_id) const -{ + const longlong &hidden_pk_id) const { DBUG_ASSERT(m_max_image_len == 8); String to; @@ -2820,27 +2554,24 @@ void Rdb_field_packing::fill_hidden_pk_val(uchar **dst, *dst += m_max_image_len; } - /////////////////////////////////////////////////////////////////////////////////////////// // Rdb_ddl_manager /////////////////////////////////////////////////////////////////////////////////////////// -Rdb_tbl_def::~Rdb_tbl_def() -{ - auto ddl_manager= rdb_get_ddl_manager(); +Rdb_tbl_def::~Rdb_tbl_def() { + auto ddl_manager = rdb_get_ddl_manager(); /* Don't free key definitions */ - if (m_key_descr_arr) - { - for (uint i= 0; i < m_key_count; i++) { + if (m_key_descr_arr) { + for (uint i = 0; i < m_key_count; i++) { if (ddl_manager && m_key_descr_arr[i]) { ddl_manager->erase_index_num(m_key_descr_arr[i]->get_gl_index_id()); } - m_key_descr_arr[i]= 
nullptr; + m_key_descr_arr[i] = nullptr; } delete[] m_key_descr_arr; - m_key_descr_arr= nullptr; + m_key_descr_arr = nullptr; } } @@ -2855,24 +2586,22 @@ Rdb_tbl_def::~Rdb_tbl_def() ( cf_id, index_nr ) */ -bool Rdb_tbl_def::put_dict(Rdb_dict_manager* const dict, - rocksdb::WriteBatch* const batch, - uchar* const key, const size_t &keylen) -{ +bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, + rocksdb::WriteBatch *const batch, uchar *const key, + const size_t &keylen) { StringBuffer<8 * Rdb_key_def::PACKED_SIZE> indexes; indexes.alloc(Rdb_key_def::VERSION_SIZE + m_key_count * Rdb_key_def::PACKED_SIZE * 2); rdb_netstr_append_uint16(&indexes, Rdb_key_def::DDL_ENTRY_INDEX_VERSION); - for (uint i = 0; i < m_key_count; i++) - { - const Rdb_key_def& kd= *m_key_descr_arr[i]; + for (uint i = 0; i < m_key_count; i++) { + const Rdb_key_def &kd = *m_key_descr_arr[i]; const uchar flags = - (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | - (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0); + (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | + (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0); - const uint cf_id= kd.get_cf()->GetID(); + const uint cf_id = kd.get_cf()->GetID(); /* If cf_id already exists, cf_flags must be the same. To prevent race condition, reading/modifying/committing CF flags @@ -2881,19 +2610,16 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager* const dict, control, we can switch to use it and removing mutex. */ uint existing_cf_flags; - if (dict->get_cf_flags(cf_id, &existing_cf_flags)) - { - if (existing_cf_flags != flags) - { + if (dict->get_cf_flags(cf_id, &existing_cf_flags)) { + if (existing_cf_flags != flags) { my_printf_error(ER_UNKNOWN_ERROR, "Column Family Flag is different from existing flag. 
" "Assign a new CF flag, or do not change existing " - "CF flag.", MYF(0)); + "CF flag.", + MYF(0)); return true; } - } - else - { + } else { dict->add_cf_flags(batch, cf_id, flags); } @@ -2904,37 +2630,33 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager* const dict, kd.m_index_number, cf_id); } - const rocksdb::Slice skey((char*)key, keylen); + const rocksdb::Slice skey((char *)key, keylen); const rocksdb::Slice svalue(indexes.c_ptr(), indexes.length()); dict->put_key(batch, skey, svalue); return false; } -void Rdb_tbl_def::check_if_is_mysql_system_table() -{ +void Rdb_tbl_def::check_if_is_mysql_system_table() { static const char *const system_dbs[] = { - "mysql", - "performance_schema", - "information_schema", + "mysql", "performance_schema", "information_schema", }; - m_is_mysql_system_table= false; + m_is_mysql_system_table = false; for (uint ii = 0; ii < array_elements(system_dbs); ii++) { if (strcmp(m_dbname.c_str(), system_dbs[ii]) == 0) { - m_is_mysql_system_table= true; + m_is_mysql_system_table = true; break; } } } -void Rdb_tbl_def::set_name(const std::string& name) -{ - int err __attribute__((__unused__)); +void Rdb_tbl_def::set_name(const std::string &name) { + int err MY_ATTRIBUTE((__unused__)); - m_dbname_tablename= name; - err= rdb_split_normalized_tablename(name, &m_dbname, &m_tablename, - &m_partition); + m_dbname_tablename = name; + err = rdb_split_normalized_tablename(name, &m_dbname, &m_tablename, + &m_partition); DBUG_ASSERT(err == 0); check_if_is_mysql_system_table(); @@ -2946,68 +2668,53 @@ void Rdb_tbl_def::set_name(const std::string& name) It manufactures a key (db+table name in our case) from a record (Rdb_tbl_def in our case). 
*/ -const uchar* Rdb_ddl_manager::get_hash_key( - Rdb_tbl_def* const rec, size_t* const length, - my_bool not_used __attribute__((__unused__))) -{ - const std::string& dbname_tablename= rec->full_tablename(); - *length= dbname_tablename.size(); - return reinterpret_cast(dbname_tablename.c_str()); +const uchar * +Rdb_ddl_manager::get_hash_key(Rdb_tbl_def *const rec, size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))) { + const std::string &dbname_tablename = rec->full_tablename(); + *length = dbname_tablename.size(); + return reinterpret_cast(dbname_tablename.c_str()); } - /* Static function of type void (*my_hash_free_element_func_t)(void*) that gets invoked by the m_ddl_hash object of type my_core::HASH. It deletes a record (Rdb_tbl_def in our case). */ -void Rdb_ddl_manager::free_hash_elem(void* const data) -{ - Rdb_tbl_def* elem= reinterpret_cast(data); +void Rdb_ddl_manager::free_hash_elem(void *const data) { + Rdb_tbl_def *elem = reinterpret_cast(data); delete elem; } -void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) -{ +void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) { m_index_num_to_keydef.erase(gl_index_id); } - -namespace // anonymous namespace = not visible outside this source file +namespace // anonymous namespace = not visible outside this source file { -struct Rdb_validate_tbls : public Rdb_tables_scanner -{ - using tbl_info_t= std::pair; - using tbl_list_t= std::map>; +struct Rdb_validate_tbls : public Rdb_tables_scanner { + using tbl_info_t = std::pair; + using tbl_list_t = std::map>; tbl_list_t m_list; - int add_table(Rdb_tbl_def* tdef) override; + int add_table(Rdb_tbl_def *tdef) override; - bool compare_to_actual_tables( - const std::string& datadir, - bool* has_errors); + bool compare_to_actual_tables(const std::string &datadir, bool *has_errors); - bool scan_for_frms( - const std::string& datadir, - const std::string& dbname, - bool* has_errors); + bool scan_for_frms(const std::string 
&datadir, const std::string &dbname, + bool *has_errors); - bool check_frm_file( - const std::string& fullpath, - const std::string& dbname, - const std::string& tablename, - bool* has_errors); + bool check_frm_file(const std::string &fullpath, const std::string &dbname, + const std::string &tablename, bool *has_errors); }; -} // anonymous namespace - +} // anonymous namespace /* Get a list of tables that we expect to have .frm files for. This will use the information just read from the RocksDB data dictionary. */ -int Rdb_validate_tbls::add_table(Rdb_tbl_def* tdef) -{ +int Rdb_validate_tbls::add_table(Rdb_tbl_def *tdef) { DBUG_ASSERT(tdef != nullptr); /* Add the database/table into the list */ @@ -3015,19 +2722,17 @@ int Rdb_validate_tbls::add_table(Rdb_tbl_def* tdef) m_list[tdef->base_dbname()].insert( tbl_info_t(tdef->base_tablename(), is_partition)); - return 0; + return HA_EXIT_SUCCESS; } /* Access the .frm file for this dbname/tablename and see if it is a RocksDB table (or partition table). */ -bool Rdb_validate_tbls::check_frm_file( - const std::string& fullpath, - const std::string& dbname, - const std::string& tablename, - bool* has_errors) -{ +bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, + const std::string &dbname, + const std::string &tablename, + bool *has_errors) { /* Check this .frm file to see what engine it uses */ String fullfilename(fullpath.c_str(), &my_charset_bin); fullfilename.append(FN_DIRSEP); @@ -3042,42 +2747,34 @@ bool Rdb_validate_tbls::check_frm_file( */ enum legacy_db_type eng_type; frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type); - if (type == FRMTYPE_ERROR) - { + if (type == FRMTYPE_ERROR) { sql_print_warning("RocksDB: Failed to open/read .from file: %s", - fullfilename.ptr()); + fullfilename.ptr()); return false; } - if (type == FRMTYPE_TABLE) - { + if (type == FRMTYPE_TABLE) { /* For a RocksDB table do we have a reference in the data dictionary? 
*/ - if (eng_type == DB_TYPE_ROCKSDB) - { + if (eng_type == DB_TYPE_ROCKSDB) { /* Attempt to remove the table entry from the list of tables. If this fails then we know we had a .frm file that wasn't registered in RocksDB. */ tbl_info_t element(tablename, false); - if (m_list.count(dbname) == 0 || - m_list[dbname].erase(element) == 0) - { + if (m_list.count(dbname) == 0 || m_list[dbname].erase(element) == 0) { sql_print_warning("RocksDB: Schema mismatch - " "A .frm file exists for table %s.%s, " "but that table is not registered in RocksDB", dbname.c_str(), tablename.c_str()); *has_errors = true; } - } - else if (eng_type == DB_TYPE_PARTITION_DB) - { + } else if (eng_type == DB_TYPE_PARTITION_DB) { /* For partition tables, see if it is in the m_list as a partition, but don't generate an error if it isn't there - we don't know that the .frm is for RocksDB. */ - if (m_list.count(dbname) > 0) - { + if (m_list.count(dbname) > 0) { m_list[dbname].erase(tbl_info_t(tablename, true)); } } @@ -3087,38 +2784,32 @@ bool Rdb_validate_tbls::check_frm_file( } /* Scan the database subdirectory for .frm files */ -bool Rdb_validate_tbls::scan_for_frms( - const std::string& datadir, - const std::string& dbname, - bool* has_errors) -{ - bool result = true; - std::string fullpath = datadir + dbname; - struct st_my_dir* dir_info = my_dir(fullpath.c_str(), MYF(MY_DONT_SORT)); +bool Rdb_validate_tbls::scan_for_frms(const std::string &datadir, + const std::string &dbname, + bool *has_errors) { + bool result = true; + std::string fullpath = datadir + dbname; + struct st_my_dir *dir_info = my_dir(fullpath.c_str(), MYF(MY_DONT_SORT)); /* Access the directory */ - if (dir_info == nullptr) - { + if (dir_info == nullptr) { sql_print_warning("RocksDB: Could not open database directory: %s", - fullpath.c_str()); + fullpath.c_str()); return false; } /* Scan through the files in the directory */ - struct fileinfo* file_info = dir_info->dir_entry; - for (uint ii = 0; ii < dir_info->number_off_files; 
ii++, file_info++) - { + struct fileinfo *file_info = dir_info->dir_entry; + for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++) { /* Find .frm files that are not temp files (those that start with '#') */ - const char* ext = strrchr(file_info->name, '.'); + const char *ext = strrchr(file_info->name, '.'); if (ext != nullptr && !is_prefix(file_info->name, tmp_file_prefix) && - strcmp(ext, ".frm") == 0) - { - std::string tablename = std::string(file_info->name, - ext - file_info->name); + strcmp(ext, ".frm") == 0) { + std::string tablename = + std::string(file_info->name, ext - file_info->name); /* Check to see if the .frm file is from RocksDB */ - if (!check_frm_file(fullpath, dbname, tablename, has_errors)) - { + if (!check_frm_file(fullpath, dbname, tablename, has_errors)) { result = false; break; } @@ -3126,8 +2817,7 @@ bool Rdb_validate_tbls::scan_for_frms( } /* Remove any databases who have no more tables listed */ - if (m_list.count(dbname) == 1 && m_list[dbname].size() == 0) - { + if (m_list.count(dbname) == 1 && m_list[dbname].size() == 0) { m_list.erase(dbname); } @@ -3141,24 +2831,20 @@ bool Rdb_validate_tbls::scan_for_frms( Scan the datadir for all databases (subdirectories) and get a list of .frm files they contain */ -bool Rdb_validate_tbls::compare_to_actual_tables( - const std::string& datadir, - bool* has_errors) -{ - bool result = true; - struct st_my_dir* dir_info; - struct fileinfo* file_info; +bool Rdb_validate_tbls::compare_to_actual_tables(const std::string &datadir, + bool *has_errors) { + bool result = true; + struct st_my_dir *dir_info; + struct fileinfo *file_info; dir_info = my_dir(datadir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT)); - if (dir_info == nullptr) - { + if (dir_info == nullptr) { sql_print_warning("RocksDB: could not open datadir: %s", datadir.c_str()); return false; } file_info = dir_info->dir_entry; - for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++) - { + for (uint ii = 0; ii < 
dir_info->number_off_files; ii++, file_info++) { /* Ignore files/dirs starting with '.' */ if (file_info->name[0] == '.') continue; @@ -3168,8 +2854,7 @@ bool Rdb_validate_tbls::compare_to_actual_tables( continue; /* Scan all the .frm files in the directory */ - if (!scan_for_frms(datadir, file_info->name, has_errors)) - { + if (!scan_for_frms(datadir, file_info->name, has_errors)) { result = false; break; } @@ -3185,21 +2870,18 @@ bool Rdb_validate_tbls::compare_to_actual_tables( Validate that all the tables in the RocksDB database dictionary match the .frm files in the datdir */ -bool Rdb_ddl_manager::validate_schemas(void) -{ - bool has_errors= false; - const std::string datadir= std::string(mysql_real_data_home); +bool Rdb_ddl_manager::validate_schemas(void) { + bool has_errors = false; + const std::string datadir = std::string(mysql_real_data_home); Rdb_validate_tbls table_list; /* Get the list of tables from the database dictionary */ - if (scan_for_tables(&table_list) != 0) - { + if (scan_for_tables(&table_list) != 0) { return false; } /* Compare that to the list of actual .frm files */ - if (!table_list.compare_to_actual_tables(datadir, &has_errors)) - { + if (!table_list.compare_to_actual_tables(datadir, &has_errors)) { return false; } @@ -3207,14 +2889,12 @@ bool Rdb_ddl_manager::validate_schemas(void) Any tables left in the tables list are ones that are registered in RocksDB but don't have .frm files. 
*/ - for (const auto& db : table_list.m_list) - { - for (const auto& table : db.second) - { + for (const auto &db : table_list.m_list) { + for (const auto &table : db.second) { sql_print_warning("RocksDB: Schema mismatch - " "Table %s.%s is registered in RocksDB " - "but does not have a .frm file", db.first.c_str(), - table.first.c_str()); + "but does not have a .frm file", + db.first.c_str(), table.first.c_str()); has_errors = true; } } @@ -3222,111 +2902,99 @@ bool Rdb_ddl_manager::validate_schemas(void) return !has_errors; } -bool Rdb_ddl_manager::init(Rdb_dict_manager* const dict_arg, - Rdb_cf_manager* const cf_manager, - const uint32_t &validate_tables) -{ - const ulong TABLE_HASH_SIZE= 32; - m_dict= dict_arg; +bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, + Rdb_cf_manager *const cf_manager, + const uint32_t &validate_tables) { + const ulong TABLE_HASH_SIZE = 32; + m_dict = dict_arg; mysql_rwlock_init(0, &m_rwlock); - (void) my_hash_init(&m_ddl_hash, - /*system_charset_info*/ &my_charset_bin, - TABLE_HASH_SIZE, 0, 0, - (my_hash_get_key) Rdb_ddl_manager::get_hash_key, - Rdb_ddl_manager::free_hash_elem, - 0); + (void)my_hash_init(&m_ddl_hash, + /*system_charset_info*/ &my_charset_bin, TABLE_HASH_SIZE, + 0, 0, (my_hash_get_key)Rdb_ddl_manager::get_hash_key, + Rdb_ddl_manager::free_hash_elem, 0); /* Read the data dictionary and populate the hash */ uchar ddl_entry[Rdb_key_def::INDEX_NUMBER_SIZE]; rdb_netbuf_store_index(ddl_entry, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); - const rocksdb::Slice ddl_entry_slice((char*)ddl_entry, - Rdb_key_def::INDEX_NUMBER_SIZE); + const rocksdb::Slice ddl_entry_slice((char *)ddl_entry, + Rdb_key_def::INDEX_NUMBER_SIZE); /* Reading data dictionary should always skip bloom filter */ - rocksdb::Iterator* it= m_dict->new_iterator(); - int i= 0; + rocksdb::Iterator *it = m_dict->new_iterator(); + int i = 0; - uint max_index_id_in_dict= 0; + uint max_index_id_in_dict = 0; m_dict->get_max_index_id(&max_index_id_in_dict); 
- for (it->Seek(ddl_entry_slice); it->Valid(); it->Next()) - { + for (it->Seek(ddl_entry_slice); it->Valid(); it->Next()) { const uchar *ptr; const uchar *ptr_end; - const rocksdb::Slice key= it->key(); - const rocksdb::Slice val= it->value(); + const rocksdb::Slice key = it->key(); + const rocksdb::Slice val = it->value(); if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE && memcmp(key.data(), ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE)) break; - if (key.size() <= Rdb_key_def::INDEX_NUMBER_SIZE) - { + if (key.size() <= Rdb_key_def::INDEX_NUMBER_SIZE) { sql_print_error("RocksDB: Table_store: key has length %d (corruption?)", (int)key.size()); return true; } - Rdb_tbl_def* const tdef= - new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); + Rdb_tbl_def *const tdef = + new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); // Now, read the DDLs. - const int real_val_size= val.size() - Rdb_key_def::VERSION_SIZE; - if (real_val_size % Rdb_key_def::PACKED_SIZE*2) - { + const int real_val_size = val.size() - Rdb_key_def::VERSION_SIZE; + if (real_val_size % Rdb_key_def::PACKED_SIZE * 2) { sql_print_error("RocksDB: Table_store: invalid keylist for table %s", tdef->full_tablename().c_str()); return true; } - tdef->m_key_count= real_val_size / (Rdb_key_def::PACKED_SIZE*2); - tdef->m_key_descr_arr= new std::shared_ptr[tdef->m_key_count]; + tdef->m_key_count = real_val_size / (Rdb_key_def::PACKED_SIZE * 2); + tdef->m_key_descr_arr = new std::shared_ptr[tdef->m_key_count]; - ptr= reinterpret_cast(val.data()); - const int version= rdb_netbuf_read_uint16(&ptr); - if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION) - { + ptr = reinterpret_cast(val.data()); + const int version = rdb_netbuf_read_uint16(&ptr); + if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION) { sql_print_error("RocksDB: DDL ENTRY Version was not expected." 
"Expected: %d, Actual: %d", Rdb_key_def::DDL_ENTRY_INDEX_VERSION, version); return true; } - ptr_end= ptr + real_val_size; - for (uint keyno= 0; ptr < ptr_end; keyno++) - { + ptr_end = ptr + real_val_size; + for (uint keyno = 0; ptr < ptr_end; keyno++) { GL_INDEX_ID gl_index_id; rdb_netbuf_read_gl_index(&ptr, &gl_index_id); - uint16 m_index_dict_version= 0; - uchar m_index_type= 0; - uint16 kv_version= 0; - uint flags= 0; + uint16 m_index_dict_version = 0; + uchar m_index_type = 0; + uint16 kv_version = 0; + uint flags = 0; if (!m_dict->get_index_info(gl_index_id, &m_index_dict_version, - &m_index_type, &kv_version)) - { + &m_index_type, &kv_version)) { sql_print_error("RocksDB: Could not get index information " "for Index Number (%u,%u), table %s", gl_index_id.cf_id, gl_index_id.index_id, tdef->full_tablename().c_str()); return true; } - if (max_index_id_in_dict < gl_index_id.index_id) - { + if (max_index_id_in_dict < gl_index_id.index_id) { sql_print_error("RocksDB: Found max index id %u from data dictionary " "but also found larger index id %u from dictionary. 
" "This should never happen and possibly a bug.", max_index_id_in_dict, gl_index_id.index_id); return true; } - if (!m_dict->get_cf_flags(gl_index_id.cf_id, &flags)) - { + if (!m_dict->get_cf_flags(gl_index_id.cf_id, &flags)) { sql_print_error("RocksDB: Could not get Column Family Flags " "for CF Number %d, table %s", - gl_index_id.cf_id, - tdef->full_tablename().c_str()); + gl_index_id.cf_id, tdef->full_tablename().c_str()); return true; } - rocksdb::ColumnFamilyHandle* const cfh = - cf_manager->get_cf(gl_index_id.cf_id); + rocksdb::ColumnFamilyHandle *const cfh = + cf_manager->get_cf(gl_index_id.cf_id); DBUG_ASSERT(cfh != nullptr); /* @@ -3334,13 +3002,11 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager* const dict_arg, initialization requires that there is an open TABLE* where we could look at Field* objects and set max_length and other attributes */ - tdef->m_key_descr_arr[keyno]= - std::make_shared(gl_index_id.index_id, keyno, cfh, - m_index_dict_version, - m_index_type, kv_version, - flags & Rdb_key_def::REVERSE_CF_FLAG, - flags & Rdb_key_def::AUTO_CF_FLAG, "", - m_dict->get_stats(gl_index_id)); + tdef->m_key_descr_arr[keyno] = std::make_shared( + gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type, + kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG, + flags & Rdb_key_def::AUTO_CF_FLAG, "", + m_dict->get_stats(gl_index_id)); } put(tdef); i++; @@ -3360,40 +3026,34 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager* const dict_arg, // index ids used by applications should not conflict with // data dictionary index ids - if (max_index_id_in_dict < Rdb_key_def::END_DICT_INDEX_ID) - { - max_index_id_in_dict= Rdb_key_def::END_DICT_INDEX_ID; + if (max_index_id_in_dict < Rdb_key_def::END_DICT_INDEX_ID) { + max_index_id_in_dict = Rdb_key_def::END_DICT_INDEX_ID; } - m_sequence.init(max_index_id_in_dict+1); + m_sequence.init(max_index_id_in_dict + 1); - if (!it->status().ok()) - { - const std::string s= it->status().ToString(); + if (!it->status().ok()) { + 
const std::string s = it->status().ToString(); sql_print_error("RocksDB: Table_store: load error: %s", s.c_str()); return true; } delete it; - sql_print_information("RocksDB: Table_store: loaded DDL data for %d tables", i); + sql_print_information("RocksDB: Table_store: loaded DDL data for %d tables", + i); return false; } - -Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, - const bool &lock) -{ - if (lock) - { +Rdb_tbl_def *Rdb_ddl_manager::find(const std::string &table_name, + const bool &lock) { + if (lock) { mysql_rwlock_rdlock(&m_rwlock); } - Rdb_tbl_def* const rec= reinterpret_cast( - my_hash_search(&m_ddl_hash, - reinterpret_cast(table_name.c_str()), - table_name.size())); + Rdb_tbl_def *const rec = reinterpret_cast(my_hash_search( + &m_ddl_hash, reinterpret_cast(table_name.c_str()), + table_name.size())); - if (lock) - { + if (lock) { mysql_rwlock_unlock(&m_rwlock); } @@ -3404,22 +3064,18 @@ Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, // lock on m_rwlock to make sure the Rdb_key_def is not discarded while we // are finding it. Copying it into 'ret' increments the count making sure // that the object will not be discarded until we are finished with it. 
-std::shared_ptr Rdb_ddl_manager::safe_find( - GL_INDEX_ID gl_index_id) -{ +std::shared_ptr +Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) { std::shared_ptr ret(nullptr); mysql_rwlock_rdlock(&m_rwlock); - auto it= m_index_num_to_keydef.find(gl_index_id); - if (it != m_index_num_to_keydef.end()) - { + auto it = m_index_num_to_keydef.find(gl_index_id); + if (it != m_index_num_to_keydef.end()) { const auto table_def = find(it->second.first, false); - if (table_def && it->second.second < table_def->m_key_count) - { - const auto &kd= table_def->m_key_descr_arr[it->second.second]; - if (kd->max_storage_fmt_length() != 0) - { + if (table_def && it->second.second < table_def->m_key_count) { + const auto &kd = table_def->m_key_descr_arr[it->second.second]; + if (kd->max_storage_fmt_length() != 0) { ret = kd; } } @@ -3431,10 +3087,9 @@ std::shared_ptr Rdb_ddl_manager::safe_find( } // this method assumes at least read-only lock on m_rwlock -const std::shared_ptr& Rdb_ddl_manager::find( - GL_INDEX_ID gl_index_id) -{ - auto it= m_index_num_to_keydef.find(gl_index_id); +const std::shared_ptr & +Rdb_ddl_manager::find(GL_INDEX_ID gl_index_id) { + auto it = m_index_num_to_keydef.find(gl_index_id); if (it != m_index_num_to_keydef.end()) { auto table_def = find(it->second.first, false); if (table_def) { @@ -3450,11 +3105,10 @@ const std::shared_ptr& Rdb_ddl_manager::find( } void Rdb_ddl_manager::set_stats( - const std::unordered_map& stats) -{ + const std::unordered_map &stats) { mysql_rwlock_wrlock(&m_rwlock); for (auto src : stats) { - const auto& keydef = find(src.second.m_gl_index_id); + const auto &keydef = find(src.second.m_gl_index_id); if (keydef) { keydef->m_stats = src.second; m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; @@ -3464,35 +3118,29 @@ void Rdb_ddl_manager::set_stats( } void Rdb_ddl_manager::adjust_stats( - const std::vector& new_data, - const std::vector& deleted_data) -{ + const std::vector &new_data, + const std::vector &deleted_data) { 
mysql_rwlock_wrlock(&m_rwlock); int i = 0; - for (const auto& data : {new_data, deleted_data}) - { - for (const auto& src : data) - { - const auto& keydef= find(src.m_gl_index_id); - if (keydef) - { + for (const auto &data : {new_data, deleted_data}) { + for (const auto &src : data) { + const auto &keydef = find(src.m_gl_index_id); + if (keydef) { keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length()); m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; } } i++; } - const bool should_save_stats= !m_stats2store.empty(); + const bool should_save_stats = !m_stats2store.empty(); mysql_rwlock_unlock(&m_rwlock); - if (should_save_stats) - { + if (should_save_stats) { // Queue an async persist_stats(false) call to the background thread. rdb_queue_save_stats_request(); } } -void Rdb_ddl_manager::persist_stats(const bool &sync) -{ +void Rdb_ddl_manager::persist_stats(const bool &sync) { mysql_rwlock_wrlock(&m_rwlock); const auto local_stats2store = std::move(m_stats2store); m_stats2store.clear(); @@ -3501,12 +3149,11 @@ void Rdb_ddl_manager::persist_stats(const bool &sync) // Persist stats const std::unique_ptr wb = m_dict->begin(); std::vector stats; - std::transform( - local_stats2store.begin(), local_stats2store.end(), - std::back_inserter(stats), - []( - const std::pair& s - ) {return s.second;}); + std::transform(local_stats2store.begin(), local_stats2store.end(), + std::back_inserter(stats), + [](const std::pair &s) { + return s.second; + }); m_dict->add_stats(wb.get(), stats); m_dict->commit(wb.get(), sync); } @@ -3516,32 +3163,28 @@ void Rdb_ddl_manager::persist_stats(const bool &sync) on-disk data dictionary. 
*/ -int Rdb_ddl_manager::put_and_write(Rdb_tbl_def* const tbl, - rocksdb::WriteBatch* const batch) -{ +int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *const tbl, + rocksdb::WriteBatch *const batch) { uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; - uint pos= 0; + uint pos = 0; rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); - pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + pos += Rdb_key_def::INDEX_NUMBER_SIZE; - const std::string& dbname_tablename= tbl->full_tablename(); + const std::string &dbname_tablename = tbl->full_tablename(); memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); pos += dbname_tablename.size(); int res; - if ((res= tbl->put_dict(m_dict, batch, buf, pos))) - { + if ((res = tbl->put_dict(m_dict, batch, buf, pos))) { return res; } - if ((res= put(tbl))) - { + if ((res = put(tbl))) { return res; } - return 0; + return HA_EXIT_SUCCESS; } - /* Return 0 - ok, other value - error */ /* TODO: This function modifies m_ddl_hash and m_index_num_to_keydef. @@ -3549,28 +3192,26 @@ int Rdb_ddl_manager::put_and_write(Rdb_tbl_def* const tbl, See the discussion here: https://reviews.facebook.net/D35925#inline-259167 Tracked by https://github.com/facebook/mysql-5.6/issues/33 */ -int Rdb_ddl_manager::put(Rdb_tbl_def* const tbl, const bool &lock) -{ +int Rdb_ddl_manager::put(Rdb_tbl_def *const tbl, const bool &lock) { Rdb_tbl_def *rec; my_bool result; - const std::string& dbname_tablename= tbl->full_tablename(); + const std::string &dbname_tablename = tbl->full_tablename(); if (lock) mysql_rwlock_wrlock(&m_rwlock); // We have to do this find because 'tbl' is not yet in the list. We need // to find the one we are replacing ('rec') - rec= find(dbname_tablename, false); - if (rec) - { + rec = find(dbname_tablename, false); + if (rec) { // this will free the old record. 
- my_hash_delete(&m_ddl_hash, reinterpret_cast(rec)); + my_hash_delete(&m_ddl_hash, reinterpret_cast(rec)); } - result= my_hash_insert(&m_ddl_hash, reinterpret_cast(tbl)); + result = my_hash_insert(&m_ddl_hash, reinterpret_cast(tbl)); - for (uint keyno= 0; keyno < tbl->m_key_count; keyno++) { - m_index_num_to_keydef[tbl->m_key_descr_arr[keyno]->get_gl_index_id()]= - std::make_pair(dbname_tablename, keyno); + for (uint keyno = 0; keyno < tbl->m_key_count; keyno++) { + m_index_num_to_keydef[tbl->m_key_descr_arr[keyno]->get_gl_index_id()] = + std::make_pair(dbname_tablename, keyno); } if (lock) @@ -3578,91 +3219,81 @@ int Rdb_ddl_manager::put(Rdb_tbl_def* const tbl, const bool &lock) return result; } - -void Rdb_ddl_manager::remove(Rdb_tbl_def* const tbl, - rocksdb::WriteBatch * const batch, - const bool &lock) -{ +void Rdb_ddl_manager::remove(Rdb_tbl_def *const tbl, + rocksdb::WriteBatch *const batch, + const bool &lock) { if (lock) mysql_rwlock_wrlock(&m_rwlock); uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; - uint pos= 0; + uint pos = 0; rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); - pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + pos += Rdb_key_def::INDEX_NUMBER_SIZE; - const std::string& dbname_tablename= tbl->full_tablename(); + const std::string &dbname_tablename = tbl->full_tablename(); memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); pos += dbname_tablename.size(); - const rocksdb::Slice tkey((char*)buf, pos); + const rocksdb::Slice tkey((char *)buf, pos); m_dict->delete_key(batch, tkey); /* The following will also delete the object: */ - my_hash_delete(&m_ddl_hash, reinterpret_cast(tbl)); + my_hash_delete(&m_ddl_hash, reinterpret_cast(tbl)); if (lock) mysql_rwlock_unlock(&m_rwlock); } - -bool Rdb_ddl_manager::rename(const std::string& from, const std::string& to, - rocksdb::WriteBatch* const batch) -{ +bool Rdb_ddl_manager::rename(const std::string &from, const std::string &to, + rocksdb::WriteBatch *const 
batch) { Rdb_tbl_def *rec; Rdb_tbl_def *new_rec; - bool res= true; + bool res = true; uchar new_buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; - uint new_pos= 0; + uint new_pos = 0; mysql_rwlock_wrlock(&m_rwlock); - if (!(rec= find(from, false))) - { + if (!(rec = find(from, false))) { mysql_rwlock_unlock(&m_rwlock); return true; } - new_rec= new Rdb_tbl_def(to); + new_rec = new Rdb_tbl_def(to); - new_rec->m_key_count= rec->m_key_count; - new_rec->m_auto_incr_val= - rec->m_auto_incr_val.load(std::memory_order_relaxed); - new_rec->m_key_descr_arr= rec->m_key_descr_arr; + new_rec->m_key_count = rec->m_key_count; + new_rec->m_auto_incr_val = + rec->m_auto_incr_val.load(std::memory_order_relaxed); + new_rec->m_key_descr_arr = rec->m_key_descr_arr; // so that it's not free'd when deleting the old rec - rec->m_key_descr_arr= nullptr; + rec->m_key_descr_arr = nullptr; // Create a new key rdb_netbuf_store_index(new_buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); - new_pos+= Rdb_key_def::INDEX_NUMBER_SIZE; + new_pos += Rdb_key_def::INDEX_NUMBER_SIZE; - const std::string& dbname_tablename= new_rec->full_tablename(); + const std::string &dbname_tablename = new_rec->full_tablename(); memcpy(new_buf + new_pos, dbname_tablename.c_str(), dbname_tablename.size()); new_pos += dbname_tablename.size(); // Create a key to add - if (!new_rec->put_dict(m_dict, batch, new_buf, new_pos)) - { + if (!new_rec->put_dict(m_dict, batch, new_buf, new_pos)) { remove(rec, batch, false); put(new_rec, false); - res= false; // ok + res = false; // ok } mysql_rwlock_unlock(&m_rwlock); return res; } - -void Rdb_ddl_manager::cleanup() -{ +void Rdb_ddl_manager::cleanup() { my_hash_free(&m_ddl_hash); mysql_rwlock_destroy(&m_rwlock); m_sequence.cleanup(); } - -int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* const tables_scanner) -{ +int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner *const tables_scanner) { int i, ret; Rdb_tbl_def *rec; @@ -3670,12 +3301,11 @@ int 
Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* const tables_scanner) mysql_rwlock_rdlock(&m_rwlock); - ret= 0; - i= 0; + ret = 0; + i = 0; - while ((rec = reinterpret_cast(my_hash_element(&m_ddl_hash, - i)))) - { + while (( + rec = reinterpret_cast(my_hash_element(&m_ddl_hash, i)))) { ret = tables_scanner->add_table(rec); if (ret) break; @@ -3686,25 +3316,21 @@ int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* const tables_scanner) return ret; } - /* Rdb_binlog_manager class implementation */ -bool Rdb_binlog_manager::init(Rdb_dict_manager* const dict_arg) -{ +bool Rdb_binlog_manager::init(Rdb_dict_manager *const dict_arg) { DBUG_ASSERT(dict_arg != nullptr); - m_dict= dict_arg; + m_dict = dict_arg; rdb_netbuf_store_index(m_key_buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER); - m_key_slice = rocksdb::Slice(reinterpret_cast(m_key_buf), + m_key_slice = rocksdb::Slice(reinterpret_cast(m_key_buf), Rdb_key_def::INDEX_NUMBER_SIZE); return false; } -void Rdb_binlog_manager::cleanup() -{ -} +void Rdb_binlog_manager::cleanup() {} /** Set binlog name, pos and optionally gtid into WriteBatch. 
@@ -3717,19 +3343,17 @@ void Rdb_binlog_manager::cleanup() @param binlog_gtid Binlog max GTID @param batch WriteBatch */ -void Rdb_binlog_manager::update(const char* const binlog_name, +void Rdb_binlog_manager::update(const char *const binlog_name, const my_off_t binlog_pos, - const char* const binlog_max_gtid, - rocksdb::WriteBatchBase* const batch) -{ - if (binlog_name && binlog_pos) - { + const char *const binlog_max_gtid, + rocksdb::WriteBatchBase *const batch) { + if (binlog_name && binlog_pos) { // max binlog length (512) + binlog pos (4) + binlog gtid (57) < 1024 - const size_t RDB_MAX_BINLOG_INFO_LEN= 1024; - uchar value_buf[RDB_MAX_BINLOG_INFO_LEN]; - m_dict->put_key(batch, m_key_slice, - pack_value(value_buf, binlog_name, - binlog_pos, binlog_max_gtid)); + const size_t RDB_MAX_BINLOG_INFO_LEN = 1024; + uchar value_buf[RDB_MAX_BINLOG_INFO_LEN]; + m_dict->put_key( + batch, m_key_slice, + pack_value(value_buf, binlog_name, binlog_pos, binlog_max_gtid)); } } @@ -3742,20 +3366,17 @@ void Rdb_binlog_manager::update(const char* const binlog_name, true is binlog info was found (valid behavior) false otherwise */ -bool Rdb_binlog_manager::read(char* const binlog_name, - my_off_t* const binlog_pos, - char* const binlog_gtid) const -{ - bool ret= false; - if (binlog_name) - { +bool Rdb_binlog_manager::read(char *const binlog_name, + my_off_t *const binlog_pos, + char *const binlog_gtid) const { + bool ret = false; + if (binlog_name) { std::string value; - rocksdb::Status status= m_dict->get_value(m_key_slice, &value); - if(status.ok()) - { - if (!unpack_value((const uchar*)value.c_str(), - binlog_name, binlog_pos, binlog_gtid)) - ret= true; + rocksdb::Status status = m_dict->get_value(m_key_slice, &value); + if (status.ok()) { + if (!unpack_value((const uchar *)value.c_str(), binlog_name, binlog_pos, + binlog_gtid)) + ret = true; } } return ret; @@ -3770,13 +3391,11 @@ bool Rdb_binlog_manager::read(char* const binlog_name, @param binlog_gtid Binlog GTID @return 
rocksdb::Slice converted from buf and its length */ -rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, - const char* const binlog_name, - const my_off_t &binlog_pos, - const char* const binlog_gtid - ) const -{ - uint pack_len= 0; +rocksdb::Slice +Rdb_binlog_manager::pack_value(uchar *const buf, const char *const binlog_name, + const my_off_t &binlog_pos, + const char *const binlog_gtid) const { + uint pack_len = 0; // store version rdb_netbuf_store_uint16(buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION); @@ -3785,31 +3404,30 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, // store binlog file name length DBUG_ASSERT(strlen(binlog_name) <= FN_REFLEN); const uint16_t binlog_name_len = strlen(binlog_name); - rdb_netbuf_store_uint16(buf+pack_len, binlog_name_len); + rdb_netbuf_store_uint16(buf + pack_len, binlog_name_len); pack_len += sizeof(uint16); // store binlog file name - memcpy(buf+pack_len, binlog_name, binlog_name_len); + memcpy(buf + pack_len, binlog_name, binlog_name_len); pack_len += binlog_name_len; // store binlog pos - rdb_netbuf_store_uint32(buf+pack_len, binlog_pos); + rdb_netbuf_store_uint32(buf + pack_len, binlog_pos); pack_len += sizeof(uint32); // store binlog gtid length. // If gtid was not set, store 0 instead - const uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0; - rdb_netbuf_store_uint16(buf+pack_len, binlog_gtid_len); + const uint16_t binlog_gtid_len = binlog_gtid ? 
strlen(binlog_gtid) : 0; + rdb_netbuf_store_uint16(buf + pack_len, binlog_gtid_len); pack_len += sizeof(uint16); - if (binlog_gtid_len > 0) - { + if (binlog_gtid_len > 0) { // store binlog gtid - memcpy(buf+pack_len, binlog_gtid, binlog_gtid_len); + memcpy(buf + pack_len, binlog_gtid, binlog_gtid_len); pack_len += binlog_gtid_len; } - return rocksdb::Slice((char*)buf, pack_len); + return rocksdb::Slice((char *)buf, pack_len); } /** @@ -3820,43 +3438,40 @@ rocksdb::Slice Rdb_binlog_manager::pack_value(uchar* const buf, @param[OUT] binlog_gtid Binlog GTID @return true on error */ -bool Rdb_binlog_manager::unpack_value(const uchar* const value, - char* const binlog_name, - my_off_t* const binlog_pos, - char* const binlog_gtid) const -{ - uint pack_len= 0; +bool Rdb_binlog_manager::unpack_value(const uchar *const value, + char *const binlog_name, + my_off_t *const binlog_pos, + char *const binlog_gtid) const { + uint pack_len = 0; DBUG_ASSERT(binlog_pos != nullptr); // read version - const uint16_t version= rdb_netbuf_to_uint16(value); + const uint16_t version = rdb_netbuf_to_uint16(value); pack_len += Rdb_key_def::VERSION_SIZE; if (version != Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION) return true; // read binlog file name length - const uint16_t binlog_name_len= rdb_netbuf_to_uint16(value+pack_len); + const uint16_t binlog_name_len = rdb_netbuf_to_uint16(value + pack_len); pack_len += sizeof(uint16); - if (binlog_name_len) - { + if (binlog_name_len) { // read and set binlog name - memcpy(binlog_name, value+pack_len, binlog_name_len); - binlog_name[binlog_name_len]= '\0'; + memcpy(binlog_name, value + pack_len, binlog_name_len); + binlog_name[binlog_name_len] = '\0'; pack_len += binlog_name_len; // read and set binlog pos - *binlog_pos= rdb_netbuf_to_uint32(value+pack_len); + *binlog_pos = rdb_netbuf_to_uint32(value + pack_len); pack_len += sizeof(uint32); // read gtid length - const uint16_t binlog_gtid_len= rdb_netbuf_to_uint16(value+pack_len); + const uint16_t 
binlog_gtid_len = rdb_netbuf_to_uint16(value + pack_len); pack_len += sizeof(uint16); - if (binlog_gtid && binlog_gtid_len > 0) - { + if (binlog_gtid && binlog_gtid_len > 0) { // read and set gtid - memcpy(binlog_gtid, value+pack_len, binlog_gtid_len); - binlog_gtid[binlog_gtid_len]= '\0'; + memcpy(binlog_gtid, value + pack_len, binlog_gtid_len); + binlog_gtid[binlog_gtid_len] = '\0'; pack_len += binlog_gtid_len; } } @@ -3873,15 +3488,14 @@ bool Rdb_binlog_manager::unpack_value(const uchar* const value, @param[IN] write_batch Handle to storage engine writer. */ void Rdb_binlog_manager::update_slave_gtid_info( - const uint &id, const char* const db, const char* const gtid, - rocksdb::WriteBatchBase* const write_batch) -{ + const uint &id, const char *const db, const char *const gtid, + rocksdb::WriteBatchBase *const write_batch) { if (id && db && gtid) { // Make sure that if the slave_gtid_info table exists we have a // pointer to it via m_slave_gtid_info_tbl. if (!m_slave_gtid_info_tbl.load()) { m_slave_gtid_info_tbl.store( - rdb_get_ddl_manager()->find("mysql.slave_gtid_info")); + rdb_get_ddl_manager()->find("mysql.slave_gtid_info")); } if (!m_slave_gtid_info_tbl.load()) { // slave_gtid_info table is not present. Simply return. 
@@ -3889,26 +3503,26 @@ void Rdb_binlog_manager::update_slave_gtid_info( } DBUG_ASSERT(m_slave_gtid_info_tbl.load()->m_key_count == 1); - const std::shared_ptr& kd= + const std::shared_ptr &kd = m_slave_gtid_info_tbl.load()->m_key_descr_arr[0]; String value; // Build key - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE + 4]= {0}; - uchar* buf= key_buf; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE + 4] = {0}; + uchar *buf = key_buf; rdb_netbuf_store_index(buf, kd->get_index_number()); buf += Rdb_key_def::INDEX_NUMBER_SIZE; rdb_netbuf_store_uint32(buf, id); buf += 4; const rocksdb::Slice key_slice = - rocksdb::Slice((const char*)key_buf, buf-key_buf); + rocksdb::Slice((const char *)key_buf, buf - key_buf); // Build value - uchar value_buf[128]= {0}; + uchar value_buf[128] = {0}; DBUG_ASSERT(gtid); - const uint db_len= strlen(db); - const uint gtid_len= strlen(gtid); - buf= value_buf; + const uint db_len = strlen(db); + const uint gtid_len = strlen(gtid); + buf = value_buf; // 1 byte used for flags. Empty here. 
buf++; @@ -3926,87 +3540,77 @@ void Rdb_binlog_manager::update_slave_gtid_info( memcpy(buf, gtid, gtid_len); buf += gtid_len; const rocksdb::Slice value_slice = - rocksdb::Slice((const char*)value_buf, buf-value_buf); + rocksdb::Slice((const char *)value_buf, buf - value_buf); write_batch->Put(kd->get_cf(), key_slice, value_slice); } } -bool Rdb_dict_manager::init(rocksdb::DB* const rdb_dict, - Rdb_cf_manager* const cf_manager) -{ +bool Rdb_dict_manager::init(rocksdb::DB *const rdb_dict, + Rdb_cf_manager *const cf_manager) { mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); - m_db= rdb_dict; + m_db = rdb_dict; bool is_automatic; - m_system_cfh= cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME, - "", nullptr, &is_automatic); - rdb_netbuf_store_index(m_key_buf_max_index_id, - Rdb_key_def::MAX_INDEX_ID); - m_key_slice_max_index_id= rocksdb::Slice( - reinterpret_cast(m_key_buf_max_index_id), - Rdb_key_def::INDEX_NUMBER_SIZE); + m_system_cfh = cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME, "", + nullptr, &is_automatic); + rdb_netbuf_store_index(m_key_buf_max_index_id, Rdb_key_def::MAX_INDEX_ID); + m_key_slice_max_index_id = + rocksdb::Slice(reinterpret_cast(m_key_buf_max_index_id), + Rdb_key_def::INDEX_NUMBER_SIZE); resume_drop_indexes(); rollback_ongoing_index_creation(); return (m_system_cfh == nullptr); } -std::unique_ptr Rdb_dict_manager::begin() const -{ +std::unique_ptr Rdb_dict_manager::begin() const { return std::unique_ptr(new rocksdb::WriteBatch); } -void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase* const batch, +void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase *const batch, const rocksdb::Slice &key, - const rocksdb::Slice &value) const -{ + const rocksdb::Slice &value) const { batch->Put(m_system_cfh, key, value); } rocksdb::Status Rdb_dict_manager::get_value(const rocksdb::Slice &key, - std::string* const value) const -{ + std::string *const value) const { rocksdb::ReadOptions options; - options.total_order_seek= true; + 
options.total_order_seek = true; return m_db->Get(options, m_system_cfh, key, value); } void Rdb_dict_manager::delete_key(rocksdb::WriteBatchBase *batch, - const rocksdb::Slice &key) const -{ + const rocksdb::Slice &key) const { batch->Delete(m_system_cfh, key); } -rocksdb::Iterator* Rdb_dict_manager::new_iterator() const -{ +rocksdb::Iterator *Rdb_dict_manager::new_iterator() const { /* Reading data dictionary should always skip bloom filter */ rocksdb::ReadOptions read_options; - read_options.total_order_seek= true; + read_options.total_order_seek = true; return m_db->NewIterator(read_options, m_system_cfh); } -int Rdb_dict_manager::commit(rocksdb::WriteBatch* const batch, const bool &sync) -const -{ +int Rdb_dict_manager::commit(rocksdb::WriteBatch *const batch, + const bool &sync) const { if (!batch) - return 1; - int res= 0; + return HA_EXIT_FAILURE; + int res = 0; rocksdb::WriteOptions options; - options.sync= sync; - rocksdb::Status s= m_db->Write(options, batch); - res= !s.ok(); // we return true when something failed - if (res) - { + options.sync = sync; + rocksdb::Status s = m_db->Write(options, batch); + res = !s.ok(); // we return true when something failed + if (res) { rdb_handle_io_error(s, RDB_IO_ERROR_DICT_COMMIT); } batch->Clear(); return res; } -void Rdb_dict_manager::dump_index_id(uchar* const netbuf, +void Rdb_dict_manager::dump_index_id(uchar *const netbuf, Rdb_key_def::DATA_DICT_TYPE dict_type, - const GL_INDEX_ID &gl_index_id) -{ + const GL_INDEX_ID &gl_index_id) { rdb_netbuf_store_uint32(netbuf, dict_type); rdb_netbuf_store_uint32(netbuf + Rdb_key_def::INDEX_NUMBER_SIZE, gl_index_id.cf_id); @@ -4014,127 +3618,116 @@ void Rdb_dict_manager::dump_index_id(uchar* const netbuf, gl_index_id.index_id); } -void Rdb_dict_manager::delete_with_prefix(rocksdb::WriteBatch* const batch, - Rdb_key_def::DATA_DICT_TYPE dict_type, - const GL_INDEX_ID &gl_index_id) const -{ - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; +void 
Rdb_dict_manager::delete_with_prefix( + rocksdb::WriteBatch *const batch, Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; dump_index_id(key_buf, dict_type, gl_index_id); - rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); delete_key(batch, key); } void Rdb_dict_manager::add_or_update_index_cf_mapping( - rocksdb::WriteBatch* batch, - const uchar m_index_type, - const uint16_t kv_version, - const uint32_t index_id, - const uint32_t cf_id) const -{ - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; - uchar value_buf[256]= {0}; - GL_INDEX_ID gl_index_id= {cf_id, index_id}; + rocksdb::WriteBatch *batch, const uchar m_index_type, + const uint16_t kv_version, const uint32_t index_id, + const uint32_t cf_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + uchar value_buf[256] = {0}; + GL_INDEX_ID gl_index_id = {cf_id, index_id}; dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); - const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); - uchar* ptr= value_buf; + uchar *ptr = value_buf; rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST); - ptr+= 2; + ptr += 2; rdb_netbuf_store_byte(ptr, m_index_type); - ptr+= 1; + ptr += 1; rdb_netbuf_store_uint16(ptr, kv_version); - ptr+= 2; + ptr += 2; - const rocksdb::Slice value= rocksdb::Slice((char*)value_buf, ptr-value_buf); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, ptr - value_buf); batch->Put(m_system_cfh, key, value); } -void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch* const batch, +void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch, const uint32_t &cf_id, - const uint32_t &cf_flags) const -{ - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; - 
uchar value_buf[Rdb_key_def::VERSION_SIZE+ - Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + const uint32_t &cf_flags) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE] = + {0}; rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); - const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); rdb_netbuf_store_uint16(value_buf, Rdb_key_def::CF_DEFINITION_VERSION); rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, cf_flags); - const rocksdb::Slice value= - rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, key, value); } -void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch* batch, - const GL_INDEX_ID &gl_index_id) const -{ +void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch, + const GL_INDEX_ID &gl_index_id) const { delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id); } - bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, uint16_t *m_index_dict_version, uchar *m_index_type, - uint16_t *kv_version) const -{ - bool found= false; - bool error= false; + uint16_t *kv_version) const { + bool found = false; + bool error = false; std::string value; - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); - const rocksdb::Slice &key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice &key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); - const rocksdb::Status &status= get_value(key, &value); - if (status.ok()) - { - const uchar* const val= (const uchar*)value.c_str(); - const uchar* ptr= val; - 
*m_index_dict_version= rdb_netbuf_to_uint16(val); - *kv_version= 0; - *m_index_type= 0; - ptr+= 2; + const rocksdb::Status &status = get_value(key, &value); + if (status.ok()) { + const uchar *const val = (const uchar *)value.c_str(); + const uchar *ptr = val; + *m_index_dict_version = rdb_netbuf_to_uint16(val); + *kv_version = 0; + *m_index_type = 0; + ptr += 2; switch (*m_index_dict_version) { case Rdb_key_def::INDEX_INFO_VERSION_VERIFY_KV_FORMAT: case Rdb_key_def::INDEX_INFO_VERSION_GLOBAL_ID: - *m_index_type= rdb_netbuf_to_byte(ptr); - ptr+= 1; - *kv_version= rdb_netbuf_to_uint16(ptr); - found= true; + *m_index_type = rdb_netbuf_to_byte(ptr); + ptr += 1; + *kv_version = rdb_netbuf_to_uint16(ptr); + found = true; break; default: - error= true; + error = true; break; } - switch (*m_index_type) - { + switch (*m_index_type) { case Rdb_key_def::INDEX_TYPE_PRIMARY: - case Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY: - { - error= *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + case Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY: { + error = *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; break; } case Rdb_key_def::INDEX_TYPE_SECONDARY: - error= *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + error = *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; break; default: - error= true; + error = true; break; } } - if (error) - { + if (error) { // NO_LINT_DEBUG sql_print_error("RocksDB: Found invalid key version number (%u, %u, %u) " "from data dictionary. 
This should never happen " - "and it may be a bug.", *m_index_dict_version, - *m_index_type, *kv_version); + "and it may be a bug.", + *m_index_dict_version, *m_index_type, *kv_version); abort_with_stack_traces(); } @@ -4142,24 +3735,21 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, } bool Rdb_dict_manager::get_cf_flags(const uint32_t &cf_id, - uint32_t* const cf_flags) const -{ - bool found= false; + uint32_t *const cf_flags) const { + bool found = false; std::string value; - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0}; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); - const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); - const rocksdb::Status status= get_value(key, &value); - if (status.ok()) - { - const uchar* val= (const uchar*)value.c_str(); - uint16_t version= rdb_netbuf_to_uint16(val); - if (version == Rdb_key_def::CF_DEFINITION_VERSION) - { - *cf_flags= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE); - found= true; + const rocksdb::Status status = get_value(key, &value); + if (status.ok()) { + const uchar *val = (const uchar *)value.c_str(); + uint16_t version = rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::CF_DEFINITION_VERSION) { + *cf_flags = rdb_netbuf_to_uint32(val + Rdb_key_def::VERSION_SIZE); + found = true; } } return found; @@ -4171,22 +3761,20 @@ bool Rdb_dict_manager::get_cf_flags(const uint32_t &cf_id, ongoing creation. 
*/ void Rdb_dict_manager::get_ongoing_index_operation( - std::vector* const gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) const -{ + std::unordered_set *gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); uchar index_buf[Rdb_key_def::INDEX_NUMBER_SIZE]; rdb_netbuf_store_uint32(index_buf, dd_type); - const rocksdb::Slice index_slice(reinterpret_cast(index_buf), - Rdb_key_def::INDEX_NUMBER_SIZE); + const rocksdb::Slice index_slice(reinterpret_cast(index_buf), + Rdb_key_def::INDEX_NUMBER_SIZE); - rocksdb::Iterator* it= new_iterator(); - for (it->Seek(index_slice); it->Valid(); it->Next()) - { - rocksdb::Slice key= it->key(); - const uchar* const ptr= (const uchar*)key.data(); + rocksdb::Iterator *it = new_iterator(); + for (it->Seek(index_slice); it->Valid(); it->Next()) { + rocksdb::Slice key = it->key(); + const uchar *const ptr = (const uchar *)key.data(); /* Ongoing drop/create index operations require key to be of the form: @@ -4196,8 +3784,7 @@ void Rdb_dict_manager::get_ongoing_index_operation( ddl_type with different format. */ if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3 || - rdb_netbuf_to_uint32(ptr) != dd_type) - { + rdb_netbuf_to_uint32(ptr) != dd_type) { break; } @@ -4205,10 +3792,11 @@ void Rdb_dict_manager::get_ongoing_index_operation( // Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION = 1 as a value. // If increasing version number, we need to add version check logic here. 
GL_INDEX_ID gl_index_id; - gl_index_id.cf_id= rdb_netbuf_to_uint32(ptr+Rdb_key_def::INDEX_NUMBER_SIZE); - gl_index_id.index_id= rdb_netbuf_to_uint32( - ptr + 2 * Rdb_key_def::INDEX_NUMBER_SIZE); - gl_index_ids->push_back(gl_index_id); + gl_index_id.cf_id = + rdb_netbuf_to_uint32(ptr + Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_id.index_id = + rdb_netbuf_to_uint32(ptr + 2 * Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_ids->insert(gl_index_id); } delete it; } @@ -4219,22 +3807,19 @@ void Rdb_dict_manager::get_ongoing_index_operation( or not. */ bool Rdb_dict_manager::is_index_operation_ongoing( - const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) const -{ + const GL_INDEX_ID &gl_index_id, Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); - bool found= false; + bool found = false; std::string value; - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; dump_index_id(key_buf, dd_type, gl_index_id); - const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); - const rocksdb::Status status= get_value(key, &value); - if (status.ok()) - { - found= true; + const rocksdb::Status status = get_value(key, &value); + if (status.ok()) { + found = true; } return found; } @@ -4244,32 +3829,27 @@ bool Rdb_dict_manager::is_index_operation_ongoing( by drop_index_thread, or to track online index creation. 
*/ void Rdb_dict_manager::start_ongoing_index_operation( - rocksdb::WriteBatch* const batch, - const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) const -{ + rocksdb::WriteBatch *const batch, const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; - uchar value_buf[Rdb_key_def::VERSION_SIZE]= {0}; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE] = {0}; dump_index_id(key_buf, dd_type, gl_index_id); // version as needed - if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) - { + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { rdb_netbuf_store_uint16(value_buf, Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION); - } - else - { + } else { rdb_netbuf_store_uint16(value_buf, Rdb_key_def::DDL_CREATE_INDEX_ONGOING_VERSION); } - const rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf)); - const rocksdb::Slice value= - rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, key, value); } @@ -4278,10 +3858,8 @@ void Rdb_dict_manager::start_ongoing_index_operation( completed dropping entire key/values of the index_id */ void Rdb_dict_manager::end_ongoing_index_operation( - rocksdb::WriteBatch* const batch, - const GL_INDEX_ID& gl_index_id, - Rdb_key_def::DATA_DICT_TYPE dd_type) const -{ + rocksdb::WriteBatch *const batch, const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); @@ -4292,9 +3870,8 @@ void Rdb_dict_manager::end_ongoing_index_operation( Returning true if there is no 
target index ids to be removed by drop_index_thread */ -bool Rdb_dict_manager::is_drop_index_empty() const -{ - std::vector gl_index_ids; +bool Rdb_dict_manager::is_drop_index_empty() const { + std::unordered_set gl_index_ids; get_ongoing_drop_indexes(&gl_index_ids); return gl_index_ids.empty(); } @@ -4304,13 +3881,11 @@ bool Rdb_dict_manager::is_drop_index_empty() const that dropping indexes started, and adding data dictionary so that all associated indexes to be removed */ -void Rdb_dict_manager::add_drop_table(std::shared_ptr* const key_descr, - const uint32 &n_keys, - rocksdb::WriteBatch* const batch) const -{ +void Rdb_dict_manager::add_drop_table( + std::shared_ptr *const key_descr, const uint32 &n_keys, + rocksdb::WriteBatch *const batch) const { std::unordered_set dropped_index_ids; - for (uint32 i = 0; i < n_keys; i++) - { + for (uint32 i = 0; i < n_keys; i++) { dropped_index_ids.insert(key_descr[i]->get_gl_index_id()); } @@ -4323,11 +3898,9 @@ void Rdb_dict_manager::add_drop_table(std::shared_ptr* const key_de all associated indexes to be removed */ void Rdb_dict_manager::add_drop_index( - const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch* const batch) const -{ - for (const auto& gl_index_id : gl_index_ids) - { + const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const { + for (const auto &gl_index_id : gl_index_ids) { log_start_drop_index(gl_index_id, "Begin"); start_drop_index(batch, gl_index_id); } @@ -4339,14 +3912,12 @@ void Rdb_dict_manager::add_drop_index( indexes to be added. 
*/ void Rdb_dict_manager::add_create_index( - const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch* const batch) const -{ - for (const auto& gl_index_id : gl_index_ids) - { + const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const { + for (const auto &gl_index_id : gl_index_ids) { // NO_LINT_DEBUG sql_print_information("RocksDB: Begin index creation (%u,%u)", - gl_index_id.cf_id, gl_index_id.index_id); + gl_index_id.cf_id, gl_index_id.index_id); start_create_index(batch, gl_index_id); } } @@ -4356,30 +3927,41 @@ void Rdb_dict_manager::add_create_index( finished dropping any index, or at the completion of online index creation. */ void Rdb_dict_manager::finish_indexes_operation( - const std::unordered_set& gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) const -{ + const std::unordered_set &gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); - const std::unique_ptr wb= begin(); - rocksdb::WriteBatch* const batch= wb.get(); + const std::unique_ptr wb = begin(); + rocksdb::WriteBatch *const batch = wb.get(); - for (const auto& gl_index_id : gl_index_ids) - { - if (is_index_operation_ongoing(gl_index_id, dd_type)) - { + std::unordered_set incomplete_create_indexes; + get_ongoing_create_indexes(&incomplete_create_indexes); + + for (const auto &gl_index_id : gl_index_ids) { + if (is_index_operation_ongoing(gl_index_id, dd_type)) { // NO_LINT_DEBUG sql_print_information("RocksDB: Finished %s (%u,%u)", - dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ? - "filtering dropped index" : "index creation", - gl_index_id.cf_id, gl_index_id.index_id); + dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING + ? 
"filtering dropped index" + : "index creation", + gl_index_id.cf_id, gl_index_id.index_id); end_ongoing_index_operation(batch, gl_index_id, dd_type); + + /* + Remove the corresponding incomplete create indexes from data + dictionary as well + */ + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { + if (incomplete_create_indexes.count(gl_index_id)) { + end_ongoing_index_operation(batch, gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + } } - if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) - { + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { delete_index_info(batch, gl_index_id); } } @@ -4391,163 +3973,154 @@ void Rdb_dict_manager::finish_indexes_operation( Rdb_dict_manager (at startup). If there is any index ids that are drop ongoing, printing out messages for diagnostics purposes. */ -void Rdb_dict_manager::resume_drop_indexes() const -{ - std::vector gl_index_ids; +void Rdb_dict_manager::resume_drop_indexes() const { + std::unordered_set gl_index_ids; get_ongoing_drop_indexes(&gl_index_ids); - uint max_index_id_in_dict= 0; + uint max_index_id_in_dict = 0; get_max_index_id(&max_index_id_in_dict); - for (const auto& gl_index_id : gl_index_ids) - { + for (const auto &gl_index_id : gl_index_ids) { log_start_drop_index(gl_index_id, "Resume"); - if (max_index_id_in_dict < gl_index_id.index_id) - { + if (max_index_id_in_dict < gl_index_id.index_id) { sql_print_error("RocksDB: Found max index id %u from data dictionary " "but also found dropped index id (%u,%u) from drop_index " "dictionary. 
This should never happen and is possibly a " - "bug.", max_index_id_in_dict, gl_index_id.cf_id, + "bug.", + max_index_id_in_dict, gl_index_id.cf_id, gl_index_id.index_id); abort_with_stack_traces(); } } } -void Rdb_dict_manager::rollback_ongoing_index_creation() const -{ - const std::unique_ptr wb= begin(); - rocksdb::WriteBatch* const batch= wb.get(); +void Rdb_dict_manager::rollback_ongoing_index_creation() const { + const std::unique_ptr wb = begin(); + rocksdb::WriteBatch *const batch = wb.get(); - std::vector gl_index_ids; + std::unordered_set gl_index_ids; get_ongoing_create_indexes(&gl_index_ids); - for (const auto& gl_index_id : gl_index_ids) - { + for (const auto &gl_index_id : gl_index_ids) { // NO_LINT_DEBUG sql_print_information("RocksDB: Removing incomplete create index (%u,%u)", - gl_index_id.cf_id, gl_index_id.index_id); + gl_index_id.cf_id, gl_index_id.index_id); start_drop_index(batch, gl_index_id); - end_ongoing_index_operation(batch, gl_index_id, - Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } commit(batch); } void Rdb_dict_manager::log_start_drop_table( - const std::shared_ptr* const key_descr, - const uint32 &n_keys, - const char* const log_action) const -{ + const std::shared_ptr *const key_descr, const uint32 &n_keys, + const char *const log_action) const { for (uint32 i = 0; i < n_keys; i++) { log_start_drop_index(key_descr[i]->get_gl_index_id(), log_action); } } void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, - const char* log_action) const -{ - uint16 m_index_dict_version= 0; - uchar m_index_type= 0; - uint16 kv_version= 0; - if (!get_index_info(gl_index_id, &m_index_dict_version, - &m_index_type, &kv_version)) - { - sql_print_error("RocksDB: Failed to get column family info " - "from index id (%u,%u). 
MyRocks data dictionary may " - "get corrupted.", gl_index_id.cf_id, gl_index_id.index_id); - abort_with_stack_traces(); + const char *log_action) const { + uint16 m_index_dict_version = 0; + uchar m_index_type = 0; + uint16 kv_version = 0; + + if (!get_index_info(gl_index_id, &m_index_dict_version, &m_index_type, + &kv_version)) { + /* + If we don't find the index info, it could be that it's because it was a + partially created index that isn't in the data dictionary yet that needs + to be rolled back. + */ + std::unordered_set incomplete_create_indexes; + get_ongoing_create_indexes(&incomplete_create_indexes); + + if (!incomplete_create_indexes.count(gl_index_id)) { + /* If it's not a partially created index, something is very wrong. */ + sql_print_error("RocksDB: Failed to get column family info " + "from index id (%u,%u). MyRocks data dictionary may " + "get corrupted.", + gl_index_id.cf_id, gl_index_id.index_id); + abort_with_stack_traces(); + } } sql_print_information("RocksDB: %s filtering dropped index (%u,%u)", log_action, gl_index_id.cf_id, gl_index_id.index_id); } -bool Rdb_dict_manager::get_max_index_id(uint32_t* const index_id) const -{ - bool found= false; +bool Rdb_dict_manager::get_max_index_id(uint32_t *const index_id) const { + bool found = false; std::string value; - const rocksdb::Status status= get_value(m_key_slice_max_index_id, &value); - if (status.ok()) - { - const uchar* const val= (const uchar*)value.c_str(); - const uint16_t &version= rdb_netbuf_to_uint16(val); - if (version == Rdb_key_def::MAX_INDEX_ID_VERSION) - { - *index_id= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE); - found= true; + const rocksdb::Status status = get_value(m_key_slice_max_index_id, &value); + if (status.ok()) { + const uchar *const val = (const uchar *)value.c_str(); + const uint16_t &version = rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::MAX_INDEX_ID_VERSION) { + *index_id = rdb_netbuf_to_uint32(val + Rdb_key_def::VERSION_SIZE); + found = 
true; } } return found; } -bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* const batch, - const uint32_t &index_id) const -{ +bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch *const batch, + const uint32_t &index_id) const { DBUG_ASSERT(batch != nullptr); - uint32_t old_index_id= -1; - if (get_max_index_id(&old_index_id)) - { - if (old_index_id > index_id) - { + uint32_t old_index_id = -1; + if (get_max_index_id(&old_index_id)) { + if (old_index_id > index_id) { sql_print_error("RocksDB: Found max index id %u from data dictionary " "but trying to update to older value %u. This should " - "never happen and possibly a bug.", old_index_id, - index_id); + "never happen and possibly a bug.", + old_index_id, index_id); return true; } } - uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE]= - {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE] = + {0}; rdb_netbuf_store_uint16(value_buf, Rdb_key_def::MAX_INDEX_ID_VERSION); rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, index_id); - const rocksdb::Slice value= - rocksdb::Slice((char*)value_buf, sizeof(value_buf)); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); batch->Put(m_system_cfh, m_key_slice_max_index_id, value); return false; } -void Rdb_dict_manager::add_stats(rocksdb::WriteBatch* const batch, - const std::vector& stats) const -{ +void Rdb_dict_manager::add_stats( + rocksdb::WriteBatch *const batch, + const std::vector &stats) const { DBUG_ASSERT(batch != nullptr); - for (const auto& it : stats) { - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; + for (const auto &it : stats) { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, it.m_gl_index_id); // IndexStats::materialize takes complete care of serialization including // storing the version - const auto value = Rdb_index_stats::materialize( - std::vector{it}, 
1.); + const auto value = + Rdb_index_stats::materialize(std::vector{it}, 1.); - batch->Put( - m_system_cfh, - rocksdb::Slice((char*)key_buf, sizeof(key_buf)), - value - ); + batch->Put(m_system_cfh, rocksdb::Slice((char *)key_buf, sizeof(key_buf)), + value); } } -Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const -{ - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0}; +Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, gl_index_id); std::string value; - const rocksdb::Status status= get_value( - rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), - &value); - if (status.ok()) - { + const rocksdb::Status status = get_value( + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), + &value); + if (status.ok()) { std::vector v; // unmaterialize checks if the version matches - if (Rdb_index_stats::unmaterialize(value, &v)== 0 && v.size() == 1) - { + if (Rdb_index_stats::unmaterialize(value, &v) == 0 && v.size() == 1) { return v[0]; } } @@ -4555,18 +4128,17 @@ Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const return Rdb_index_stats(); } -uint -Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager* const dict) -{ +uint Rdb_seq_generator::get_and_update_next_number( + Rdb_dict_manager *const dict) { DBUG_ASSERT(dict != nullptr); uint res; mysql_mutex_lock(&m_mutex); - res= m_next_number++; + res = m_next_number++; - const std::unique_ptr wb= dict->begin(); - rocksdb::WriteBatch* const batch= wb.get(); + const std::unique_ptr wb = dict->begin(); + rocksdb::WriteBatch *const batch = wb.get(); DBUG_ASSERT(batch != nullptr); dict->update_max_index_id(batch, res); @@ -4577,4 +4149,4 @@ Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager* const dict) return res; } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.h 
b/storage/rocksdb/rdb_datadic.h index 91cb9b8d563..b084752f601 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -56,15 +56,13 @@ class Rdb_ddl_manager; In order to accommodate both cases, we require both calls to be made and unpack_info is passed as context data between the two. */ -class Rdb_pack_field_context -{ - public: - Rdb_pack_field_context(const Rdb_pack_field_context&) = delete; - Rdb_pack_field_context& operator=(const Rdb_pack_field_context&) = delete; +class Rdb_pack_field_context { +public: + Rdb_pack_field_context(const Rdb_pack_field_context &) = delete; + Rdb_pack_field_context &operator=(const Rdb_pack_field_context &) = delete; - explicit Rdb_pack_field_context(Rdb_string_writer* const writer_arg) : - writer(writer_arg) - {} + explicit Rdb_pack_field_context(Rdb_string_writer *const writer_arg) + : writer(writer_arg) {} // NULL means we're not producing unpack_info. Rdb_string_writer *writer; @@ -76,51 +74,50 @@ struct Rdb_collation_codec; C-style "virtual table" allowing different handling of packing logic based on the field type. See Rdb_field_packing::setup() implementation. 
*/ -using rdb_make_unpack_info_t= void (*)(const Rdb_collation_codec *codec, - const Field *field, - Rdb_pack_field_context *pack_ctx); -using rdb_index_field_unpack_t= int (*)(Rdb_field_packing *fpi, Field *field, - uchar *field_ptr, - Rdb_string_reader *reader, - Rdb_string_reader *unpack_reader); -using rdb_index_field_skip_t= int (*)(const Rdb_field_packing *fpi, - const Field *field, - Rdb_string_reader *reader); -using rdb_index_field_pack_t= void (*)(Rdb_field_packing *fpi, Field *field, - uchar* buf, uchar **dst, - Rdb_pack_field_context *pack_ctx); +using rdb_make_unpack_info_t = void (*)(const Rdb_collation_codec *codec, + const Field *field, + Rdb_pack_field_context *pack_ctx); +using rdb_index_field_unpack_t = int (*)(Rdb_field_packing *fpi, Field *field, + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unpack_reader); +using rdb_index_field_skip_t = int (*)(const Rdb_field_packing *fpi, + const Field *field, + Rdb_string_reader *reader); +using rdb_index_field_pack_t = void (*)(Rdb_field_packing *fpi, Field *field, + uchar *buf, uchar **dst, + Rdb_pack_field_context *pack_ctx); -const uint RDB_INVALID_KEY_LEN= uint(-1); +const uint RDB_INVALID_KEY_LEN = uint(-1); /* How much one checksum occupies when stored in the record */ -const size_t RDB_CHECKSUM_SIZE= sizeof(uint32_t); +const size_t RDB_CHECKSUM_SIZE = sizeof(uint32_t); /* How much the checksum data occupies in record, in total. It is storing two checksums plus 1 tag-byte. */ -const size_t RDB_CHECKSUM_CHUNK_SIZE= 2 * RDB_CHECKSUM_SIZE + 1; +const size_t RDB_CHECKSUM_CHUNK_SIZE = 2 * RDB_CHECKSUM_SIZE + 1; /* Checksum data starts from CHECKSUM_DATA_TAG which is followed by two CRC32 checksums. */ -const char RDB_CHECKSUM_DATA_TAG= 0x01; +const char RDB_CHECKSUM_DATA_TAG = 0x01; /* Unpack data is variable length. It is a 1 tag-byte plus a two byte length field. The length field includes the header as well. 
*/ -const char RDB_UNPACK_DATA_TAG= 0x02; -const size_t RDB_UNPACK_DATA_LEN_SIZE= sizeof(uint16_t); -const size_t RDB_UNPACK_HEADER_SIZE= sizeof(RDB_UNPACK_DATA_TAG) + - RDB_UNPACK_DATA_LEN_SIZE; +const char RDB_UNPACK_DATA_TAG = 0x02; +const size_t RDB_UNPACK_DATA_LEN_SIZE = sizeof(uint16_t); +const size_t RDB_UNPACK_HEADER_SIZE = + sizeof(RDB_UNPACK_DATA_TAG) + RDB_UNPACK_DATA_LEN_SIZE; // Possible return values for rdb_index_field_unpack_t functions. enum { - UNPACK_SUCCESS= 0, - UNPACK_FAILURE= 1, - UNPACK_INFO_MISSING= 2, + UNPACK_SUCCESS = 0, + UNPACK_FAILURE = 1, }; /* @@ -163,62 +160,55 @@ enum { reads. */ -class Rdb_key_def -{ +class Rdb_key_def { public: /* Convert a key from KeyTupleFormat to mem-comparable form */ - uint pack_index_tuple(TABLE* const tbl, uchar* const pack_buffer, - uchar* const packed_tuple, - const uchar* const key_tuple, + uint pack_index_tuple(TABLE *const tbl, uchar *const pack_buffer, + uchar *const packed_tuple, const uchar *const key_tuple, const key_part_map &keypart_map) const; /* Convert a key from Table->record format to mem-comparable form */ - uint pack_record(const TABLE* const tbl, uchar* const pack_buffer, - const uchar* const record, - uchar* const packed_tuple, - Rdb_string_writer* const unpack_info, + uint pack_record(const TABLE *const tbl, uchar *const pack_buffer, + const uchar *const record, uchar *const packed_tuple, + Rdb_string_writer *const unpack_info, const bool &should_store_row_debug_checksums, - const longlong &hidden_pk_id= 0, uint n_key_parts= 0, - uint* const n_null_fields= nullptr) const; + const longlong &hidden_pk_id = 0, uint n_key_parts = 0, + uint *const n_null_fields = nullptr) const; /* Pack the hidden primary key into mem-comparable form. 
*/ uint pack_hidden_pk(const longlong &hidden_pk_id, - uchar* const packed_tuple) const; - int unpack_record(TABLE* const table, uchar* const buf, - const rocksdb::Slice* const packed_key, - const rocksdb::Slice* const unpack_info, - const bool &verify_row_debug_checksums) - const; + uchar *const packed_tuple) const; + int unpack_record(TABLE *const table, uchar *const buf, + const rocksdb::Slice *const packed_key, + const rocksdb::Slice *const unpack_info, + const bool &verify_row_debug_checksums) const; - static bool unpack_info_has_checksum(const rocksdb::Slice& unpack_info); + static bool unpack_info_has_checksum(const rocksdb::Slice &unpack_info); int compare_keys(const rocksdb::Slice *key1, const rocksdb::Slice *key2, - std::size_t* const column_index) const; + std::size_t *const column_index) const; - size_t key_length(const TABLE* const table, const rocksdb::Slice &key) const; + size_t key_length(const TABLE *const table, const rocksdb::Slice &key) const; /* Get the key that is the "infimum" for this index */ - inline void get_infimum_key(uchar* const key, uint* const size) const - { + inline void get_infimum_key(uchar *const key, uint *const size) const { rdb_netbuf_store_index(key, m_index_number); - *size= INDEX_NUMBER_SIZE; + *size = INDEX_NUMBER_SIZE; } /* Get the key that is a "supremum" for this index */ - inline void get_supremum_key(uchar* const key, uint* const size) const - { - rdb_netbuf_store_index(key, m_index_number+1); - *size= INDEX_NUMBER_SIZE; + inline void get_supremum_key(uchar *const key, uint *const size) const { + rdb_netbuf_store_index(key, m_index_number + 1); + *size = INDEX_NUMBER_SIZE; } /* Make a key that is right after the given key. */ - static int successor(uchar* const packed_tuple, const uint &len); + static int successor(uchar *const packed_tuple, const uint &len); /* This can be used to compare prefixes. if X is a prefix of Y, then we consider that X = Y. */ // b describes the lookup key, which can be a prefix of a. 
- int cmp_full_keys(const rocksdb::Slice& a, const rocksdb::Slice& b) const - { + int cmp_full_keys(const rocksdb::Slice &a, const rocksdb::Slice &b) const { DBUG_ASSERT(covers_key(a)); DBUG_ASSERT(covers_key(b)); @@ -226,8 +216,7 @@ public: } /* Check if given mem-comparable key belongs to this index */ - bool covers_key(const rocksdb::Slice &slice) const - { + bool covers_key(const rocksdb::Slice &slice) const { if (slice.size() < INDEX_NUMBER_SIZE) return false; @@ -244,43 +233,35 @@ public: form) */ bool value_matches_prefix(const rocksdb::Slice &value, - const rocksdb::Slice &prefix) const - { + const rocksdb::Slice &prefix) const { return covers_key(value) && !cmp_full_keys(value, prefix); } - uint32 get_keyno() const - { - return m_keyno; - } + uint32 get_keyno() const { return m_keyno; } - uint32 get_index_number() const - { - return m_index_number; - } + uint32 get_index_number() const { return m_index_number; } - GL_INDEX_ID get_gl_index_id() const - { - const GL_INDEX_ID gl_index_id = { m_cf_handle->GetID(), m_index_number }; + GL_INDEX_ID get_gl_index_id() const { + const GL_INDEX_ID gl_index_id = {m_cf_handle->GetID(), m_index_number}; return gl_index_id; } + int read_memcmp_key_part(const TABLE *table_arg, Rdb_string_reader *reader, + const uint part_num) const; + /* Must only be called for secondary keys: */ - uint get_primary_key_tuple(const TABLE* const tbl, - const Rdb_key_def& pk_descr, - const rocksdb::Slice* const key, - uchar* const pk_buffer) const; + uint get_primary_key_tuple(const TABLE *const tbl, + const Rdb_key_def &pk_descr, + const rocksdb::Slice *const key, + uchar *const pk_buffer) const; + + uint get_memcmp_sk_parts(const TABLE *table, const rocksdb::Slice &key, + uchar *sk_buffer, uint *n_null_fields) const; /* Return max length of mem-comparable form */ - uint max_storage_fmt_length() const - { - return m_maxlength; - } + uint max_storage_fmt_length() const { return m_maxlength; } - uint get_key_parts() const - { - return 
m_key_parts; - } + uint get_key_parts() const { return m_key_parts; } /* Get a field object for key part #part_no @@ -292,60 +273,60 @@ public: Internally, we always extend all indexes with PK columns. This function uses our definition of how the index is Extended. */ - inline Field* get_table_field_for_part_no(TABLE *table, uint part_no) const; + inline Field *get_table_field_for_part_no(TABLE *table, uint part_no) const; - const std::string& get_name() const { - return m_name; + const std::string &get_name() const { return m_name; } + + const rocksdb::SliceTransform *get_extractor() const { + return m_prefix_extractor.get(); } - Rdb_key_def& operator=(const Rdb_key_def&) = delete; - Rdb_key_def(const Rdb_key_def& k); + Rdb_key_def &operator=(const Rdb_key_def &) = delete; + Rdb_key_def(const Rdb_key_def &k); Rdb_key_def(uint indexnr_arg, uint keyno_arg, - rocksdb::ColumnFamilyHandle* cf_handle_arg, - uint16_t index_dict_version_arg, - uchar index_type_arg, - uint16_t kv_format_version_arg, - bool is_reverse_cf_arg, bool is_auto_cf_arg, - const char* name, - Rdb_index_stats stats= Rdb_index_stats()); + rocksdb::ColumnFamilyHandle *cf_handle_arg, + uint16_t index_dict_version_arg, uchar index_type_arg, + uint16_t kv_format_version_arg, bool is_reverse_cf_arg, + bool is_auto_cf_arg, const char *name, + Rdb_index_stats stats = Rdb_index_stats()); ~Rdb_key_def(); enum { - INDEX_NUMBER_SIZE= 4, - VERSION_SIZE= 2, - CF_NUMBER_SIZE= 4, - CF_FLAG_SIZE= 4, - PACKED_SIZE= 4, // one int + INDEX_NUMBER_SIZE = 4, + VERSION_SIZE = 2, + CF_NUMBER_SIZE = 4, + CF_FLAG_SIZE = 4, + PACKED_SIZE = 4, // one int }; // bit flags for combining bools when writing to disk enum { - REVERSE_CF_FLAG= 1, - AUTO_CF_FLAG= 2, + REVERSE_CF_FLAG = 1, + AUTO_CF_FLAG = 2, }; // Data dictionary types enum DATA_DICT_TYPE { - DDL_ENTRY_INDEX_START_NUMBER= 1, - INDEX_INFO= 2, - CF_DEFINITION= 3, - BINLOG_INFO_INDEX_NUMBER= 4, - DDL_DROP_INDEX_ONGOING= 5, - INDEX_STATISTICS= 6, - MAX_INDEX_ID= 7, - 
DDL_CREATE_INDEX_ONGOING= 8, - END_DICT_INDEX_ID= 255 + DDL_ENTRY_INDEX_START_NUMBER = 1, + INDEX_INFO = 2, + CF_DEFINITION = 3, + BINLOG_INFO_INDEX_NUMBER = 4, + DDL_DROP_INDEX_ONGOING = 5, + INDEX_STATISTICS = 6, + MAX_INDEX_ID = 7, + DDL_CREATE_INDEX_ONGOING = 8, + END_DICT_INDEX_ID = 255 }; // Data dictionary schema version. Introduce newer versions // if changing schema layout enum { - DDL_ENTRY_INDEX_VERSION= 1, - CF_DEFINITION_VERSION= 1, - BINLOG_INFO_INDEX_NUMBER_VERSION= 1, - DDL_DROP_INDEX_ONGOING_VERSION= 1, - MAX_INDEX_ID_VERSION= 1, - DDL_CREATE_INDEX_ONGOING_VERSION= 1, + DDL_ENTRY_INDEX_VERSION = 1, + CF_DEFINITION_VERSION = 1, + BINLOG_INFO_INDEX_NUMBER_VERSION = 1, + DDL_DROP_INDEX_ONGOING_VERSION = 1, + MAX_INDEX_ID_VERSION = 1, + DDL_CREATE_INDEX_ONGOING_VERSION = 1, // Version for index stats is stored in IndexStats struct }; @@ -353,7 +334,7 @@ public: // INDEX_INFO layout. Update INDEX_INFO_VERSION_LATEST to point to the // latest version number. enum { - INDEX_INFO_VERSION_INITIAL= 1, // Obsolete + INDEX_INFO_VERSION_INITIAL = 1, // Obsolete INDEX_INFO_VERSION_KV_FORMAT, INDEX_INFO_VERSION_GLOBAL_ID, // There is no change to data format in this version, but this version @@ -362,35 +343,35 @@ public: // check inadvertently. INDEX_INFO_VERSION_VERIFY_KV_FORMAT, // This normally point to the latest (currently it does). - INDEX_INFO_VERSION_LATEST= INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + INDEX_INFO_VERSION_LATEST = INDEX_INFO_VERSION_VERIFY_KV_FORMAT, }; // MyRocks index types enum { - INDEX_TYPE_PRIMARY= 1, - INDEX_TYPE_SECONDARY= 2, - INDEX_TYPE_HIDDEN_PRIMARY= 3, + INDEX_TYPE_PRIMARY = 1, + INDEX_TYPE_SECONDARY = 2, + INDEX_TYPE_HIDDEN_PRIMARY = 3, }; // Key/Value format version for each index type enum { - PRIMARY_FORMAT_VERSION_INITIAL= 10, + PRIMARY_FORMAT_VERSION_INITIAL = 10, // This change includes: // - For columns that can be unpacked with unpack_info, PK // stores the unpack_info. 
// - DECIMAL datatype is no longer stored in the row (because // it can be decoded from its mem-comparable form) // - VARCHAR-columns use endspace-padding. - PRIMARY_FORMAT_VERSION_UPDATE1= 11, - PRIMARY_FORMAT_VERSION_LATEST= PRIMARY_FORMAT_VERSION_UPDATE1, + PRIMARY_FORMAT_VERSION_UPDATE1 = 11, + PRIMARY_FORMAT_VERSION_LATEST = PRIMARY_FORMAT_VERSION_UPDATE1, - SECONDARY_FORMAT_VERSION_INITIAL= 10, + SECONDARY_FORMAT_VERSION_INITIAL = 10, // This change the SK format to include unpack_info. - SECONDARY_FORMAT_VERSION_UPDATE1= 11, - SECONDARY_FORMAT_VERSION_LATEST= SECONDARY_FORMAT_VERSION_UPDATE1, + SECONDARY_FORMAT_VERSION_UPDATE1 = 11, + SECONDARY_FORMAT_VERSION_LATEST = SECONDARY_FORMAT_VERSION_UPDATE1, }; - void setup(const TABLE* const table, const Rdb_tbl_def* const tbl_def); + void setup(const TABLE *const table, const Rdb_tbl_def *const tbl_def); rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; } @@ -400,9 +381,9 @@ public: inline bool has_unpack_info(const uint &kp) const; /* Check if given table has a primary key */ - static bool table_has_hidden_pk(const TABLE* const table); + static bool table_has_hidden_pk(const TABLE *const table); - void report_checksum_mismatch(const bool &is_key, const char* const data, + void report_checksum_mismatch(const bool &is_key, const char *const data, const size_t data_size) const; /* Check if index is at least pk_min if it is a PK, @@ -410,21 +391,19 @@ public: bool index_format_min_check(const int &pk_min, const int &sk_min) const; private: - #ifndef DBUG_OFF - inline bool is_storage_available(const int &offset, const int &needed) const - { - const int storage_length= static_cast(max_storage_fmt_length()); + inline bool is_storage_available(const int &offset, const int &needed) const { + const int storage_length = static_cast(max_storage_fmt_length()); return (storage_length - offset) >= needed; } -#endif // DBUG_OFF +#endif // DBUG_OFF /* Global number of this index (used as prefix in StorageFormat) 
*/ const uint32 m_index_number; uchar m_index_number_storage_form[INDEX_NUMBER_SIZE]; - rocksdb::ColumnFamilyHandle* m_cf_handle; + rocksdb::ColumnFamilyHandle *m_cf_handle; public: uint16_t m_index_dict_version; @@ -437,9 +416,9 @@ public: bool m_is_auto_cf; std::string m_name; mutable Rdb_index_stats m_stats; -private: - friend class Rdb_tbl_def; // for m_index_number above +private: + friend class Rdb_tbl_def; // for m_index_number above /* Number of key parts in the primary key*/ uint m_pk_key_parts; @@ -461,6 +440,9 @@ private: */ uint m_key_parts; + /* Prefix extractor for the column family of the key definiton */ + std::shared_ptr m_prefix_extractor; + /* Maximum length of the mem-comparable form. */ uint m_maxlength; @@ -485,8 +467,7 @@ private: // // We have m_dec_idx[idx][dst] = src to get our original character back. // -struct Rdb_collation_codec -{ +struct Rdb_collation_codec { const my_core::CHARSET_INFO *m_cs; // The first element unpacks VARCHAR(n), the second one - CHAR(n). 
std::array m_make_unpack_info_func; @@ -501,15 +482,13 @@ struct Rdb_collation_codec extern mysql_mutex_t rdb_collation_data_mutex; extern mysql_mutex_t rdb_mem_cmp_space_mutex; -extern std::array - rdb_collation_data; +extern std::array + rdb_collation_data; - -class Rdb_field_packing -{ +class Rdb_field_packing { public: - Rdb_field_packing(const Rdb_field_packing&) = delete; - Rdb_field_packing& operator=(const Rdb_field_packing&) = delete; + Rdb_field_packing(const Rdb_field_packing &) = delete; + Rdb_field_packing &operator=(const Rdb_field_packing &) = delete; Rdb_field_packing() = default; /* Length of mem-comparable image of the field, in bytes */ @@ -527,25 +506,22 @@ public: const CHARSET_INFO *m_varchar_charset; // (Valid when Variable Length Space Padded Encoding is used): - uint m_segment_size; // size of segment used + uint m_segment_size; // size of segment used // number of bytes used to store number of trimmed (or added) // spaces in the upack_info bool m_unpack_info_uses_two_bytes; - const std::vector* space_xfrm; + const std::vector *space_xfrm; size_t space_xfrm_len; size_t space_mb_len; - const Rdb_collation_codec* m_charset_codec; + const Rdb_collation_codec *m_charset_codec; /* @return TRUE: this field makes use of unpack_info. 
*/ - bool uses_unpack_info() const - { - return (m_make_unpack_info_func != nullptr); - } + bool uses_unpack_info() const { return (m_make_unpack_info_func != nullptr); } /* TRUE means unpack_info stores the original field value */ bool m_unpack_info_stores_value; @@ -591,11 +567,12 @@ private: */ uint m_keynr; uint m_key_part; + public: - bool setup(const Rdb_key_def* const key_descr, const Field* const field, + bool setup(const Rdb_key_def *const key_descr, const Field *const field, const uint &keynr_arg, const uint &key_part_arg, const uint16 &key_length); - Field *get_field_in_table(const TABLE* const tbl) const; + Field *get_field_in_table(const TABLE *const tbl) const; void fill_hidden_pk_val(uchar **dst, const longlong &hidden_pk_id) const; }; @@ -606,11 +583,10 @@ public: For encoding/decoding of index tuples, see Rdb_key_def. */ -class Rdb_field_encoder -{ - public: - Rdb_field_encoder(const Rdb_field_encoder&) = delete; - Rdb_field_encoder& operator=(const Rdb_field_encoder&) = delete; +class Rdb_field_encoder { +public: + Rdb_field_encoder(const Rdb_field_encoder &) = delete; + Rdb_field_encoder &operator=(const Rdb_field_encoder &) = delete; /* STORE_NONE is set when a column can be decoded solely from their mem-comparable form. 
@@ -629,7 +605,7 @@ class Rdb_field_encoder uint m_null_offset; uint16 m_field_index; - uchar m_null_mask; // 0 means the field cannot be null + uchar m_null_mask; // 0 means the field cannot be null my_core::enum_field_types m_field_type; @@ -637,33 +613,28 @@ class Rdb_field_encoder bool maybe_null() const { return m_null_mask != 0; } - bool uses_variable_len_encoding() const - { + bool uses_variable_len_encoding() const { return (m_field_type == MYSQL_TYPE_BLOB || m_field_type == MYSQL_TYPE_VARCHAR); } }; -inline Field* Rdb_key_def::get_table_field_for_part_no(TABLE *table, - uint part_no) const -{ +inline Field *Rdb_key_def::get_table_field_for_part_no(TABLE *table, + uint part_no) const { DBUG_ASSERT(part_no < get_key_parts()); return m_pack_info[part_no].get_field_in_table(table); } -inline bool Rdb_key_def::can_unpack(const uint &kp) const -{ +inline bool Rdb_key_def::can_unpack(const uint &kp) const { DBUG_ASSERT(kp < m_key_parts); return (m_pack_info[kp].m_unpack_func != nullptr); } -inline bool Rdb_key_def::has_unpack_info(const uint &kp) const -{ +inline bool Rdb_key_def::has_unpack_info(const uint &kp) const { DBUG_ASSERT(kp < m_key_parts); return m_pack_info[kp].uses_unpack_info(); } - /* A table definition. This is an entry in the mapping @@ -673,9 +644,8 @@ inline bool Rdb_key_def::has_unpack_info(const uint &kp) const That's why we keep auto_increment value here, too. 
*/ -class Rdb_tbl_def -{ - private: +class Rdb_tbl_def { +private: void check_if_is_mysql_system_table(); /* Stores 'dbname.tablename' */ @@ -686,27 +656,24 @@ class Rdb_tbl_def std::string m_tablename; std::string m_partition; - void set_name(const std::string& name); + void set_name(const std::string &name); - public: - Rdb_tbl_def(const Rdb_tbl_def&) = delete; - Rdb_tbl_def& operator=(const Rdb_tbl_def&) = delete; +public: + Rdb_tbl_def(const Rdb_tbl_def &) = delete; + Rdb_tbl_def &operator=(const Rdb_tbl_def &) = delete; - explicit Rdb_tbl_def(const std::string& name) : - m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) - { + explicit Rdb_tbl_def(const std::string &name) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(name); } - Rdb_tbl_def(const char* const name, const size_t &len) : - m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) - { + Rdb_tbl_def(const char *const name, const size_t &len) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(std::string(name, len)); } - explicit Rdb_tbl_def(const rocksdb::Slice& slice, const size_t &pos= 0) : - m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) - { + explicit Rdb_tbl_def(const rocksdb::Slice &slice, const size_t &pos = 0) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { set_name(std::string(slice.data() + pos, slice.size() - pos)); } @@ -716,7 +683,7 @@ class Rdb_tbl_def uint m_key_count; /* Array of index descriptors */ - std::shared_ptr* m_key_descr_arr; + std::shared_ptr *m_key_descr_arr; std::atomic m_hidden_pk_val; std::atomic m_auto_incr_val; @@ -724,52 +691,44 @@ class Rdb_tbl_def /* Is this a system table */ bool m_is_mysql_system_table; - bool put_dict(Rdb_dict_manager* const dict, rocksdb::WriteBatch* const batch, - uchar* const key, const size_t &keylen); + bool put_dict(Rdb_dict_manager *const dict, rocksdb::WriteBatch *const batch, + uchar *const key, const size_t 
&keylen); - const std::string& full_tablename() const { return m_dbname_tablename; } - const std::string& base_dbname() const { return m_dbname; } - const std::string& base_tablename() const { return m_tablename; } - const std::string& base_partition() const { return m_partition; } + const std::string &full_tablename() const { return m_dbname_tablename; } + const std::string &base_dbname() const { return m_dbname; } + const std::string &base_tablename() const { return m_tablename; } + const std::string &base_partition() const { return m_partition; } }; - /* A thread-safe sequential number generator. Its performance is not a concern hence it is ok to protect it by a mutex. */ -class Rdb_seq_generator -{ - uint m_next_number= 0; +class Rdb_seq_generator { + uint m_next_number = 0; mysql_mutex_t m_mutex; + public: - Rdb_seq_generator(const Rdb_seq_generator&) = delete; - Rdb_seq_generator& operator=(const Rdb_seq_generator&) = delete; + Rdb_seq_generator(const Rdb_seq_generator &) = delete; + Rdb_seq_generator &operator=(const Rdb_seq_generator &) = delete; Rdb_seq_generator() = default; - void init(const uint &initial_number) - { - mysql_mutex_init(0 , &m_mutex, MY_MUTEX_INIT_FAST); - m_next_number= initial_number; + void init(const uint &initial_number) { + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + m_next_number = initial_number; } - uint get_and_update_next_number(Rdb_dict_manager* const dict); + uint get_and_update_next_number(Rdb_dict_manager *const dict); - void cleanup() - { - mysql_mutex_destroy(&m_mutex); - } + void cleanup() { mysql_mutex_destroy(&m_mutex); } }; - -interface Rdb_tables_scanner -{ - virtual int add_table(Rdb_tbl_def* tdef) =0; +interface Rdb_tables_scanner { + virtual int add_table(Rdb_tbl_def * tdef) = 0; }; - /* This contains a mapping of @@ -778,10 +737,9 @@ interface Rdb_tables_scanner objects are shared among all threads. 
*/ -class Rdb_ddl_manager -{ - Rdb_dict_manager *m_dict= nullptr; - my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements +class Rdb_ddl_manager { + Rdb_dict_manager *m_dict = nullptr; + my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements // maps index id to std::map> m_index_num_to_keydef; mysql_rwlock_t m_rwlock; @@ -792,58 +750,56 @@ class Rdb_ddl_manager // and consumed by the rocksdb background thread std::map m_stats2store; - const std::shared_ptr& find( - GL_INDEX_ID gl_index_id); + const std::shared_ptr &find(GL_INDEX_ID gl_index_id); + public: - Rdb_ddl_manager(const Rdb_ddl_manager&) = delete; - Rdb_ddl_manager& operator=(const Rdb_ddl_manager&) = delete; + Rdb_ddl_manager(const Rdb_ddl_manager &) = delete; + Rdb_ddl_manager &operator=(const Rdb_ddl_manager &) = delete; Rdb_ddl_manager() {} /* Load the data dictionary from on-disk storage */ - bool init(Rdb_dict_manager* const dict_arg, Rdb_cf_manager* const cf_manager, + bool init(Rdb_dict_manager *const dict_arg, Rdb_cf_manager *const cf_manager, const uint32_t &validate_tables); void cleanup(); - Rdb_tbl_def* find(const std::string& table_name, const bool &lock= true); + Rdb_tbl_def *find(const std::string &table_name, const bool &lock = true); std::shared_ptr safe_find(GL_INDEX_ID gl_index_id); - void set_stats( - const std::unordered_map& stats); - void adjust_stats( - const std::vector& new_data, - const std::vector& deleted_data - =std::vector()); + void set_stats(const std::unordered_map &stats); + void adjust_stats(const std::vector &new_data, + const std::vector &deleted_data = + std::vector()); void persist_stats(const bool &sync = false); /* Modify the mapping and write it to on-disk storage */ - int put_and_write(Rdb_tbl_def* const key_descr, - rocksdb::WriteBatch* const batch); - void remove(Rdb_tbl_def* const rec, rocksdb::WriteBatch* const batch, - const bool &lock= true); - bool rename(const std::string& from, const std::string& to, - rocksdb::WriteBatch* const batch); + int 
put_and_write(Rdb_tbl_def *const key_descr, + rocksdb::WriteBatch *const batch); + void remove(Rdb_tbl_def *const rec, rocksdb::WriteBatch *const batch, + const bool &lock = true); + bool rename(const std::string &from, const std::string &to, + rocksdb::WriteBatch *const batch); - uint get_and_update_next_number(Rdb_dict_manager* const dict) - { return m_sequence.get_and_update_next_number(dict); } + uint get_and_update_next_number(Rdb_dict_manager *const dict) { + return m_sequence.get_and_update_next_number(dict); + } /* Walk the data dictionary */ - int scan_for_tables(Rdb_tables_scanner* tables_scanner); + int scan_for_tables(Rdb_tables_scanner *tables_scanner); void erase_index_num(const GL_INDEX_ID &gl_index_id); private: /* Put the data into in-memory table (only) */ - int put(Rdb_tbl_def* const key_descr, const bool &lock= true); + int put(Rdb_tbl_def *const key_descr, const bool &lock = true); /* Helper functions to be passed to my_core::HASH object */ - static const uchar* get_hash_key(Rdb_tbl_def* const rec, size_t* const length, - my_bool not_used __attribute__((unused))); - static void free_hash_elem(void* const data); + static const uchar *get_hash_key(Rdb_tbl_def *const rec, size_t *const length, + my_bool not_used MY_ATTRIBUTE((unused))); + static void free_hash_elem(void *const data); bool validate_schemas(); }; - /* Writing binlog information into RocksDB at commit(), and retrieving binlog information at crash recovery. 
@@ -859,40 +815,37 @@ private: binlog_gtid_length (2 byte form) binlog_gtid */ -class Rdb_binlog_manager -{ +class Rdb_binlog_manager { public: - Rdb_binlog_manager(const Rdb_binlog_manager&) = delete; - Rdb_binlog_manager& operator=(const Rdb_binlog_manager&) = delete; + Rdb_binlog_manager(const Rdb_binlog_manager &) = delete; + Rdb_binlog_manager &operator=(const Rdb_binlog_manager &) = delete; Rdb_binlog_manager() = default; - bool init(Rdb_dict_manager* const dict); + bool init(Rdb_dict_manager *const dict); void cleanup(); - void update(const char* const binlog_name, const my_off_t binlog_pos, - const char* const binlog_max_gtid, - rocksdb::WriteBatchBase* const batch); - bool read(char* const binlog_name, my_off_t* const binlog_pos, - char* const binlog_gtid) const; - void update_slave_gtid_info(const uint &id, const char* const db, - const char* const gtid, - rocksdb::WriteBatchBase* const write_batch); + void update(const char *const binlog_name, const my_off_t binlog_pos, + const char *const binlog_max_gtid, + rocksdb::WriteBatchBase *const batch); + bool read(char *const binlog_name, my_off_t *const binlog_pos, + char *const binlog_gtid) const; + void update_slave_gtid_info(const uint &id, const char *const db, + const char *const gtid, + rocksdb::WriteBatchBase *const write_batch); private: - Rdb_dict_manager *m_dict= nullptr; - uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + Rdb_dict_manager *m_dict = nullptr; + uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; rocksdb::Slice m_key_slice; - rocksdb::Slice pack_value(uchar* const buf, - const char* const binlog_name, + rocksdb::Slice pack_value(uchar *const buf, const char *const binlog_name, const my_off_t &binlog_pos, - const char* const binlog_gtid) const; - bool unpack_value(const uchar* const value, char* const binlog_name, - my_off_t* const binlog_pos, char* const binlog_gtid) const; + const char *const binlog_gtid) const; + bool unpack_value(const uchar *const value, char *const 
binlog_name, + my_off_t *const binlog_pos, char *const binlog_gtid) const; - std::atomic m_slave_gtid_info_tbl; + std::atomic m_slave_gtid_info_tbl; }; - /* Rdb_dict_manager manages how MySQL on RocksDB (MyRocks) stores its internal data dictionary. @@ -944,58 +897,49 @@ private: begin() and commit() to make it easier to do atomic operations. */ -class Rdb_dict_manager -{ +class Rdb_dict_manager { private: mysql_mutex_t m_mutex; - rocksdb::DB *m_db= nullptr; - rocksdb::ColumnFamilyHandle *m_system_cfh= nullptr; + rocksdb::DB *m_db = nullptr; + rocksdb::ColumnFamilyHandle *m_system_cfh = nullptr; /* Utility to put INDEX_INFO and CF_DEFINITION */ - uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE]= {0}; + uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; rocksdb::Slice m_key_slice_max_index_id; - static void dump_index_id(uchar* const netbuf, + static void dump_index_id(uchar *const netbuf, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id); - void delete_with_prefix(rocksdb::WriteBatch* const batch, + void delete_with_prefix(rocksdb::WriteBatch *const batch, Rdb_key_def::DATA_DICT_TYPE dict_type, const GL_INDEX_ID &gl_index_id) const; /* Functions for fast DROP TABLE/INDEX */ void resume_drop_indexes() const; - void log_start_drop_table(const std::shared_ptr* const key_descr, + void log_start_drop_table(const std::shared_ptr *const key_descr, const uint32 &n_keys, - const char* const log_action) const; + const char *const log_action) const; void log_start_drop_index(GL_INDEX_ID gl_index_id, - const char* log_action) const; + const char *log_action) const; + public: - Rdb_dict_manager(const Rdb_dict_manager&) = delete; - Rdb_dict_manager& operator=(const Rdb_dict_manager&) = delete; + Rdb_dict_manager(const Rdb_dict_manager &) = delete; + Rdb_dict_manager &operator=(const Rdb_dict_manager &) = delete; Rdb_dict_manager() = default; - bool init(rocksdb::DB* const rdb_dict, Rdb_cf_manager* const cf_manager); + bool 
init(rocksdb::DB *const rdb_dict, Rdb_cf_manager *const cf_manager); - inline void cleanup() - { - mysql_mutex_destroy(&m_mutex); - } + inline void cleanup() { mysql_mutex_destroy(&m_mutex); } - inline void lock() - { - mysql_mutex_lock(&m_mutex); - } + inline void lock() { mysql_mutex_lock(&m_mutex); } - inline void unlock() - { - mysql_mutex_unlock(&m_mutex); - } + inline void unlock() { mysql_mutex_unlock(&m_mutex); } /* Raw RocksDB operations */ std::unique_ptr begin() const; - int commit(rocksdb::WriteBatch* const batch, const bool &sync = true) const; - rocksdb::Status get_value(const rocksdb::Slice& key, - std::string* const value) const; - void put_key(rocksdb::WriteBatchBase* const batch, const rocksdb::Slice &key, + int commit(rocksdb::WriteBatch *const batch, const bool &sync = true) const; + rocksdb::Status get_value(const rocksdb::Slice &key, + std::string *const value) const; + void put_key(rocksdb::WriteBatchBase *const batch, const rocksdb::Slice &key, const rocksdb::Slice &value) const; void delete_key(rocksdb::WriteBatchBase *batch, const rocksdb::Slice &key) const; @@ -1007,95 +951,86 @@ public: const uint16_t kv_version, const uint index_id, const uint cf_id) const; - void delete_index_info(rocksdb::WriteBatch* batch, + void delete_index_info(rocksdb::WriteBatch *batch, const GL_INDEX_ID &index_id) const; bool get_index_info(const GL_INDEX_ID &gl_index_id, - uint16_t *index_dict_version, - uchar *index_type, uint16_t *kv_version) const; + uint16_t *index_dict_version, uchar *index_type, + uint16_t *kv_version) const; /* CF id => CF flags */ - void add_cf_flags(rocksdb::WriteBatch* const batch, - const uint &cf_id, + void add_cf_flags(rocksdb::WriteBatch *const batch, const uint &cf_id, const uint &cf_flags) const; - bool get_cf_flags(const uint &cf_id, uint* const cf_flags) const; + bool get_cf_flags(const uint &cf_id, uint *const cf_flags) const; /* Functions for fast CREATE/DROP TABLE/INDEX */ - void get_ongoing_index_operation(std::vector* 
gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) const; - bool is_index_operation_ongoing(const GL_INDEX_ID& gl_index_id, + void + get_ongoing_index_operation(std::unordered_set *gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + bool is_index_operation_ongoing(const GL_INDEX_ID &gl_index_id, Rdb_key_def::DATA_DICT_TYPE dd_type) const; - void start_ongoing_index_operation(rocksdb::WriteBatch* batch, - const GL_INDEX_ID& gl_index_id, + void start_ongoing_index_operation(rocksdb::WriteBatch *batch, + const GL_INDEX_ID &gl_index_id, Rdb_key_def::DATA_DICT_TYPE dd_type) const; - void end_ongoing_index_operation(rocksdb::WriteBatch* const batch, - const GL_INDEX_ID& gl_index_id, + void end_ongoing_index_operation(rocksdb::WriteBatch *const batch, + const GL_INDEX_ID &gl_index_id, Rdb_key_def::DATA_DICT_TYPE dd_type) const; bool is_drop_index_empty() const; - void add_drop_table(std::shared_ptr* const key_descr, + void add_drop_table(std::shared_ptr *const key_descr, const uint32 &n_keys, - rocksdb::WriteBatch* const batch) const; - void add_drop_index(const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch* const batch) const; - void add_create_index(const std::unordered_set& gl_index_ids, - rocksdb::WriteBatch* const batch) const; - void finish_indexes_operation( - const std::unordered_set& gl_index_ids, - Rdb_key_def::DATA_DICT_TYPE dd_type) const; + rocksdb::WriteBatch *const batch) const; + void add_drop_index(const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const; + void add_create_index(const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const; + void + finish_indexes_operation(const std::unordered_set &gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; void rollback_ongoing_index_creation() const; - inline void - get_ongoing_drop_indexes(std::vector* gl_index_ids) const - { + inline void get_ongoing_drop_indexes( + std::unordered_set *gl_index_ids) const { 
get_ongoing_index_operation(gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } - inline void - get_ongoing_create_indexes(std::vector* gl_index_ids) const - { + inline void get_ongoing_create_indexes( + std::unordered_set *gl_index_ids) const { get_ongoing_index_operation(gl_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } inline void start_drop_index(rocksdb::WriteBatch *wb, - const GL_INDEX_ID& gl_index_id) const - { + const GL_INDEX_ID &gl_index_id) const { start_ongoing_index_operation(wb, gl_index_id, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } inline void start_create_index(rocksdb::WriteBatch *wb, - const GL_INDEX_ID& gl_index_id) const - { + const GL_INDEX_ID &gl_index_id) const { start_ongoing_index_operation(wb, gl_index_id, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } inline void finish_drop_indexes( - const std::unordered_set& gl_index_ids) const - { - finish_indexes_operation(gl_index_ids, - Rdb_key_def::DDL_DROP_INDEX_ONGOING); + const std::unordered_set &gl_index_ids) const { + finish_indexes_operation(gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } inline void finish_create_indexes( - const std::unordered_set& gl_index_ids) const - { + const std::unordered_set &gl_index_ids) const { finish_indexes_operation(gl_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } - inline bool is_drop_index_ongoing(const GL_INDEX_ID& gl_index_id) const - { + inline bool is_drop_index_ongoing(const GL_INDEX_ID &gl_index_id) const { return is_index_operation_ongoing(gl_index_id, Rdb_key_def::DDL_DROP_INDEX_ONGOING); } - inline bool is_create_index_ongoing(const GL_INDEX_ID& gl_index_id) const - { + inline bool is_create_index_ongoing(const GL_INDEX_ID &gl_index_id) const { return is_index_operation_ongoing(gl_index_id, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); } - bool get_max_index_id(uint32_t* const index_id) const; - bool update_max_index_id(rocksdb::WriteBatch* const batch, + bool get_max_index_id(uint32_t *const index_id) const; + bool 
update_max_index_id(rocksdb::WriteBatch *const batch, const uint32_t &index_id) const; - void add_stats(rocksdb::WriteBatch* const batch, - const std::vector& stats) const; + void add_stats(rocksdb::WriteBatch *const batch, + const std::vector &stats) const; Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id) const; }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index c35116e10a9..35b0cf4899e 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* C++ standard header files */ +#include #include #include @@ -22,8 +23,8 @@ #include /* RocksDB header files */ -#include "rocksdb/convenience.h" #include "rocksdb/compaction_filter.h" +#include "rocksdb/convenience.h" #include "rocksdb/filter_policy.h" #include "rocksdb/memtablerep.h" #include "rocksdb/merge_operator.h" @@ -40,98 +41,84 @@ namespace myrocks { /** - Define the INFORMATION_SCHEMA (I_S) structures needed by MyRocks storage engine. + Define the INFORMATION_SCHEMA (I_S) structures needed by MyRocks storage + engine. 
*/ -#define ROCKSDB_FIELD_INFO(_name_, _len_, _type_, _flag_) \ - { _name_, _len_, _type_, 0, _flag_, nullptr, 0 } +#define ROCKSDB_FIELD_INFO(_name_, _len_, _type_, _flag_) \ + { _name_, _len_, _type_, 0, _flag_, nullptr, 0 } -#define ROCKSDB_FIELD_INFO_END ROCKSDB_FIELD_INFO(nullptr, \ - 0, MYSQL_TYPE_NULL, 0) +#define ROCKSDB_FIELD_INFO_END \ + ROCKSDB_FIELD_INFO(nullptr, 0, MYSQL_TYPE_NULL, 0) /* Support for INFORMATION_SCHEMA.ROCKSDB_CFSTATS dynamic table */ -namespace RDB_CFSTATS_FIELD -{ - enum - { - CF_NAME= 0, - STAT_TYPE, - VALUE - }; -} // namespace RDB_CFSTATS_FIELD +namespace RDB_CFSTATS_FIELD { +enum { CF_NAME = 0, STAT_TYPE, VALUE }; +} // namespace RDB_CFSTATS_FIELD -static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[]= -{ - ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; static int rdb_i_s_cfstats_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + bool ret; uint64_t val; - DBUG_ENTER("rdb_i_s_cfstats_fill_table"); - const std::vector> cf_properties = { - {rocksdb::DB::Properties::kNumImmutableMemTable, "NUM_IMMUTABLE_MEM_TABLE"}, - {rocksdb::DB::Properties::kMemTableFlushPending, - "MEM_TABLE_FLUSH_PENDING"}, - {rocksdb::DB::Properties::kCompactionPending, "COMPACTION_PENDING"}, - {rocksdb::DB::Properties::kCurSizeActiveMemTable, - 
"CUR_SIZE_ACTIVE_MEM_TABLE"}, - {rocksdb::DB::Properties::kCurSizeAllMemTables, "CUR_SIZE_ALL_MEM_TABLES"}, - {rocksdb::DB::Properties::kNumEntriesActiveMemTable, - "NUM_ENTRIES_ACTIVE_MEM_TABLE"}, - {rocksdb::DB::Properties::kNumEntriesImmMemTables, - "NUM_ENTRIES_IMM_MEM_TABLES"}, - {rocksdb::DB::Properties::kEstimateTableReadersMem, - "NON_BLOCK_CACHE_SST_MEM_USAGE"}, - {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"} - }; + {rocksdb::DB::Properties::kNumImmutableMemTable, + "NUM_IMMUTABLE_MEM_TABLE"}, + {rocksdb::DB::Properties::kMemTableFlushPending, + "MEM_TABLE_FLUSH_PENDING"}, + {rocksdb::DB::Properties::kCompactionPending, "COMPACTION_PENDING"}, + {rocksdb::DB::Properties::kCurSizeActiveMemTable, + "CUR_SIZE_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kCurSizeAllMemTables, + "CUR_SIZE_ALL_MEM_TABLES"}, + {rocksdb::DB::Properties::kNumEntriesActiveMemTable, + "NUM_ENTRIES_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kNumEntriesImmMemTables, + "NUM_ENTRIES_IMM_MEM_TABLES"}, + {rocksdb::DB::Properties::kEstimateTableReadersMem, + "NON_BLOCK_CACHE_SST_MEM_USAGE"}, + {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"}}; - rocksdb::DB* const rdb= rdb_get_rocksdb_db(); - const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); DBUG_ASSERT(rdb != nullptr); - for (const auto &cf_name : cf_manager.get_cf_names()) - { - rocksdb::ColumnFamilyHandle* cfh; + for (const auto &cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; bool is_automatic; /* Only the cf name is important. Whether it was generated automatically does not matter, so is_automatic is ignored. 
*/ - cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); if (cfh == nullptr) continue; - for (const auto &property : cf_properties) - { + for (const auto &property : cf_properties) { if (!rdb->GetIntProperty(cfh, property.first, &val)) continue; DBUG_ASSERT(tables != nullptr); tables->table->field[RDB_CFSTATS_FIELD::CF_NAME]->store( - cf_name.c_str(), - cf_name.size(), - system_charset_info); + cf_name.c_str(), cf_name.size(), system_charset_info); tables->table->field[RDB_CFSTATS_FIELD::STAT_TYPE]->store( - property.second.c_str(), - property.second.size(), - system_charset_info); + property.second.c_str(), property.second.size(), system_charset_info); tables->table->field[RDB_CFSTATS_FIELD::VALUE]->store(val, true); - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret) DBUG_RETURN(ret); @@ -140,17 +127,17 @@ static int rdb_i_s_cfstats_fill_table( DBUG_RETURN(0); } -static int rdb_i_s_cfstats_init(void *p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_cfstats_init(void *p) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("rdb_i_s_cfstats_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_cfstats_fields_info; - schema->fill_table= rdb_i_s_cfstats_fill_table; + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_cfstats_fields_info; + schema->fill_table = rdb_i_s_cfstats_fill_table; DBUG_RETURN(0); } @@ -158,56 +145,44 @@ static int rdb_i_s_cfstats_init(void *p) /* Support for INFORMATION_SCHEMA.ROCKSDB_DBSTATS dynamic table */ -namespace RDB_DBSTATS_FIELD -{ - enum - { - STAT_TYPE= 0, - VALUE - }; -} // namespace RDB_DBSTATS_FIELD +namespace RDB_DBSTATS_FIELD { +enum { STAT_TYPE = 0, VALUE }; +} // namespace RDB_DBSTATS_FIELD -static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[]= -{ - 
ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[] = { + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; static int rdb_i_s_dbstats_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + bool ret; uint64_t val; - DBUG_ENTER("rdb_i_s_dbstats_fill_table"); - const std::vector> db_properties = { - {rocksdb::DB::Properties::kBackgroundErrors, "DB_BACKGROUND_ERRORS"}, - {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"}, - {rocksdb::DB::Properties::kOldestSnapshotTime, "DB_OLDEST_SNAPSHOT_TIME"} - }; + {rocksdb::DB::Properties::kBackgroundErrors, "DB_BACKGROUND_ERRORS"}, + {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"}, + {rocksdb::DB::Properties::kOldestSnapshotTime, + "DB_OLDEST_SNAPSHOT_TIME"}}; - rocksdb::DB* const rdb= rdb_get_rocksdb_db(); - const rocksdb::BlockBasedTableOptions& table_options= - rdb_get_table_options(); + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + const rocksdb::BlockBasedTableOptions &table_options = + rdb_get_table_options(); - for (const auto &property : db_properties) - { + for (const auto &property : db_properties) { if (!rdb->GetIntProperty(property.first, &val)) continue; DBUG_ASSERT(tables != nullptr); tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( - property.second.c_str(), - property.second.size(), - system_charset_info); + property.second.c_str(), property.second.size(), system_charset_info); tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); - ret= 
my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret) DBUG_RETURN(ret); @@ -223,28 +198,27 @@ static int rdb_i_s_dbstats_fill_table( There is no interface to retrieve this block cache, nor fetch the usage information from the column family. */ - val= (table_options.block_cache ? table_options.block_cache->GetUsage() : 0); + val = (table_options.block_cache ? table_options.block_cache->GetUsage() : 0); tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( - STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), system_charset_info); + STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), system_charset_info); tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); DBUG_RETURN(ret); } -static int rdb_i_s_dbstats_init(void* const p) -{ +static int rdb_i_s_dbstats_init(void *const p) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(p != nullptr); my_core::ST_SCHEMA_TABLE *schema; - DBUG_ENTER("rdb_i_s_dbstats_init"); + schema = (my_core::ST_SCHEMA_TABLE *)p; - schema= (my_core::ST_SCHEMA_TABLE*) p; - - schema->fields_info= rdb_i_s_dbstats_fields_info; - schema->fill_table= rdb_i_s_dbstats_fill_table; + schema->fields_info = rdb_i_s_dbstats_fields_info; + schema->fill_table = rdb_i_s_dbstats_fill_table; DBUG_RETURN(0); } @@ -252,46 +226,32 @@ static int rdb_i_s_dbstats_init(void* const p) /* Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT dynamic table */ -namespace RDB_PERF_CONTEXT_FIELD -{ - enum - { - TABLE_SCHEMA= 0, - TABLE_NAME, - PARTITION_NAME, - STAT_TYPE, - VALUE - }; -} // namespace RDB_PERF_CONTEXT_FIELD +namespace RDB_PERF_CONTEXT_FIELD { +enum { TABLE_SCHEMA = 0, TABLE_NAME, PARTITION_NAME, STAT_TYPE, VALUE }; +} // namespace RDB_PERF_CONTEXT_FIELD -static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[]= -{ - ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 
0), - ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, - MY_I_S_MAYBE_NULL), - ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, - 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[] = { + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; static int rdb_i_s_perf_context_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); - int ret= 0; - Field** field= tables->table->field; + int ret = 0; + Field **field = tables->table->field; - DBUG_ENTER("rdb_i_s_perf_context_fill_table"); - - const std::vector tablenames= rdb_get_open_table_names(); - for (const auto& it : tablenames) - { + const std::vector tablenames = rdb_get_open_table_names(); + for (const auto &it : tablenames) { std::string str, dbname, tablename, partname; Rdb_perf_counters counters; @@ -299,42 +259,35 @@ static int rdb_i_s_perf_context_fill_table( return HA_ERR_INTERNAL_ERROR; } - if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) - { + if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) { continue; } - if (rdb_get_table_perf_counters(it.c_str(), &counters)) - { + if (rdb_get_table_perf_counters(it.c_str(), 
&counters)) { continue; } DBUG_ASSERT(field != nullptr); field[RDB_PERF_CONTEXT_FIELD::TABLE_SCHEMA]->store( - dbname.c_str(), dbname.size(), system_charset_info); + dbname.c_str(), dbname.size(), system_charset_info); field[RDB_PERF_CONTEXT_FIELD::TABLE_NAME]->store( - tablename.c_str(), tablename.size(), system_charset_info); - if (partname.size() == 0) - { + tablename.c_str(), tablename.size(), system_charset_info); + if (partname.size() == 0) { field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_null(); - } - else - { + } else { field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_notnull(); field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->store( - partname.c_str(), partname.size(), system_charset_info); + partname.c_str(), partname.size(), system_charset_info); } - for (int i= 0; i < PC_MAX_IDX; i++) - { + for (int i = 0; i < PC_MAX_IDX; i++) { field[RDB_PERF_CONTEXT_FIELD::STAT_TYPE]->store( - rdb_pc_stat_types[i].c_str(), - rdb_pc_stat_types[i].size(), - system_charset_info); + rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), + system_charset_info); field[RDB_PERF_CONTEXT_FIELD::VALUE]->store(counters.m_value[i], true); - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret) DBUG_RETURN(ret); } @@ -343,18 +296,17 @@ static int rdb_i_s_perf_context_fill_table( DBUG_RETURN(0); } -static int rdb_i_s_perf_context_init(void* const p) -{ +static int rdb_i_s_perf_context_init(void *const p) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(p != nullptr); my_core::ST_SCHEMA_TABLE *schema; - DBUG_ENTER("rdb_i_s_perf_context_init"); + schema = (my_core::ST_SCHEMA_TABLE *)p; - schema= (my_core::ST_SCHEMA_TABLE*) p; - - schema->fields_info= rdb_i_s_perf_context_fields_info; - schema->fill_table= rdb_i_s_perf_context_fill_table; + schema->fields_info = rdb_i_s_perf_context_fields_info; + schema->fill_table = rdb_i_s_perf_context_fill_table; DBUG_RETURN(0); } @@ -362,49 +314,40 @@ static int 
rdb_i_s_perf_context_init(void* const p) /* Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL dynamic table */ -namespace RDB_PERF_CONTEXT_GLOBAL_FIELD -{ - enum - { - STAT_TYPE= 0, - VALUE - }; -} // namespace RDB_PERF_CONTEXT_GLOBAL_FIELD +namespace RDB_PERF_CONTEXT_GLOBAL_FIELD { +enum { STAT_TYPE = 0, VALUE }; +} // namespace RDB_PERF_CONTEXT_GLOBAL_FIELD -static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[]= -{ - ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[] = { + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; static int rdb_i_s_perf_context_global_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); - int ret= 0; - DBUG_ENTER("rdb_i_s_perf_context_global_fill_table"); + int ret = 0; // Get a copy of the global perf counters. 
Rdb_perf_counters global_counters; rdb_get_global_perf_counters(&global_counters); - for (int i= 0; i < PC_MAX_IDX; i++) { + for (int i = 0; i < PC_MAX_IDX; i++) { DBUG_ASSERT(tables->table != nullptr); DBUG_ASSERT(tables->table->field != nullptr); tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::STAT_TYPE]->store( - rdb_pc_stat_types[i].c_str(), - rdb_pc_stat_types[i].size(), - system_charset_info); + rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), + system_charset_info); tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::VALUE]->store( - global_counters.m_value[i], true); + global_counters.m_value[i], true); - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret) DBUG_RETURN(ret); } @@ -412,18 +355,17 @@ static int rdb_i_s_perf_context_global_fill_table( DBUG_RETURN(0); } -static int rdb_i_s_perf_context_global_init(void* const p) -{ +static int rdb_i_s_perf_context_global_init(void *const p) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(p != nullptr); my_core::ST_SCHEMA_TABLE *schema; - DBUG_ENTER("rdb_i_s_perf_context_global_init"); + schema = (my_core::ST_SCHEMA_TABLE *)p; - schema= (my_core::ST_SCHEMA_TABLE*) p; - - schema->fields_info= rdb_i_s_perf_context_global_fields_info; - schema->fill_table= rdb_i_s_perf_context_global_fill_table; + schema->fields_info = rdb_i_s_perf_context_global_fields_info; + schema->fill_table = rdb_i_s_perf_context_global_fill_table; DBUG_RETURN(0); } @@ -431,131 +373,124 @@ static int rdb_i_s_perf_context_global_init(void* const p) /* Support for INFORMATION_SCHEMA.ROCKSDB_CFOPTIONS dynamic table */ -namespace RDB_CFOPTIONS_FIELD -{ - enum - { - CF_NAME= 0, - OPTION_TYPE, - VALUE - }; -} // namespace RDB_CFOPTIONS_FIELD +namespace RDB_CFOPTIONS_FIELD { +enum { CF_NAME = 0, OPTION_TYPE, VALUE }; +} // namespace RDB_CFOPTIONS_FIELD -static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = -{ - ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, 
MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; static int rdb_i_s_cfoptions_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); bool ret; - DBUG_ENTER("rdb_i_s_cfoptions_fill_table"); + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); - Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); - - for (const auto &cf_name : cf_manager.get_cf_names()) - { + for (const auto &cf_name : cf_manager.get_cf_names()) { std::string val; rocksdb::ColumnFamilyOptions opts; cf_manager.get_cf_options(cf_name, &opts); std::vector> cf_option_types = { - {"COMPARATOR", opts.comparator == nullptr ? "NULL" : - std::string(opts.comparator->Name())}, - {"MERGE_OPERATOR", opts.merge_operator == nullptr ? "NULL" : - std::string(opts.merge_operator->Name())}, - {"COMPACTION_FILTER", opts.compaction_filter == nullptr ? "NULL" : - std::string(opts.compaction_filter->Name())}, - {"COMPACTION_FILTER_FACTORY", - opts.compaction_filter_factory == nullptr ? 
"NULL" : - std::string(opts.compaction_filter_factory->Name())}, - {"WRITE_BUFFER_SIZE", std::to_string(opts.write_buffer_size)}, - {"MAX_WRITE_BUFFER_NUMBER", std::to_string(opts.max_write_buffer_number)}, - {"MIN_WRITE_BUFFER_NUMBER_TO_MERGE", - std::to_string(opts.min_write_buffer_number_to_merge)}, - {"NUM_LEVELS", std::to_string(opts.num_levels)}, - {"LEVEL0_FILE_NUM_COMPACTION_TRIGGER", - std::to_string(opts.level0_file_num_compaction_trigger)}, - {"LEVEL0_SLOWDOWN_WRITES_TRIGGER", - std::to_string(opts.level0_slowdown_writes_trigger)}, - {"LEVEL0_STOP_WRITES_TRIGGER", - std::to_string(opts.level0_stop_writes_trigger)}, - {"MAX_MEM_COMPACTION_LEVEL", std::to_string(opts.max_mem_compaction_level)}, - {"TARGET_FILE_SIZE_BASE", std::to_string(opts.target_file_size_base)}, - {"TARGET_FILE_SIZE_MULTIPLIER", std::to_string(opts.target_file_size_multiplier)}, - {"MAX_BYTES_FOR_LEVEL_BASE", std::to_string(opts.max_bytes_for_level_base)}, - {"LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES", - opts.level_compaction_dynamic_level_bytes ? "ON" : "OFF"}, - {"MAX_BYTES_FOR_LEVEL_MULTIPLIER", - std::to_string(opts.max_bytes_for_level_multiplier)}, - {"SOFT_RATE_LIMIT", std::to_string(opts.soft_rate_limit)}, - {"HARD_RATE_LIMIT", std::to_string(opts.hard_rate_limit)}, - {"RATE_LIMIT_DELAY_MAX_MILLISECONDS", - std::to_string(opts.rate_limit_delay_max_milliseconds)}, - {"ARENA_BLOCK_SIZE", std::to_string(opts.arena_block_size)}, - {"DISABLE_AUTO_COMPACTIONS", - opts.disable_auto_compactions ? "ON" : "OFF"}, - {"PURGE_REDUNDANT_KVS_WHILE_FLUSH", - opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"}, - {"VERIFY_CHECKSUM_IN_COMPACTION", - opts.verify_checksums_in_compaction ? "ON" : "OFF"}, - {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS", - std::to_string(opts.max_sequential_skip_in_iterations)}, - {"MEMTABLE_FACTORY", - opts.memtable_factory == nullptr ? "NULL" : - opts.memtable_factory->Name()}, - {"INPLACE_UPDATE_SUPPORT", - opts.inplace_update_support ? 
"ON" : "OFF"}, - {"INPLACE_UPDATE_NUM_LOCKS", - opts.inplace_update_num_locks ? "ON" : "OFF"}, - {"MEMTABLE_PREFIX_BLOOM_BITS_RATIO", - std::to_string(opts.memtable_prefix_bloom_size_ratio)}, - {"MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE", - std::to_string(opts.memtable_huge_page_size)}, - {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)}, - {"MAX_SUCCESSIVE_MERGES", - std::to_string(opts.max_successive_merges)}, - {"MIN_PARTIAL_MERGE_OPERANDS", - std::to_string(opts.min_partial_merge_operands)}, - {"OPTIMIZE_FILTERS_FOR_HITS", - (opts.optimize_filters_for_hits ? "ON" : "OFF")}, + {"COMPARATOR", opts.comparator == nullptr + ? "NULL" + : std::string(opts.comparator->Name())}, + {"MERGE_OPERATOR", opts.merge_operator == nullptr + ? "NULL" + : std::string(opts.merge_operator->Name())}, + {"COMPACTION_FILTER", + opts.compaction_filter == nullptr + ? "NULL" + : std::string(opts.compaction_filter->Name())}, + {"COMPACTION_FILTER_FACTORY", + opts.compaction_filter_factory == nullptr + ? "NULL" + : std::string(opts.compaction_filter_factory->Name())}, + {"WRITE_BUFFER_SIZE", std::to_string(opts.write_buffer_size)}, + {"MAX_WRITE_BUFFER_NUMBER", + std::to_string(opts.max_write_buffer_number)}, + {"MIN_WRITE_BUFFER_NUMBER_TO_MERGE", + std::to_string(opts.min_write_buffer_number_to_merge)}, + {"NUM_LEVELS", std::to_string(opts.num_levels)}, + {"LEVEL0_FILE_NUM_COMPACTION_TRIGGER", + std::to_string(opts.level0_file_num_compaction_trigger)}, + {"LEVEL0_SLOWDOWN_WRITES_TRIGGER", + std::to_string(opts.level0_slowdown_writes_trigger)}, + {"LEVEL0_STOP_WRITES_TRIGGER", + std::to_string(opts.level0_stop_writes_trigger)}, + {"MAX_MEM_COMPACTION_LEVEL", + std::to_string(opts.max_mem_compaction_level)}, + {"TARGET_FILE_SIZE_BASE", std::to_string(opts.target_file_size_base)}, + {"TARGET_FILE_SIZE_MULTIPLIER", + std::to_string(opts.target_file_size_multiplier)}, + {"MAX_BYTES_FOR_LEVEL_BASE", + std::to_string(opts.max_bytes_for_level_base)}, + 
{"LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES", + opts.level_compaction_dynamic_level_bytes ? "ON" : "OFF"}, + {"MAX_BYTES_FOR_LEVEL_MULTIPLIER", + std::to_string(opts.max_bytes_for_level_multiplier)}, + {"SOFT_RATE_LIMIT", std::to_string(opts.soft_rate_limit)}, + {"HARD_RATE_LIMIT", std::to_string(opts.hard_rate_limit)}, + {"RATE_LIMIT_DELAY_MAX_MILLISECONDS", + std::to_string(opts.rate_limit_delay_max_milliseconds)}, + {"ARENA_BLOCK_SIZE", std::to_string(opts.arena_block_size)}, + {"DISABLE_AUTO_COMPACTIONS", + opts.disable_auto_compactions ? "ON" : "OFF"}, + {"PURGE_REDUNDANT_KVS_WHILE_FLUSH", + opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"}, + {"VERIFY_CHECKSUM_IN_COMPACTION", + opts.verify_checksums_in_compaction ? "ON" : "OFF"}, + {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS", + std::to_string(opts.max_sequential_skip_in_iterations)}, + {"MEMTABLE_FACTORY", opts.memtable_factory == nullptr + ? "NULL" + : opts.memtable_factory->Name()}, + {"INPLACE_UPDATE_SUPPORT", opts.inplace_update_support ? "ON" : "OFF"}, + {"INPLACE_UPDATE_NUM_LOCKS", + opts.inplace_update_num_locks ? "ON" : "OFF"}, + {"MEMTABLE_PREFIX_BLOOM_BITS_RATIO", + std::to_string(opts.memtable_prefix_bloom_size_ratio)}, + {"MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE", + std::to_string(opts.memtable_huge_page_size)}, + {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)}, + {"MAX_SUCCESSIVE_MERGES", std::to_string(opts.max_successive_merges)}, + {"MIN_PARTIAL_MERGE_OPERANDS", + std::to_string(opts.min_partial_merge_operands)}, + {"OPTIMIZE_FILTERS_FOR_HITS", + (opts.optimize_filters_for_hits ? "ON" : "OFF")}, }; // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value val = opts.max_bytes_for_level_multiplier_additional.empty() ? 
"NULL" : ""; - for (const auto &level : opts.max_bytes_for_level_multiplier_additional) - { + for (const auto &level : opts.max_bytes_for_level_multiplier_additional) { val.append(std::to_string(level) + ":"); } val.pop_back(); - cf_option_types.push_back({"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val}); + cf_option_types.push_back( + {"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val}); // get COMPRESSION_TYPE option value GetStringFromCompressionType(&val, opts.compression); - if (val.empty()) - { + if (val.empty()) { val = "NULL"; } cf_option_types.push_back({"COMPRESSION_TYPE", val}); // get COMPRESSION_PER_LEVEL option value val = opts.compression_per_level.empty() ? "NULL" : ""; - for (const auto &compression_type : opts.compression_per_level) - { + for (const auto &compression_type : opts.compression_per_level) { std::string res; GetStringFromCompressionType(&res, compression_type); - if (!res.empty()) - { + if (!res.empty()) { val.append(res + ":"); } } @@ -569,35 +504,42 @@ static int rdb_i_s_cfoptions_fill_table( cf_option_types.push_back({"COMPRESSION_OPTS", val}); // bottommost_compression - if (opts.bottommost_compression) - { + if (opts.bottommost_compression) { std::string res; GetStringFromCompressionType(&res, opts.bottommost_compression); - if (!res.empty()) - { + if (!res.empty()) { cf_option_types.push_back({"BOTTOMMOST_COMPRESSION", res}); } } // get PREFIX_EXTRACTOR option - cf_option_types.push_back({"PREFIX_EXTRACTOR", - opts.prefix_extractor == nullptr ? "NULL" : - std::string(opts.prefix_extractor->Name())}); + cf_option_types.push_back( + {"PREFIX_EXTRACTOR", opts.prefix_extractor == nullptr + ? 
"NULL" + : std::string(opts.prefix_extractor->Name())}); // get COMPACTION_STYLE option - switch (opts.compaction_style) - { - case rocksdb::kCompactionStyleLevel: val = "kCompactionStyleLevel"; break; - case rocksdb::kCompactionStyleUniversal: val = "kCompactionStyleUniversal"; break; - case rocksdb:: kCompactionStyleFIFO: val = "kCompactionStyleFIFO"; break; - case rocksdb:: kCompactionStyleNone: val = "kCompactionStyleNone"; break; - default: val = "NULL"; + switch (opts.compaction_style) { + case rocksdb::kCompactionStyleLevel: + val = "kCompactionStyleLevel"; + break; + case rocksdb::kCompactionStyleUniversal: + val = "kCompactionStyleUniversal"; + break; + case rocksdb::kCompactionStyleFIFO: + val = "kCompactionStyleFIFO"; + break; + case rocksdb::kCompactionStyleNone: + val = "kCompactionStyleNone"; + break; + default: + val = "NULL"; } cf_option_types.push_back({"COMPACTION_STYLE", val}); // get COMPACTION_OPTIONS_UNIVERSAL related options const rocksdb::CompactionOptionsUniversal compac_opts = - opts.compaction_options_universal; + opts.compaction_options_universal; val = "{SIZE_RATIO="; val.append(std::to_string(compac_opts.size_ratio)); val.append("; MIN_MERGE_WIDTH="); @@ -609,105 +551,126 @@ static int rdb_i_s_cfoptions_fill_table( val.append("; COMPRESSION_SIZE_PERCENT="); val.append(std::to_string(compac_opts.compression_size_percent)); val.append("; STOP_STYLE="); - switch (compac_opts.stop_style) - { - case rocksdb::kCompactionStopStyleSimilarSize: - val.append("kCompactionStopStyleSimilarSize}"); break; - case rocksdb::kCompactionStopStyleTotalSize: - val.append("kCompactionStopStyleTotalSize}"); break; - default: val.append("}"); + switch (compac_opts.stop_style) { + case rocksdb::kCompactionStopStyleSimilarSize: + val.append("kCompactionStopStyleSimilarSize}"); + break; + case rocksdb::kCompactionStopStyleTotalSize: + val.append("kCompactionStopStyleTotalSize}"); + break; + default: + val.append("}"); } 
cf_option_types.push_back({"COMPACTION_OPTIONS_UNIVERSAL", val}); // get COMPACTION_OPTION_FIFO option - cf_option_types.push_back({"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE", - std::to_string(opts.compaction_options_fifo.max_table_files_size)}); + cf_option_types.push_back( + {"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE", + std::to_string(opts.compaction_options_fifo.max_table_files_size)}); // get block-based table related options - const rocksdb::BlockBasedTableOptions& table_options= rdb_get_table_options(); + const rocksdb::BlockBasedTableOptions &table_options = + rdb_get_table_options(); // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option cf_option_types.push_back( {"BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS", - table_options.cache_index_and_filter_blocks ? "1" : "0"}); + table_options.cache_index_and_filter_blocks ? "1" : "0"}); // get BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE option value - switch (table_options.index_type) - { - case rocksdb::BlockBasedTableOptions::kBinarySearch: val = "kBinarySearch"; break; - case rocksdb::BlockBasedTableOptions::kHashSearch: val = "kHashSearch"; break; - default: val = "NULL"; + switch (table_options.index_type) { + case rocksdb::BlockBasedTableOptions::kBinarySearch: + val = "kBinarySearch"; + break; + case rocksdb::BlockBasedTableOptions::kHashSearch: + val = "kHashSearch"; + break; + default: + val = "NULL"; } cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE", val}); // get BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION option value - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION", - table_options.hash_index_allow_collision ? "ON" : "OFF"}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION", + table_options.hash_index_allow_collision ? 
"ON" : "OFF"}); // get BLOCK_BASED_TABLE_FACTORY::CHECKSUM option value - switch (table_options.checksum) - { - case rocksdb::kNoChecksum: val = "kNoChecksum"; break; - case rocksdb::kCRC32c: val = "kCRC32c"; break; - case rocksdb::kxxHash: val = "kxxHash"; break; - default: val = "NULL"; + switch (table_options.checksum) { + case rocksdb::kNoChecksum: + val = "kNoChecksum"; + break; + case rocksdb::kCRC32c: + val = "kCRC32c"; + break; + case rocksdb::kxxHash: + val = "kxxHash"; + break; + default: + val = "NULL"; } cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::CHECKSUM", val}); // get BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE option value cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE", - table_options.no_block_cache ? "ON" : "OFF"}); + table_options.no_block_cache ? "ON" : "OFF"}); // get BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY", - table_options.filter_policy == nullptr ? "NULL" : - std::string(table_options.filter_policy->Name())}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY", + table_options.filter_policy == nullptr + ? "NULL" + : std::string(table_options.filter_policy->Name())}); // get BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING option cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING", - table_options.whole_key_filtering ? "1" : "0"}); + table_options.whole_key_filtering ? "1" : "0"}); // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE", - table_options.block_cache == nullptr ? "NULL" : - std::to_string(table_options.block_cache->GetUsage())}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE", + table_options.block_cache == nullptr + ? 
"NULL" + : std::to_string(table_options.block_cache->GetUsage())}); // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED", - table_options.block_cache_compressed == nullptr ? "NULL" : - std::to_string(table_options.block_cache_compressed->GetUsage())}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED", + table_options.block_cache_compressed == nullptr + ? "NULL" + : std::to_string( + table_options.block_cache_compressed->GetUsage())}); // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE option cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE", - std::to_string(table_options.block_size)}); + std::to_string(table_options.block_size)}); // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION", - std::to_string(table_options.block_size_deviation)}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION", + std::to_string(table_options.block_size_deviation)}); // get BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL", - std::to_string(table_options.block_restart_interval)}); + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL", + std::to_string(table_options.block_restart_interval)}); // get BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION option cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION", - std::to_string(table_options.format_version)}); + std::to_string(table_options.format_version)}); - for (const auto &cf_option_type : cf_option_types) - { + for (const auto &cf_option_type : cf_option_types) { DBUG_ASSERT(tables->table != nullptr); DBUG_ASSERT(tables->table->field != nullptr); tables->table->field[RDB_CFOPTIONS_FIELD::CF_NAME]->store( - cf_name.c_str(), cf_name.size(), 
system_charset_info); + cf_name.c_str(), cf_name.size(), system_charset_info); tables->table->field[RDB_CFOPTIONS_FIELD::OPTION_TYPE]->store( - cf_option_type.first.c_str(), - cf_option_type.first.size(), - system_charset_info); + cf_option_type.first.c_str(), cf_option_type.first.size(), + system_charset_info); tables->table->field[RDB_CFOPTIONS_FIELD::VALUE]->store( - cf_option_type.second.c_str(), - cf_option_type.second.size(), - system_charset_info); + cf_option_type.second.c_str(), cf_option_type.second.size(), + system_charset_info); ret = my_core::schema_table_store_record(thd, tables->table); @@ -721,35 +684,26 @@ static int rdb_i_s_cfoptions_fill_table( /* Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table */ -namespace RDB_GLOBAL_INFO_FIELD -{ - enum - { - TYPE= 0, - NAME, - VALUE - }; +namespace RDB_GLOBAL_INFO_FIELD { +enum { TYPE = 0, NAME, VALUE }; } -static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = -{ - ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("NAME", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = { + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; /* * helper function for rdb_i_s_global_info_fill_table * to insert (TYPE, KEY, VALUE) rows into * information_schema.rocksdb_global_info */ -static int rdb_global_info_fill_row(my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - const char* const type, - const char* const name, - const char* const value) -{ +static int rdb_global_info_fill_row(my_core::THD *const thd, + my_core::TABLE_LIST *const tables, + const char *const type, + const char *const name, + const char *const value) { DBUG_ASSERT(thd != nullptr); 
DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); @@ -757,56 +711,55 @@ static int rdb_global_info_fill_row(my_core::THD* const thd, DBUG_ASSERT(name != nullptr); DBUG_ASSERT(value != nullptr); - Field **field= tables->table->field; + Field **field = tables->table->field; DBUG_ASSERT(field != nullptr); - field[RDB_GLOBAL_INFO_FIELD::TYPE]->store( - type, strlen(type), system_charset_info); - field[RDB_GLOBAL_INFO_FIELD::NAME]->store( - name, strlen(name), system_charset_info); - field[RDB_GLOBAL_INFO_FIELD::VALUE]->store( - value, strlen(value), system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::TYPE]->store(type, strlen(type), + system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::NAME]->store(name, strlen(name), + system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::VALUE]->store(value, strlen(value), + system_charset_info); return my_core::schema_table_store_record(thd, tables->table); } static int rdb_i_s_global_info_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); - DBUG_ENTER("rdb_i_s_global_info_fill_table"); static const uint32_t INT_BUF_LEN = 21; static const uint32_t GTID_BUF_LEN = 60; static const uint32_t CF_ID_INDEX_BUF_LEN = 60; - int ret= 0; + int ret = 0; /* binlog info */ - Rdb_binlog_manager* const blm= rdb_get_binlog_manager(); + Rdb_binlog_manager *const blm = rdb_get_binlog_manager(); DBUG_ASSERT(blm != nullptr); - char file_buf[FN_REFLEN+1]= {0}; + char file_buf[FN_REFLEN + 1] = {0}; my_off_t pos = 0; - char pos_buf[INT_BUF_LEN]= {0}; - char gtid_buf[GTID_BUF_LEN]= {0}; + char pos_buf[INT_BUF_LEN] = {0}; + char gtid_buf[GTID_BUF_LEN] = {0}; if (blm->read(file_buf, &pos, gtid_buf)) { - snprintf(pos_buf, INT_BUF_LEN, "%lu", (uint64_t) 
pos); + snprintf(pos_buf, INT_BUF_LEN, "%lu", (uint64_t)pos); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf); } /* max index info */ - const Rdb_dict_manager* const dict_manager= rdb_get_dict_manager(); + const Rdb_dict_manager *const dict_manager = rdb_get_dict_manager(); DBUG_ASSERT(dict_manager != nullptr); uint32_t max_index_id; - char max_index_id_buf[INT_BUF_LEN]= {0}; + char max_index_id_buf[INT_BUF_LEN] = {0}; if (dict_manager->get_max_index_id(&max_index_id)) { snprintf(max_index_id_buf, INT_BUF_LEN, "%u", max_index_id); @@ -815,32 +768,32 @@ static int rdb_i_s_global_info_fill_table( } /* cf_id -> cf_flags */ - char cf_id_buf[INT_BUF_LEN]= {0}; - char cf_value_buf[FN_REFLEN+1] = {0}; - const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + char cf_id_buf[INT_BUF_LEN] = {0}; + char cf_value_buf[FN_REFLEN + 1] = {0}; + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); for (const auto &cf_handle : cf_manager.get_all_cf()) { uint flags; dict_manager->get_cf_flags(cf_handle->GetID(), &flags); snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); snprintf(cf_value_buf, FN_REFLEN, "%s [%u]", cf_handle->GetName().c_str(), - flags); + flags); ret |= rdb_global_info_fill_row(thd, tables, "CF_FLAGS", cf_id_buf, - cf_value_buf); + cf_value_buf); if (ret) break; } /* DDL_DROP_INDEX_ONGOING */ - std::vector gl_index_ids; - dict_manager->get_ongoing_index_operation(&gl_index_ids, - Rdb_key_def::DDL_DROP_INDEX_ONGOING); - char cf_id_index_buf[CF_ID_INDEX_BUF_LEN]= {0}; + std::unordered_set gl_index_ids; + dict_manager->get_ongoing_index_operation( + &gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); + char cf_id_index_buf[CF_ID_INDEX_BUF_LEN] = {0}; for (auto gl_index_id : gl_index_ids) { snprintf(cf_id_index_buf, CF_ID_INDEX_BUF_LEN, "cf_id:%u,index_id:%u", - gl_index_id.cf_id, 
gl_index_id.index_id); + gl_index_id.cf_id, gl_index_id.index_id); ret |= rdb_global_info_fill_row(thd, tables, "DDL_DROP_INDEX_ONGOING", - cf_id_index_buf, ""); + cf_id_index_buf, ""); if (ret) break; @@ -849,177 +802,242 @@ static int rdb_i_s_global_info_fill_table( DBUG_RETURN(ret); } +/* + Support for INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS dynamic table + */ +static int rdb_i_s_compact_stats_fill_table( + my_core::THD *thd, my_core::TABLE_LIST *tables, + my_core::Item *cond MY_ATTRIBUTE((__unused__))) { + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); -namespace // anonymous namespace = not visible outside this source file + DBUG_ENTER("rdb_i_s_global_compact_stats_table"); + + int ret = 0; + + rocksdb::DB *rdb = rdb_get_rocksdb_db(); + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + DBUG_ASSERT(rdb != nullptr); + + for (auto cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; + bool is_automatic; + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. 
+ */ + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) { + continue; + } + std::map props; + bool bool_ret MY_ATTRIBUTE((__unused__)); + bool_ret = rdb->GetMapProperty(cfh, "rocksdb.cfstats", &props); + DBUG_ASSERT(bool_ret); + + for (auto const &prop_ent : props) { + std::string prop_name = prop_ent.first; + double value = prop_ent.second; + std::size_t del_pos = prop_name.find('.'); + DBUG_ASSERT(del_pos != std::string::npos); + std::string level_str = prop_name.substr(0, del_pos); + std::string type_str = prop_name.substr(del_pos + 1); + + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + field[0]->store(cf_name.c_str(), cf_name.size(), system_charset_info); + field[1]->store(level_str.c_str(), level_str.size(), system_charset_info); + field[2]->store(type_str.c_str(), type_str.size(), system_charset_info); + field[3]->store(value, true); + + ret |= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + DBUG_RETURN(ret); + } + } + } + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_compact_stats_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("LEVEL", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(double), MYSQL_TYPE_DOUBLE, 0), + ROCKSDB_FIELD_INFO_END}; + +namespace // anonymous namespace = not visible outside this source file { -struct Rdb_ddl_scanner : public Rdb_tables_scanner -{ - my_core::THD *m_thd; +struct Rdb_ddl_scanner : public Rdb_tables_scanner { + my_core::THD *m_thd; my_core::TABLE *m_table; - int add_table(Rdb_tbl_def* tdef) override; + int add_table(Rdb_tbl_def *tdef) override; }; -} // anonymous namespace +} // anonymous namespace /* Support for INFORMATION_SCHEMA.ROCKSDB_DDL dynamic table */ -namespace RDB_DDL_FIELD -{ - enum - { - TABLE_SCHEMA= 0, - TABLE_NAME, - PARTITION_NAME, - INDEX_NAME, - 
COLUMN_FAMILY, - INDEX_NUMBER, - INDEX_TYPE, - KV_FORMAT_VERSION, - CF - }; -} // namespace RDB_DDL_FIELD - -static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = -{ - ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, - MY_I_S_MAYBE_NULL), - ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), - ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t), - MYSQL_TYPE_SHORT, 0), - ROCKSDB_FIELD_INFO("CF", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END +namespace RDB_DDL_FIELD { +enum { + TABLE_SCHEMA = 0, + TABLE_NAME, + PARTITION_NAME, + INDEX_NAME, + COLUMN_FAMILY, + INDEX_NUMBER, + INDEX_TYPE, + KV_FORMAT_VERSION, + CF }; +} // namespace RDB_DDL_FIELD -int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) -{ +static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = { + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), + ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t), MYSQL_TYPE_SHORT, + 0), + ROCKSDB_FIELD_INFO("CF", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) { DBUG_ASSERT(tdef != nullptr); - int ret= 0; + int 
ret = 0; DBUG_ASSERT(m_table != nullptr); - Field** field= m_table->field; + Field **field = m_table->field; DBUG_ASSERT(field != nullptr); - const std::string& dbname= tdef->base_dbname(); - field[RDB_DDL_FIELD::TABLE_SCHEMA]->store( - dbname.c_str(), dbname.size(), system_charset_info); + const std::string &dbname = tdef->base_dbname(); + field[RDB_DDL_FIELD::TABLE_SCHEMA]->store(dbname.c_str(), dbname.size(), + system_charset_info); - const std::string& tablename= tdef->base_tablename(); - field[RDB_DDL_FIELD::TABLE_NAME]->store( - tablename.c_str(), tablename.size(), system_charset_info); + const std::string &tablename = tdef->base_tablename(); + field[RDB_DDL_FIELD::TABLE_NAME]->store(tablename.c_str(), tablename.size(), + system_charset_info); - const std::string& partname= tdef->base_partition(); - if (partname.length() == 0) - { + const std::string &partname = tdef->base_partition(); + if (partname.length() == 0) { field[RDB_DDL_FIELD::PARTITION_NAME]->set_null(); - } - else - { + } else { field[RDB_DDL_FIELD::PARTITION_NAME]->set_notnull(); field[RDB_DDL_FIELD::PARTITION_NAME]->store( - partname.c_str(), partname.size(), system_charset_info); + partname.c_str(), partname.size(), system_charset_info); } - for (uint i= 0; i < tdef->m_key_count; i++) - { - const Rdb_key_def& kd= *tdef->m_key_descr_arr[i]; + for (uint i = 0; i < tdef->m_key_count; i++) { + const Rdb_key_def &kd = *tdef->m_key_descr_arr[i]; - field[RDB_DDL_FIELD::INDEX_NAME]->store( - kd.m_name.c_str(), kd.m_name.size(), system_charset_info); + field[RDB_DDL_FIELD::INDEX_NAME]->store(kd.m_name.c_str(), kd.m_name.size(), + system_charset_info); GL_INDEX_ID gl_index_id = kd.get_gl_index_id(); field[RDB_DDL_FIELD::COLUMN_FAMILY]->store(gl_index_id.cf_id, true); field[RDB_DDL_FIELD::INDEX_NUMBER]->store(gl_index_id.index_id, true); field[RDB_DDL_FIELD::INDEX_TYPE]->store(kd.m_index_type, true); - field[RDB_DDL_FIELD::KV_FORMAT_VERSION]->store( - kd.m_kv_format_version, true); + 
field[RDB_DDL_FIELD::KV_FORMAT_VERSION]->store(kd.m_kv_format_version, + true); - std::string cf_name= kd.get_cf()->GetName(); - field[RDB_DDL_FIELD::CF]->store( - cf_name.c_str(), cf_name.size(), system_charset_info); + std::string cf_name = kd.get_cf()->GetName(); + field[RDB_DDL_FIELD::CF]->store(cf_name.c_str(), cf_name.size(), + system_charset_info); - ret= my_core::schema_table_store_record(m_thd, m_table); + ret = my_core::schema_table_store_record(m_thd, m_table); if (ret) return ret; } - return 0; + return HA_EXIT_SUCCESS; } -static int rdb_i_s_ddl_fill_table(my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond) -{ - DBUG_ENTER("rdb_i_s_ddl_fill_table"); +static int rdb_i_s_ddl_fill_table(my_core::THD *const thd, + my_core::TABLE_LIST *const tables, + my_core::Item *const cond) { + DBUG_ENTER_FUNC(); DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); Rdb_ddl_scanner ddl_arg; - ddl_arg.m_thd= thd; - ddl_arg.m_table= tables->table; + ddl_arg.m_thd = thd; + ddl_arg.m_table = tables->table; - Rdb_ddl_manager *ddl_manager= rdb_get_ddl_manager(); + Rdb_ddl_manager *ddl_manager = rdb_get_ddl_manager(); DBUG_ASSERT(ddl_manager != nullptr); - int ret= ddl_manager->scan_for_tables(&ddl_arg); + int ret = ddl_manager->scan_for_tables(&ddl_arg); DBUG_RETURN(ret); } -static int rdb_i_s_ddl_init(void* const p) -{ +static int rdb_i_s_ddl_init(void *const p) { + DBUG_ENTER_FUNC(); + my_core::ST_SCHEMA_TABLE *schema; - DBUG_ENTER("rdb_i_s_ddl_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + schema = (my_core::ST_SCHEMA_TABLE *)p; - schema->fields_info= rdb_i_s_ddl_fields_info; - schema->fill_table= rdb_i_s_ddl_fill_table; + schema->fields_info = rdb_i_s_ddl_fields_info; + schema->fill_table = rdb_i_s_ddl_fill_table; DBUG_RETURN(0); } -static int rdb_i_s_cfoptions_init(void* const p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_cfoptions_init(void *const p) { + DBUG_ENTER_FUNC(); - 
DBUG_ENTER("rdb_i_s_cfoptions_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_cfoptions_fields_info; - schema->fill_table= rdb_i_s_cfoptions_fill_table; + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_cfoptions_fields_info; + schema->fill_table = rdb_i_s_cfoptions_fill_table; DBUG_RETURN(0); } -static int rdb_i_s_global_info_init(void* const p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_global_info_init(void *const p) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("rdb_i_s_global_info_init"); DBUG_ASSERT(p != nullptr); - schema= reinterpret_cast(p); + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_global_info_fields_info; - schema->fill_table= rdb_i_s_global_info_fill_table; + schema = reinterpret_cast(p); + + schema->fields_info = rdb_i_s_global_info_fields_info; + schema->fill_table = rdb_i_s_global_info_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_compact_stats_init(void *p) { + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER("rdb_i_s_compact_stats_init"); + DBUG_ASSERT(p != nullptr); + + schema = reinterpret_cast(p); + + schema->fields_info = rdb_i_s_compact_stats_fields_info; + schema->fill_table = rdb_i_s_compact_stats_fill_table; DBUG_RETURN(0); } /* Given a path to a file return just the filename portion. 
*/ -static std::string rdb_filename_without_path( - const std::string& path) -{ +static std::string rdb_filename_without_path(const std::string &path) { /* Find last slash in path */ const size_t pos = path.rfind('/'); @@ -1035,70 +1053,68 @@ static std::string rdb_filename_without_path( /* Support for INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP dynamic table */ -namespace RDB_INDEX_FILE_MAP_FIELD -{ - enum - { - COLUMN_FAMILY= 0, - INDEX_NUMBER, - SST_NAME, - NUM_ROWS, - DATA_SIZE, - ENTRY_DELETES, - ENTRY_SINGLEDELETES, - ENTRY_MERGES, - ENTRY_OTHERS - }; -} // namespace RDB_INDEX_FILE_MAP_FIELD - -static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = -{ - /* The information_schema.rocksdb_index_file_map virtual table has four - * fields: - * COLUMN_FAMILY => the index's column family contained in the SST file - * INDEX_NUMBER => the index id contained in the SST file - * SST_NAME => the name of the SST file containing some indexes - * NUM_ROWS => the number of entries of this index id in this SST file - * DATA_SIZE => the data size stored in this SST file for this index id */ - ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("SST_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("NUM_ROWS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("DATA_SIZE", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("ENTRY_DELETES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("ENTRY_SINGLEDELETES", sizeof(int64_t), - MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("ENTRY_MERGES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("ENTRY_OTHERS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO_END +namespace RDB_INDEX_FILE_MAP_FIELD { +enum { + COLUMN_FAMILY = 0, + INDEX_NUMBER, + SST_NAME, + NUM_ROWS, + DATA_SIZE, + ENTRY_DELETES, + ENTRY_SINGLEDELETES, + ENTRY_MERGES, 
+ ENTRY_OTHERS, + DISTINCT_KEYS_PREFIX }; +} // namespace RDB_INDEX_FILE_MAP_FIELD + +static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = { + /* The information_schema.rocksdb_index_file_map virtual table has four + * fields: + * COLUMN_FAMILY => the index's column family contained in the SST file + * INDEX_NUMBER => the index id contained in the SST file + * SST_NAME => the name of the SST file containing some indexes + * NUM_ROWS => the number of entries of this index id in this SST file + * DATA_SIZE => the data size stored in this SST file for this index id */ + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SST_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NUM_ROWS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("DATA_SIZE", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_DELETES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("ENTRY_SINGLEDELETES", sizeof(int64_t), + MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_MERGES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_OTHERS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("DISTINCT_KEYS_PREFIX", MAX_REF_PARTS * 25, + MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; /* Fill the information_schema.rocksdb_index_file_map virtual table */ static int rdb_i_s_index_file_map_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); - int ret = 0; + int ret = 0; Field **field = tables->table->field; DBUG_ASSERT(field != nullptr); - 
DBUG_ENTER("rdb_i_s_index_file_map_fill_table"); - /* Iterate over all the column families */ - rocksdb::DB* const rdb= rdb_get_rocksdb_db(); + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); DBUG_ASSERT(rdb != nullptr); - const Rdb_cf_manager& cf_manager= rdb_get_cf_manager(); + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); for (const auto &cf_handle : cf_manager.get_all_cf()) { /* Grab the the properties of all the tables in the column family */ rocksdb::TablePropertiesCollection table_props_collection; - const rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle, - &table_props_collection); + const rocksdb::Status s = + rdb->GetPropertiesOfAllTables(cf_handle, &table_props_collection); if (!s.ok()) { continue; } @@ -1109,7 +1125,7 @@ static int rdb_i_s_index_file_map_fill_table( /* Add the SST name into the output */ const std::string sst_name = rdb_filename_without_path(props.first); field[RDB_INDEX_FILE_MAP_FIELD::SST_NAME]->store( - sst_name.data(), sst_name.size(), system_charset_info); + sst_name.data(), sst_name.size(), system_charset_info); /* Get the __indexstats__ data out of the table property */ std::vector stats; @@ -1123,28 +1139,41 @@ static int rdb_i_s_index_file_map_fill_table( field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store(-1, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store(-1, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store(-1, true); - } - else { + } else { for (auto it : stats) { - /* Add the index number, the number of rows, and data size to the output */ + /* Add the index number, the number of rows, and data size to the + * output */ field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store( - it.m_gl_index_id.cf_id, true); + it.m_gl_index_id.cf_id, true); field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store( - it.m_gl_index_id.index_id, true); + it.m_gl_index_id.index_id, true); field[RDB_INDEX_FILE_MAP_FIELD::NUM_ROWS]->store(it.m_rows, true); - 
field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store( - it.m_data_size, true); + field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store(it.m_data_size, + true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_DELETES]->store( - it.m_entry_deletes, true); + it.m_entry_deletes, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store( - it.m_entry_single_deletes, true); + it.m_entry_single_deletes, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store( - it.m_entry_merges, true); + it.m_entry_merges, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store( - it.m_entry_others, true); + it.m_entry_others, true); + std::string distinct_keys_prefix; + + for (size_t i = 0; i < it.m_distinct_keys_per_prefix.size(); i++) { + if (i > 0) { + distinct_keys_prefix += ","; + } + distinct_keys_prefix += + std::to_string(it.m_distinct_keys_per_prefix[i]); + } + + field[RDB_INDEX_FILE_MAP_FIELD::DISTINCT_KEYS_PREFIX]->store( + distinct_keys_prefix.data(), distinct_keys_prefix.size(), + system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret != 0) { break; } @@ -1157,17 +1186,17 @@ static int rdb_i_s_index_file_map_fill_table( } /* Initialize the information_schema.rocksdb_index_file_map virtual table */ -static int rdb_i_s_index_file_map_init(void* const p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_index_file_map_init(void *const p) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("rdb_i_s_index_file_map_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_index_file_map_fields_info; - schema->fill_table= rdb_i_s_index_file_map_fill_table; + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_index_file_map_fields_info; + schema->fill_table = rdb_i_s_index_file_map_fill_table; DBUG_RETURN(0); } @@ -1175,67 
+1204,55 @@ static int rdb_i_s_index_file_map_init(void* const p) /* Support for INFORMATION_SCHEMA.ROCKSDB_LOCKS dynamic table */ -namespace RDB_LOCKS_FIELD -{ - enum - { - COLUMN_FAMILY_ID= 0, - TRANSACTION_ID, - KEY, - MODE - }; -} // namespace RDB_LOCKS_FIELD +namespace RDB_LOCKS_FIELD { +enum { COLUMN_FAMILY_ID = 0, TRANSACTION_ID, KEY, MODE }; +} // namespace RDB_LOCKS_FIELD -static ST_FIELD_INFO rdb_i_s_lock_info_fields_info[] = -{ - ROCKSDB_FIELD_INFO("COLUMN_FAMILY_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("KEY", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("MODE", 32, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END -}; +static ST_FIELD_INFO rdb_i_s_lock_info_fields_info[] = { + ROCKSDB_FIELD_INFO("COLUMN_FAMILY_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, + 0), + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("KEY", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("MODE", 32, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; /* Fill the information_schema.rocksdb_locks virtual table */ static int rdb_i_s_lock_info_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); int ret = 0; - DBUG_ENTER("rdb_i_s_lock_info_fill_table"); - - rocksdb::TransactionDB* const rdb= rdb_get_rocksdb_db(); + rocksdb::TransactionDB *const rdb = rdb_get_rocksdb_db(); DBUG_ASSERT(rdb != nullptr); /* cf id -> rocksdb::KeyLockInfo */ std::unordered_multimap lock_info = - rdb->GetLockStatusData(); + rdb->GetLockStatusData(); - for (const auto& lock : lock_info) { + for (const auto &lock : lock_info) 
{ const uint32_t cf_id = lock.first; - const auto& key_lock_info = lock.second; + const auto &key_lock_info = lock.second; const auto key_hexstr = rdb_hexdump(key_lock_info.key.c_str(), - key_lock_info.key.length(), FN_REFLEN); + key_lock_info.key.length(), FN_REFLEN); for (const auto &id : key_lock_info.ids) { - tables->table->field[RDB_LOCKS_FIELD::COLUMN_FAMILY_ID]->store( - cf_id, true); + tables->table->field[RDB_LOCKS_FIELD::COLUMN_FAMILY_ID]->store(cf_id, + true); tables->table->field[RDB_LOCKS_FIELD::TRANSACTION_ID]->store(id, true); tables->table->field[RDB_LOCKS_FIELD::KEY]->store( - key_hexstr.c_str(), key_hexstr.size(), - system_charset_info); + key_hexstr.c_str(), key_hexstr.size(), system_charset_info); tables->table->field[RDB_LOCKS_FIELD::MODE]->store( - key_lock_info.exclusive ? "X" : "S", - 1, system_charset_info); + key_lock_info.exclusive ? "X" : "S", 1, system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret != 0) { break; } @@ -1245,17 +1262,17 @@ static int rdb_i_s_lock_info_fill_table( } /* Initialize the information_schema.rocksdb_lock_info virtual table */ -static int rdb_i_s_lock_info_init(void* const p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_lock_info_init(void *const p) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("rdb_i_s_lock_info_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_lock_info_fields_info; - schema->fill_table= rdb_i_s_lock_info_fill_table; + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_lock_info_fields_info; + schema->fill_table = rdb_i_s_lock_info_fill_table; DBUG_RETURN(0); } @@ -1263,106 +1280,100 @@ static int rdb_i_s_lock_info_init(void* const p) /* Support for INFORMATION_SCHEMA.ROCKSDB_TRX dynamic table */ -namespace RDB_TRX_FIELD 
-{ - enum - { - TRANSACTION_ID= 0, - STATE, - NAME, - WRITE_COUNT, - LOCK_COUNT, - TIMEOUT_SEC, - WAITING_KEY, - WAITING_COLUMN_FAMILY_ID, - IS_REPLICATION, - SKIP_TRX_API, - READ_ONLY, - HAS_DEADLOCK_DETECTION, - NUM_ONGOING_BULKLOAD, - THREAD_ID, - QUERY - }; -} // namespace RDB_TRX_FIELD - -static ST_FIELD_INFO rdb_i_s_trx_info_fields_info[] = -{ - ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(ulonglong), - MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("STATE", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("WRITE_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("LOCK_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), - ROCKSDB_FIELD_INFO("TIMEOUT_SEC", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("WAITING_KEY", FN_REFLEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO("WAITING_COLUMN_FAMILY_ID", sizeof(uint32_t), - MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("IS_REPLICATION", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("SKIP_TRX_API", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("READ_ONLY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("HAS_DEADLOCK_DETECTION", sizeof(uint32_t), - MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("NUM_ONGOING_BULKLOAD", sizeof(uint32_t), - MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("THREAD_ID", sizeof(ulong), MYSQL_TYPE_LONG, 0), - ROCKSDB_FIELD_INFO("QUERY", NAME_LEN+1, MYSQL_TYPE_STRING, 0), - ROCKSDB_FIELD_INFO_END +namespace RDB_TRX_FIELD { +enum { + TRANSACTION_ID = 0, + STATE, + NAME, + WRITE_COUNT, + LOCK_COUNT, + TIMEOUT_SEC, + WAITING_KEY, + WAITING_COLUMN_FAMILY_ID, + IS_REPLICATION, + SKIP_TRX_API, + READ_ONLY, + HAS_DEADLOCK_DETECTION, + NUM_ONGOING_BULKLOAD, + THREAD_ID, + QUERY }; +} // namespace RDB_TRX_FIELD + +static ST_FIELD_INFO rdb_i_s_trx_info_fields_info[] = { + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + 
ROCKSDB_FIELD_INFO("STATE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WRITE_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("LOCK_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("TIMEOUT_SEC", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("WAITING_KEY", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WAITING_COLUMN_FAMILY_ID", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("IS_REPLICATION", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SKIP_TRX_API", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("READ_ONLY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("HAS_DEADLOCK_DETECTION", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("NUM_ONGOING_BULKLOAD", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("THREAD_ID", sizeof(ulong), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("QUERY", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; /* Fill the information_schema.rocksdb_trx virtual table */ static int rdb_i_s_trx_info_fill_table( - my_core::THD* const thd, - my_core::TABLE_LIST* const tables, - my_core::Item* const cond __attribute__((__unused__))) -{ + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); int ret = 0; - DBUG_ENTER("rdb_i_s_trx_info_fill_table"); - const std::vector &all_trx_info = rdb_get_all_trx_info(); for (const auto &info : all_trx_info) { - auto name_hexstr = rdb_hexdump(info.name.c_str(), info.name.length(), - NAME_LEN); + auto name_hexstr = + rdb_hexdump(info.name.c_str(), info.name.length(), NAME_LEN); auto key_hexstr = rdb_hexdump(info.waiting_key.c_str(), info.waiting_key.length(), FN_REFLEN); - 
tables->table->field[RDB_TRX_FIELD::TRANSACTION_ID]->store( - info.trx_id, true); + tables->table->field[RDB_TRX_FIELD::TRANSACTION_ID]->store(info.trx_id, + true); tables->table->field[RDB_TRX_FIELD::STATE]->store( - info.state.c_str(), info.state.length(), system_charset_info); + info.state.c_str(), info.state.length(), system_charset_info); tables->table->field[RDB_TRX_FIELD::NAME]->store( - name_hexstr.c_str(), name_hexstr.length(), system_charset_info); - tables->table->field[RDB_TRX_FIELD::WRITE_COUNT]->store( - info.write_count, true); - tables->table->field[RDB_TRX_FIELD::LOCK_COUNT]->store( - info.lock_count, true); - tables->table->field[RDB_TRX_FIELD::TIMEOUT_SEC]->store( - info.timeout_sec, false); + name_hexstr.c_str(), name_hexstr.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::WRITE_COUNT]->store(info.write_count, + true); + tables->table->field[RDB_TRX_FIELD::LOCK_COUNT]->store(info.lock_count, + true); + tables->table->field[RDB_TRX_FIELD::TIMEOUT_SEC]->store(info.timeout_sec, + false); tables->table->field[RDB_TRX_FIELD::WAITING_KEY]->store( - key_hexstr.c_str(), key_hexstr.length(), system_charset_info); + key_hexstr.c_str(), key_hexstr.length(), system_charset_info); tables->table->field[RDB_TRX_FIELD::WAITING_COLUMN_FAMILY_ID]->store( - info.waiting_cf_id, true); + info.waiting_cf_id, true); tables->table->field[RDB_TRX_FIELD::IS_REPLICATION]->store( - info.is_replication, false); - tables->table->field[RDB_TRX_FIELD::SKIP_TRX_API]->store( - info.skip_trx_api, false); - tables->table->field[RDB_TRX_FIELD::READ_ONLY]->store( - info.read_only, false); + info.is_replication, false); + tables->table->field[RDB_TRX_FIELD::SKIP_TRX_API]->store(info.skip_trx_api, + false); + tables->table->field[RDB_TRX_FIELD::READ_ONLY]->store(info.read_only, + false); tables->table->field[RDB_TRX_FIELD::HAS_DEADLOCK_DETECTION]->store( - info.deadlock_detect, false); + info.deadlock_detect, false); 
tables->table->field[RDB_TRX_FIELD::NUM_ONGOING_BULKLOAD]->store( - info.num_ongoing_bulk_load, false); - tables->table->field[RDB_TRX_FIELD::THREAD_ID]->store( - info.thread_id, true); + info.num_ongoing_bulk_load, false); + tables->table->field[RDB_TRX_FIELD::THREAD_ID]->store(info.thread_id, true); tables->table->field[RDB_TRX_FIELD::QUERY]->store( - info.query_str.c_str(), info.query_str.length(), system_charset_info); + info.query_str.c_str(), info.query_str.length(), system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret= my_core::schema_table_store_record(thd, tables->table); + ret = my_core::schema_table_store_record(thd, tables->table); if (ret != 0) { break; } @@ -1372,197 +1383,202 @@ static int rdb_i_s_trx_info_fill_table( } /* Initialize the information_schema.rocksdb_trx_info virtual table */ -static int rdb_i_s_trx_info_init(void* const p) -{ - my_core::ST_SCHEMA_TABLE *schema; +static int rdb_i_s_trx_info_init(void *const p) { + DBUG_ENTER_FUNC(); - DBUG_ENTER("rdb_i_s_trx_info_init"); DBUG_ASSERT(p != nullptr); - schema= (my_core::ST_SCHEMA_TABLE*) p; + my_core::ST_SCHEMA_TABLE *schema; - schema->fields_info= rdb_i_s_trx_info_fields_info; - schema->fill_table= rdb_i_s_trx_info_fill_table; + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_trx_info_fields_info; + schema->fill_table = rdb_i_s_trx_info_fill_table; DBUG_RETURN(0); } -static int rdb_i_s_deinit(void *p __attribute__((__unused__))) -{ - DBUG_ENTER("rdb_i_s_deinit"); +static int rdb_i_s_deinit(void *p MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); DBUG_RETURN(0); } -static struct st_mysql_information_schema rdb_i_s_info= -{ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION }; +static struct st_mysql_information_schema rdb_i_s_info = { + MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION}; -struct st_mysql_plugin rdb_i_s_cfstats= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_CFSTATS", - "Facebook", - "RocksDB column family stats", 
- PLUGIN_LICENSE_GPL, - rdb_i_s_cfstats_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_cfstats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CFSTATS", + "Facebook", + "RocksDB column family stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_dbstats= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_DBSTATS", - "Facebook", - "RocksDB database stats", - PLUGIN_LICENSE_GPL, - rdb_i_s_dbstats_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_dbstats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DBSTATS", + "Facebook", + "RocksDB database stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_dbstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_perf_context= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_PERF_CONTEXT", - "Facebook", - "RocksDB perf context stats", - PLUGIN_LICENSE_GPL, - rdb_i_s_perf_context_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_perf_context = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT", + "Facebook", + "RocksDB perf context stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_init, + rdb_i_s_deinit, + 
0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_perf_context_global= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_PERF_CONTEXT_GLOBAL", - "Facebook", - "RocksDB perf context stats (all)", - PLUGIN_LICENSE_GPL, - rdb_i_s_perf_context_global_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_perf_context_global = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT_GLOBAL", + "Facebook", + "RocksDB perf context stats (all)", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_global_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_cfoptions= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_CF_OPTIONS", - "Facebook", - "RocksDB column family options", - PLUGIN_LICENSE_GPL, - rdb_i_s_cfoptions_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_cfoptions = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CF_OPTIONS", + "Facebook", + "RocksDB column family options", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfoptions_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_global_info= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_GLOBAL_INFO", - "Facebook", - "RocksDB global info", - PLUGIN_LICENSE_GPL, - 
rdb_i_s_global_info_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_global_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_GLOBAL_INFO", + "Facebook", + "RocksDB global info", + PLUGIN_LICENSE_GPL, + rdb_i_s_global_info_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_ddl= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_DDL", - "Facebook", - "RocksDB Data Dictionary", - PLUGIN_LICENSE_GPL, - rdb_i_s_ddl_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_compact_stats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_COMPACTION_STATS", + "Facebook", + "RocksDB compaction stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_compact_stats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_index_file_map= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_INDEX_FILE_MAP", - "Facebook", - "RocksDB index file map", - PLUGIN_LICENSE_GPL, - rdb_i_s_index_file_map_init, - rdb_i_s_deinit, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_ddl = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DDL", + "Facebook", + "RocksDB Data Dictionary", + PLUGIN_LICENSE_GPL, + rdb_i_s_ddl_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) 
*/ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_lock_info= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_LOCKS", - "Facebook", - "RocksDB lock information", - PLUGIN_LICENSE_GPL, - rdb_i_s_lock_info_init, - nullptr, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_index_file_map = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_INDEX_FILE_MAP", + "Facebook", + "RocksDB index file map", + PLUGIN_LICENSE_GPL, + rdb_i_s_index_file_map_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -struct st_mysql_plugin rdb_i_s_trx_info= -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &rdb_i_s_info, - "ROCKSDB_TRX", - "Facebook", - "RocksDB transaction information", - PLUGIN_LICENSE_GPL, - rdb_i_s_trx_info_init, - nullptr, - 0x0001, /* version number (0.1) */ - nullptr, /* status variables */ - nullptr, /* system variables */ - nullptr, /* config options */ - 0, /* flags */ +struct st_mysql_plugin rdb_i_s_lock_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_LOCKS", + "Facebook", + "RocksDB lock information", + PLUGIN_LICENSE_GPL, + rdb_i_s_lock_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ }; -} // namespace myrocks + +struct st_mysql_plugin rdb_i_s_trx_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_TRX", + "Facebook", + "RocksDB transaction information", + PLUGIN_LICENSE_GPL, + rdb_i_s_trx_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system 
variables */ + nullptr, /* config options */ + 0, /* flags */ +}; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h index 5384d237d3b..c684464c996 100644 --- a/storage/rocksdb/rdb_i_s.h +++ b/storage/rocksdb/rdb_i_s.h @@ -26,10 +26,10 @@ extern struct st_mysql_plugin rdb_i_s_dbstats; extern struct st_mysql_plugin rdb_i_s_perf_context; extern struct st_mysql_plugin rdb_i_s_perf_context_global; extern struct st_mysql_plugin rdb_i_s_cfoptions; +extern struct st_mysql_plugin rdb_i_s_compact_stats; extern struct st_mysql_plugin rdb_i_s_global_info; extern struct st_mysql_plugin rdb_i_s_ddl; extern struct st_mysql_plugin rdb_i_s_index_file_map; extern struct st_mysql_plugin rdb_i_s_lock_info; extern struct st_mysql_plugin rdb_i_s_trx_info; -} // namespace myrocks - +} // namespace myrocks diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc index 8544bdf52fd..8f27616e4d9 100644 --- a/storage/rocksdb/rdb_index_merge.cc +++ b/storage/rocksdb/rdb_index_merge.cc @@ -17,47 +17,37 @@ /* This C++ file's header file */ #include "./rdb_index_merge.h" +/* MySQL header files */ +#include "../sql/sql_class.h" + /* MyRocks header files */ #include "./ha_rocksdb.h" #include "./rdb_datadic.h" namespace myrocks { -Rdb_index_merge::Rdb_index_merge(const char* const tmpfile_path, +Rdb_index_merge::Rdb_index_merge(const char *const tmpfile_path, const ulonglong &merge_buf_size, const ulonglong &merge_combine_read_size, - const rocksdb::Comparator* const comparator) : - m_tmpfile_path(tmpfile_path), - m_merge_buf_size(merge_buf_size), - m_merge_combine_read_size(merge_combine_read_size), - m_comparator(comparator), - m_rec_buf_unsorted(nullptr), - m_output_buf(nullptr) -{ -} + const rocksdb::Comparator *const comparator) + : m_tmpfile_path(tmpfile_path), m_merge_buf_size(merge_buf_size), + m_merge_combine_read_size(merge_combine_read_size), + m_comparator(comparator), m_rec_buf_unsorted(nullptr), + 
m_output_buf(nullptr) {} -Rdb_index_merge::~Rdb_index_merge() -{ +Rdb_index_merge::~Rdb_index_merge() { /* Close tmp file, we don't need to worry about deletion, mysql handles it. */ my_close(m_merge_file.fd, MYF(MY_WME)); - - /* There should be no records left in the offset tree */ - DBUG_ASSERT(m_offset_tree.empty()); - - /* There should be no pointers left on the merge heap */ - DBUG_ASSERT(m_merge_min_heap.empty()); } -int Rdb_index_merge::init() -{ +int Rdb_index_merge::init() { /* Create a temporary merge file on disk to store sorted chunks during inplace index creation. */ - if (merge_file_create()) - { + if (merge_file_create()) { return HA_ERR_INTERNAL_ERROR; } @@ -66,46 +56,41 @@ int Rdb_index_merge::init() to disk. They will be written to disk sorted. A sorted tree is used to keep track of the offset of each record within the unsorted buffer. */ - m_rec_buf_unsorted= std::shared_ptr( - new merge_buf_info(m_merge_buf_size)); + m_rec_buf_unsorted = + std::shared_ptr(new merge_buf_info(m_merge_buf_size)); /* Allocate output buffer that will contain sorted block that is written to disk. */ - m_output_buf= std::shared_ptr( - new merge_buf_info(m_merge_buf_size)); + m_output_buf = + std::shared_ptr(new merge_buf_info(m_merge_buf_size)); - return 0; + return HA_EXIT_SUCCESS; } /** Create a merge file in the given location. 
*/ -int Rdb_index_merge::merge_file_create() -{ +int Rdb_index_merge::merge_file_create() { DBUG_ASSERT(m_merge_file.fd == -1); int fd; /* If no path set for tmpfile, use mysql_tmpdir by default */ - if (m_tmpfile_path == nullptr) - { + if (m_tmpfile_path == nullptr) { fd = mysql_tmpfile("myrocks"); - } - else - { + } else { fd = mysql_tmpfile_path(m_tmpfile_path, "myrocks"); } - if (fd < 0) - { + if (fd < 0) { return HA_ERR_INTERNAL_ERROR; } m_merge_file.fd = fd; m_merge_file.num_sort_buffers = 0; - return 0; + return HA_EXIT_SUCCESS; } /** @@ -115,9 +100,7 @@ int Rdb_index_merge::merge_file_create() If buffer in memory is full, write the buffer out to disk sorted using the offset tree, and clear the tree. (Happens in merge_buf_write) */ -int Rdb_index_merge::add(const rocksdb::Slice& key, - const rocksdb::Slice& val) -{ +int Rdb_index_merge::add(const rocksdb::Slice &key, const rocksdb::Slice &val) { /* Adding a record after heap is already created results in error */ DBUG_ASSERT(m_merge_min_heap.empty()); @@ -125,33 +108,30 @@ int Rdb_index_merge::add(const rocksdb::Slice& key, Check if sort buffer is going to be out of space, if so write it out to disk in sorted order using offset tree. */ - const uint total_offset= - RDB_MERGE_CHUNK_LEN + m_rec_buf_unsorted->curr_offset + - RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER + - key.size() + val.size(); - if (total_offset >= m_rec_buf_unsorted->total_size) - { + const uint total_offset = RDB_MERGE_CHUNK_LEN + + m_rec_buf_unsorted->curr_offset + + RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER + + key.size() + val.size(); + if (total_offset >= m_rec_buf_unsorted->total_size) { /* If the offset tree is empty here, that means that the proposed key to add is too large for the buffer. */ - if (m_offset_tree.empty()) - { + if (m_offset_tree.empty()) { // NO_LINT_DEBUG sql_print_error("Sort buffer size is too small to process merge. 
" "Please set merge buffer size to a higher value."); return HA_ERR_INTERNAL_ERROR; } - if (merge_buf_write()) - { + if (merge_buf_write()) { // NO_LINT_DEBUG sql_print_error("Error writing sort buffer to disk."); return HA_ERR_INTERNAL_ERROR; } } - const ulonglong rec_offset= m_rec_buf_unsorted->curr_offset; + const ulonglong rec_offset = m_rec_buf_unsorted->curr_offset; /* Store key and value in temporary unsorted in memory buffer pointed to by @@ -163,14 +143,13 @@ int Rdb_index_merge::add(const rocksdb::Slice& key, m_offset_tree.emplace(m_rec_buf_unsorted->block.get() + rec_offset, m_comparator); - return 0; + return HA_EXIT_SUCCESS; } /** Sort + write merge buffer chunk out to disk. */ -int Rdb_index_merge::merge_buf_write() -{ +int Rdb_index_merge::merge_buf_write() { DBUG_ASSERT(m_merge_file.fd != -1); DBUG_ASSERT(m_rec_buf_unsorted != nullptr); DBUG_ASSERT(m_output_buf != nullptr); @@ -185,8 +164,7 @@ int Rdb_index_merge::merge_buf_write() Iterate through the offset tree. Should be ordered by the secondary key at this point. */ - for (const auto& rec : m_offset_tree) - { + for (const auto &rec : m_offset_tree) { DBUG_ASSERT(m_output_buf->curr_offset <= m_merge_buf_size); /* Read record from offset (should never fail) */ @@ -207,8 +185,7 @@ int Rdb_index_merge::merge_buf_write() then write into the respective merge buffer. */ if (my_seek(m_merge_file.fd, m_merge_file.num_sort_buffers * m_merge_buf_size, - SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) - { + SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { // NO_LINT_DEBUG sql_print_error("Error seeking to location in merge file on disk."); return HA_ERR_INTERNAL_ERROR; @@ -220,9 +197,8 @@ int Rdb_index_merge::merge_buf_write() burst. 
*/ if (my_write(m_merge_file.fd, m_output_buf->block.get(), - m_output_buf->total_size, MYF(MY_WME | MY_NABP)) || - mysql_file_sync(m_merge_file.fd, MYF(MY_WME))) - { + m_output_buf->total_size, MYF(MY_WME | MY_NABP)) || + mysql_file_sync(m_merge_file.fd, MYF(MY_WME))) { // NO_LINT_DEBUG sql_print_error("Error writing sorted merge buffer to disk."); return HA_ERR_INTERNAL_ERROR; @@ -234,23 +210,21 @@ int Rdb_index_merge::merge_buf_write() /* Reset everything for next run */ merge_reset(); - return 0; + return HA_EXIT_SUCCESS; } /** Prepare n-way merge of n sorted buffers on disk, using a heap sorted by secondary key records. */ -int Rdb_index_merge::merge_heap_prepare() -{ +int Rdb_index_merge::merge_heap_prepare() { DBUG_ASSERT(m_merge_min_heap.empty()); /* If the offset tree is not empty, there are still some records that need to be written to disk. Write them out now. */ - if (!m_offset_tree.empty() && merge_buf_write()) - { + if (!m_offset_tree.empty() && merge_buf_write()) { return HA_ERR_INTERNAL_ERROR; } @@ -260,39 +234,34 @@ int Rdb_index_merge::merge_heap_prepare() For an n-way merge, we need to read chunks of each merge file simultaneously. */ - ulonglong chunk_size= m_merge_combine_read_size/ - m_merge_file.num_sort_buffers; - if (chunk_size >= m_merge_buf_size) - { - chunk_size= m_merge_buf_size; + ulonglong chunk_size = + m_merge_combine_read_size / m_merge_file.num_sort_buffers; + if (chunk_size >= m_merge_buf_size) { + chunk_size = m_merge_buf_size; } /* Allocate buffers for each chunk */ - for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++) - { - const auto entry= std::make_shared(m_comparator); + for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++) { + const auto entry = std::make_shared(m_comparator); /* Read chunk_size bytes from each chunk on disk, and place inside respective chunk buffer. 
*/ - const size_t total_size= - entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); + const size_t total_size = + entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); - if (total_size == (size_t) - 1) - { + if (total_size == (size_t)-1) { return HA_ERR_INTERNAL_ERROR; } /* Can reach this condition if an index was added on table w/ no rows */ - if (total_size - RDB_MERGE_CHUNK_LEN == 0) - { + if (total_size - RDB_MERGE_CHUNK_LEN == 0) { break; } /* Read the first record from each buffer to initially populate the heap */ - if (entry->read_rec(&entry->key, &entry->val)) - { + if (entry->read_rec(&entry->key, &entry->val)) { // NO_LINT_DEBUG sql_print_error("Chunk size is too small to process merge."); return HA_ERR_INTERNAL_ERROR; @@ -301,14 +270,14 @@ int Rdb_index_merge::merge_heap_prepare() m_merge_min_heap.push(std::move(entry)); } - return 0; + return HA_EXIT_SUCCESS; } /** Create and/or iterate through keys in the merge heap. */ -int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) -{ +int Rdb_index_merge::next(rocksdb::Slice *const key, + rocksdb::Slice *const val) { /* If table fits in one sort buffer, we can optimize by writing the sort buffer directly through to the sstfilewriter instead of @@ -317,20 +286,18 @@ int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) If there are no sort buffer records (alters on empty tables), also exit here. */ - if (m_merge_file.num_sort_buffers == 0) - { - if (m_offset_tree.empty()) - { + if (m_merge_file.num_sort_buffers == 0) { + if (m_offset_tree.empty()) { return -1; } - const auto rec= m_offset_tree.begin(); + const auto rec = m_offset_tree.begin(); /* Read record from offset */ merge_read_rec(rec->block, key, val); m_offset_tree.erase(rec); - return 0; + return HA_EXIT_SUCCESS; } int res; @@ -340,10 +307,8 @@ int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) of the external sort. 
Populate the heap with initial values from each disk chunk. */ - if (m_merge_min_heap.empty()) - { - if ((res= merge_heap_prepare())) - { + if (m_merge_min_heap.empty()) { + if ((res = merge_heap_prepare())) { // NO_LINT_DEBUG sql_print_error("Error during preparation of heap."); return res; @@ -354,7 +319,7 @@ int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) inside the SST file yet. */ merge_heap_top(key, val); - return 0; + return HA_EXIT_SUCCESS; } DBUG_ASSERT(!m_merge_min_heap.empty()); @@ -364,14 +329,13 @@ int Rdb_index_merge::next(rocksdb::Slice* const key, rocksdb::Slice* const val) /** Get current top record from the heap. */ -void Rdb_index_merge::merge_heap_top(rocksdb::Slice* const key, - rocksdb::Slice* const val) -{ +void Rdb_index_merge::merge_heap_top(rocksdb::Slice *const key, + rocksdb::Slice *const val) { DBUG_ASSERT(!m_merge_min_heap.empty()); - const std::shared_ptr& entry= m_merge_min_heap.top(); - *key= entry->key; - *val= entry->val; + const std::shared_ptr &entry = m_merge_min_heap.top(); + *key = entry->key; + *val = entry->val; } /** @@ -380,14 +344,13 @@ void Rdb_index_merge::merge_heap_top(rocksdb::Slice* const key, Returns -1 when there are no more records in the heap. */ -int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* const key, - rocksdb::Slice* const val) -{ +int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice *const key, + rocksdb::Slice *const val) { /* Make a new reference to shared ptr so it doesn't get destroyed during pop(). We are going to push this entry back onto the heap. */ - const std::shared_ptr entry= m_merge_min_heap.top(); + const std::shared_ptr entry = m_merge_min_heap.top(); m_merge_min_heap.pop(); /* @@ -397,15 +360,13 @@ int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* const key, Return without adding entry back onto heap. If heap is also empty, we must be finished with merge. 
*/ - if (entry->chunk_info->is_chunk_finished()) - { - if (m_merge_min_heap.empty()) - { + if (entry->chunk_info->is_chunk_finished()) { + if (m_merge_min_heap.empty()) { return -1; } merge_heap_top(key, val); - return 0; + return HA_EXIT_SUCCESS; } /* @@ -417,16 +378,13 @@ int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* const key, If merge_read_rec fails, it means the either the chunk was cut off or we've reached the end of the respective chunk. */ - if (entry->read_rec(&entry->key, &entry->val)) - { - if (entry->read_next_chunk_from_disk(m_merge_file.fd)) - { + if (entry->read_rec(&entry->key, &entry->val)) { + if (entry->read_next_chunk_from_disk(m_merge_file.fd)) { return HA_ERR_INTERNAL_ERROR; } /* Try reading record again, should never fail. */ - if (entry->read_rec(&entry->key, &entry->val)) - { + if (entry->read_rec(&entry->key, &entry->val)) { return HA_ERR_INTERNAL_ERROR; } } @@ -436,52 +394,46 @@ int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* const key, /* Return the current top record on heap */ merge_heap_top(key, val); - return 0; + return HA_EXIT_SUCCESS; } -int Rdb_index_merge::merge_heap_entry::read_next_chunk_from_disk(File fd) -{ - if (chunk_info->read_next_chunk_from_disk(fd)) - { - return 1; +int Rdb_index_merge::merge_heap_entry::read_next_chunk_from_disk(File fd) { + if (chunk_info->read_next_chunk_from_disk(fd)) { + return HA_EXIT_FAILURE; } - block= chunk_info->block.get(); - return 0; + block = chunk_info->block.get(); + return HA_EXIT_SUCCESS; } -int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) -{ +int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) { disk_curr_offset += curr_offset; - if (my_seek(fd, disk_curr_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) - { + if (my_seek(fd, disk_curr_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { // NO_LINT_DEBUG sql_print_error("Error seeking to location in merge file on disk."); - return 1; + return HA_EXIT_FAILURE; } 
/* Overwrite the old block */ - const size_t bytes_read= my_read(fd, block.get(), block_len, MYF(MY_WME)); - if (bytes_read == (size_t) -1) - { + const size_t bytes_read = my_read(fd, block.get(), block_len, MYF(MY_WME)); + if (bytes_read == (size_t)-1) { // NO_LINT_DEBUG sql_print_error("Error reading merge file from disk."); - return 1; + return HA_EXIT_FAILURE; } - curr_offset= 0; - return 0; + curr_offset = 0; + return HA_EXIT_SUCCESS; } /** Get records from offset within sort buffer and compare them. Sort by least to greatest. */ -int Rdb_index_merge::merge_record_compare(const uchar* const a_block, - const uchar* const b_block, - const rocksdb::Comparator* const comparator) -{ +int Rdb_index_merge::merge_record_compare( + const uchar *const a_block, const uchar *const b_block, + const rocksdb::Comparator *const comparator) { return comparator->Compare(as_slice(a_block), as_slice(b_block)); } @@ -489,114 +441,103 @@ int Rdb_index_merge::merge_record_compare(const uchar* const a_block, Given an offset in a merge sort buffer, read out the keys + values. After this, block will point to the next record in the buffer. 
**/ -void Rdb_index_merge::merge_read_rec(const uchar* const block, - rocksdb::Slice* const key, - rocksdb::Slice* const val) -{ +void Rdb_index_merge::merge_read_rec(const uchar *const block, + rocksdb::Slice *const key, + rocksdb::Slice *const val) { /* Read key at block offset into key slice and the value into value slice*/ read_slice(key, block); read_slice(val, block + RDB_MERGE_REC_DELIMITER + key->size()); } -void Rdb_index_merge::read_slice(rocksdb::Slice* slice, const uchar* block_ptr) -{ +void Rdb_index_merge::read_slice(rocksdb::Slice *slice, + const uchar *block_ptr) { uint64 slice_len; merge_read_uint64(&block_ptr, &slice_len); - *slice= rocksdb::Slice(reinterpret_cast(block_ptr), slice_len); + *slice = rocksdb::Slice(reinterpret_cast(block_ptr), slice_len); } -int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice* const key, - rocksdb::Slice* const val) -{ - const uchar* block_ptr= block; +int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *const key, + rocksdb::Slice *const val) { + const uchar *block_ptr = block; const auto orig_offset = chunk_info->curr_offset; const auto orig_block = block; /* Read key at block offset into key slice and the value into value slice*/ - if (read_slice(key, &block_ptr) != 0) - { - return 1; + if (read_slice(key, &block_ptr) != 0) { + return HA_EXIT_FAILURE; } - chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block; - block += (uintptr_t) block_ptr - (uintptr_t) block; + chunk_info->curr_offset += (uintptr_t)block_ptr - (uintptr_t)block; + block += (uintptr_t)block_ptr - (uintptr_t)block; - if (read_slice(val, &block_ptr) != 0) - { - chunk_info->curr_offset= orig_offset; - block= orig_block; - return 1; + if (read_slice(val, &block_ptr) != 0) { + chunk_info->curr_offset = orig_offset; + block = orig_block; + return HA_EXIT_FAILURE; } - chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block; - block += (uintptr_t) block_ptr - (uintptr_t) block; + chunk_info->curr_offset 
+= (uintptr_t)block_ptr - (uintptr_t)block; + block += (uintptr_t)block_ptr - (uintptr_t)block; - return 0; + return HA_EXIT_SUCCESS; } -int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice* const slice, - const uchar** block_ptr) -{ - if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER)) - { - return 1; +int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice *const slice, + const uchar **block_ptr) { + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER)) { + return HA_EXIT_FAILURE; } uint64 slice_len; merge_read_uint64(block_ptr, &slice_len); - if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER + slice_len)) - { - return 1; + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER + slice_len)) { + return HA_EXIT_FAILURE; } - *slice= rocksdb::Slice(reinterpret_cast(*block_ptr), slice_len); + *slice = + rocksdb::Slice(reinterpret_cast(*block_ptr), slice_len); *block_ptr += slice_len; - return 0; + return HA_EXIT_SUCCESS; } size_t Rdb_index_merge::merge_heap_entry::prepare(File fd, ulonglong f_offset, - ulonglong chunk_size) -{ - chunk_info= std::make_shared(chunk_size); + ulonglong chunk_size) { + chunk_info = std::make_shared(chunk_size); const size_t res = chunk_info->prepare(fd, f_offset); - if (res != (size_t) - 1) - { - block= chunk_info->block.get() + RDB_MERGE_CHUNK_LEN; + if (res != (size_t)-1) { + block = chunk_info->block.get() + RDB_MERGE_CHUNK_LEN; } return res; } -size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) -{ - disk_start_offset= f_offset; - disk_curr_offset= f_offset; +size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) { + disk_start_offset = f_offset; + disk_curr_offset = f_offset; /* Need to position cursor to the chunk it needs to be at on filesystem then read 'chunk_size' bytes into the respective chunk buffer. 
*/ - if (my_seek(fd, f_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) - { + if (my_seek(fd, f_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { // NO_LINT_DEBUG sql_print_error("Error seeking to location in merge file on disk."); - return (size_t) - 1; + return (size_t)-1; } - const size_t bytes_read= my_read(fd, block.get(), total_size, MYF(MY_WME)); - if (bytes_read == (size_t) - 1) - { + const size_t bytes_read = my_read(fd, block.get(), total_size, MYF(MY_WME)); + if (bytes_read == (size_t)-1) { // NO_LINT_DEBUG sql_print_error("Error reading merge file from disk."); - return (size_t) - 1; + return (size_t)-1; } /* Read the first 8 bytes of each chunk, this gives us the actual size of each chunk. */ - const uchar *block_ptr= block.get(); + const uchar *block_ptr = block.get(); merge_read_uint64(&block_ptr, &total_size); curr_offset += RDB_MERGE_CHUNK_LEN; return total_size; @@ -604,27 +545,23 @@ size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) /* Store key and value w/ their respective delimiters at the given offset */ void Rdb_index_merge::merge_buf_info::store_key_value( - const rocksdb::Slice& key, const rocksdb::Slice& val) -{ + const rocksdb::Slice &key, const rocksdb::Slice &val) { store_slice(key); store_slice(val); } -void Rdb_index_merge::merge_buf_info::store_slice(const rocksdb::Slice& slice) -{ +void Rdb_index_merge::merge_buf_info::store_slice(const rocksdb::Slice &slice) { /* Store length delimiter */ merge_store_uint64(&block[curr_offset], slice.size()); /* Store slice data */ memcpy(&block[curr_offset + RDB_MERGE_REC_DELIMITER], slice.data(), - slice.size()); + slice.size()); curr_offset += slice.size() + RDB_MERGE_REC_DELIMITER; } - -void Rdb_index_merge::merge_reset() -{ +void Rdb_index_merge::merge_reset() { /* Either error, or all values in the sort buffer have been written to disk, so we need to clear the offset tree. 
@@ -632,16 +569,14 @@ void Rdb_index_merge::merge_reset() m_offset_tree.clear(); /* Reset sort buffer block */ - if (m_rec_buf_unsorted && m_rec_buf_unsorted->block) - { - m_rec_buf_unsorted->curr_offset= 0; + if (m_rec_buf_unsorted && m_rec_buf_unsorted->block) { + m_rec_buf_unsorted->curr_offset = 0; } /* Reset output buf */ - if (m_output_buf && m_output_buf->block) - { - m_output_buf->curr_offset= 0; + if (m_output_buf && m_output_buf->block) { + m_output_buf->curr_offset = 0; } } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_index_merge.h b/storage/rocksdb/rdb_index_merge.h index 86893bf316c..9d1469fc34e 100644 --- a/storage/rocksdb/rdb_index_merge.h +++ b/storage/rocksdb/rdb_index_merge.h @@ -18,13 +18,13 @@ /* MySQL header files */ #include "../sql/log.h" -#include "./handler.h" /* handler */ -#include "./my_global.h" /* ulonglong */ +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ /* C++ standard header files */ +#include #include #include -#include /* RocksDB header files */ #include "rocksdb/db.h" @@ -46,13 +46,13 @@ class Rdb_key_def; class Rdb_tbl_def; class Rdb_index_merge { - Rdb_index_merge(const Rdb_index_merge& p)= delete; - Rdb_index_merge& operator=(const Rdb_index_merge& p)= delete; + Rdb_index_merge(const Rdb_index_merge &p) = delete; + Rdb_index_merge &operator=(const Rdb_index_merge &p) = delete; - public: +public: /* Information about temporary files used in external merge sort */ struct merge_file_info { - File fd= -1; /* file descriptor */ + File fd = -1; /* file descriptor */ ulong num_sort_buffers; /* number of sort buffers in temp file */ }; @@ -60,40 +60,37 @@ class Rdb_index_merge { struct merge_buf_info { /* heap memory allocated for main memory sort/merge */ std::unique_ptr block; - const ulonglong block_len; /* amount of data bytes allocated for block above */ + const ulonglong + block_len; /* amount of data bytes allocated for block above */ ulonglong curr_offset; 
/* offset of the record pointer for the block */ ulonglong disk_start_offset; /* where the chunk starts on disk */ - ulonglong disk_curr_offset; /* current offset on disk */ - ulonglong total_size; /* total # of data bytes in chunk */ + ulonglong disk_curr_offset; /* current offset on disk */ + ulonglong total_size; /* total # of data bytes in chunk */ - void store_key_value(const rocksdb::Slice& key, const rocksdb::Slice& val) - __attribute__((__nonnull__)); + void store_key_value(const rocksdb::Slice &key, const rocksdb::Slice &val) + MY_ATTRIBUTE((__nonnull__)); - void store_slice(const rocksdb::Slice& slice) - __attribute__((__nonnull__)); + void store_slice(const rocksdb::Slice &slice) MY_ATTRIBUTE((__nonnull__)); - size_t prepare(File fd, ulonglong f_offset) - __attribute__((__nonnull__)); + size_t prepare(File fd, ulonglong f_offset) MY_ATTRIBUTE((__nonnull__)); int read_next_chunk_from_disk(File fd) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - inline bool is_chunk_finished() const - { + inline bool is_chunk_finished() const { return curr_offset + disk_curr_offset - disk_start_offset == total_size; } - inline bool has_space(uint64 needed) const - { + inline bool has_space(uint64 needed) const { return curr_offset + needed <= block_len; } - explicit merge_buf_info(const ulonglong merge_block_size) : - block(nullptr), block_len(merge_block_size), curr_offset(0), - disk_start_offset(0), disk_curr_offset(0), total_size(merge_block_size) - { + explicit merge_buf_info(const ulonglong merge_block_size) + : block(nullptr), block_len(merge_block_size), curr_offset(0), + disk_start_offset(0), disk_curr_offset(0), + total_size(merge_block_size) { /* Will throw an exception if it runs out of memory here */ - block= std::unique_ptr(new uchar[merge_block_size]); + block = std::unique_ptr(new uchar[merge_block_size]); /* Initialize entire buffer to 0 to avoid valgrind errors */ memset(block.get(), 0, 
merge_block_size); @@ -101,132 +98,121 @@ class Rdb_index_merge { }; /* Represents an entry in the heap during merge phase of external sort */ - struct merge_heap_entry - { + struct merge_heap_entry { std::shared_ptr chunk_info; /* pointer to buffer info */ - uchar* block; /* pointer to heap memory where record is stored */ - const rocksdb::Comparator* const comparator; + uchar *block; /* pointer to heap memory where record is stored */ + const rocksdb::Comparator *const comparator; rocksdb::Slice key; /* current key pointed to by block ptr */ rocksdb::Slice val; size_t prepare(File fd, ulonglong f_offset, ulonglong chunk_size) - __attribute__((__nonnull__)); + MY_ATTRIBUTE((__nonnull__)); int read_next_chunk_from_disk(File fd) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int read_rec(rocksdb::Slice* const key, rocksdb::Slice* const val) - __attribute__((__nonnull__, __warn_unused_result__)); + int read_rec(rocksdb::Slice *const key, rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int read_slice(rocksdb::Slice* const slice, const uchar** block_ptr) - __attribute__((__nonnull__, __warn_unused_result__)); + int read_slice(rocksdb::Slice *const slice, const uchar **block_ptr) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - explicit merge_heap_entry(const rocksdb::Comparator* const comparator) : - chunk_info(nullptr), block(nullptr), comparator(comparator) {} + explicit merge_heap_entry(const rocksdb::Comparator *const comparator) + : chunk_info(nullptr), block(nullptr), comparator(comparator) {} }; - struct merge_heap_comparator - { - bool operator() (const std::shared_ptr& lhs, - const std::shared_ptr& rhs) - { + struct merge_heap_comparator { + bool operator()(const std::shared_ptr &lhs, + const std::shared_ptr &rhs) { return lhs->comparator->Compare(rhs->key, lhs->key) < 0; } }; /* Represents a record in unsorted buffer */ - struct merge_record - { - 
uchar* block; /* points to offset of key in sort buffer */ - const rocksdb::Comparator* const comparator; + struct merge_record { + uchar *block; /* points to offset of key in sort buffer */ + const rocksdb::Comparator *const comparator; - bool operator< (const merge_record &record) const - { + bool operator<(const merge_record &record) const { return merge_record_compare(this->block, record.block, comparator) < 0; } - merge_record(uchar* const block, - const rocksdb::Comparator* const comparator) : - block(block), comparator(comparator) {} + merge_record(uchar *const block, + const rocksdb::Comparator *const comparator) + : block(block), comparator(comparator) {} }; - private: - const char* m_tmpfile_path; - const ulonglong m_merge_buf_size; - const ulonglong m_merge_combine_read_size; - const rocksdb::Comparator* m_comparator; - struct merge_file_info m_merge_file; - std::shared_ptr m_rec_buf_unsorted; - std::shared_ptr m_output_buf; - std::set m_offset_tree; +private: + const char *m_tmpfile_path; + const ulonglong m_merge_buf_size; + const ulonglong m_merge_combine_read_size; + const rocksdb::Comparator *m_comparator; + struct merge_file_info m_merge_file; + std::shared_ptr m_rec_buf_unsorted; + std::shared_ptr m_output_buf; + std::set m_offset_tree; std::priority_queue, std::vector>, - merge_heap_comparator> m_merge_min_heap; + merge_heap_comparator> + m_merge_min_heap; - static inline void merge_store_uint64(uchar* const dst, uint64 n) - { + static inline void merge_store_uint64(uchar *const dst, uint64 n) { memcpy(dst, &n, sizeof(n)); } - static inline void merge_read_uint64(const uchar **buf_ptr, uint64* const dst) - { + static inline void merge_read_uint64(const uchar **buf_ptr, + uint64 *const dst) { DBUG_ASSERT(buf_ptr != nullptr); memcpy(dst, *buf_ptr, sizeof(uint64)); *buf_ptr += sizeof(uint64); } - static inline rocksdb::Slice as_slice(const uchar* block) - { + static inline rocksdb::Slice as_slice(const uchar *block) { uint64 len; 
merge_read_uint64(&block, &len); - return rocksdb::Slice(reinterpret_cast(block), len); + return rocksdb::Slice(reinterpret_cast(block), len); } - static int merge_record_compare(const uchar* a_block, const uchar* b_block, - const rocksdb::Comparator* const comparator) - __attribute__((__nonnull__, __warn_unused_result__)); + static int merge_record_compare(const uchar *a_block, const uchar *b_block, + const rocksdb::Comparator *const comparator) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - void merge_read_rec(const uchar* const block, rocksdb::Slice* const key, - rocksdb::Slice* const val) - __attribute__((__nonnull__)); + void merge_read_rec(const uchar *const block, rocksdb::Slice *const key, + rocksdb::Slice *const val) MY_ATTRIBUTE((__nonnull__)); - void read_slice(rocksdb::Slice* slice, const uchar* block_ptr) - __attribute__((__nonnull__)); + void read_slice(rocksdb::Slice *slice, const uchar *block_ptr) + MY_ATTRIBUTE((__nonnull__)); - public: - Rdb_index_merge(const char* const tmpfile_path, +public: + Rdb_index_merge(const char *const tmpfile_path, const ulonglong &merge_buf_size, const ulonglong &merge_combine_read_size, - const rocksdb::Comparator* const comparator); + const rocksdb::Comparator *const comparator); ~Rdb_index_merge(); - int init() - __attribute__((__nonnull__, __warn_unused_result__)); + int init() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int merge_file_create() - __attribute__((__nonnull__, __warn_unused_result__)); + int merge_file_create() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int add(const rocksdb::Slice& key, const rocksdb::Slice& val) - __attribute__((__nonnull__, __warn_unused_result__)); + int add(const rocksdb::Slice &key, const rocksdb::Slice &val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int merge_buf_write() - __attribute__((__nonnull__, __warn_unused_result__)); + int merge_buf_write() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int next(rocksdb::Slice* 
const key, rocksdb::Slice* const val) - __attribute__((__nonnull__, __warn_unused_result__)); + int next(rocksdb::Slice *const key, rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int merge_heap_prepare() - __attribute__((__nonnull__, __warn_unused_result__)); + int merge_heap_prepare() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - void merge_heap_top(rocksdb::Slice* key, rocksdb::Slice* val) - __attribute__((__nonnull__)); + void merge_heap_top(rocksdb::Slice *key, rocksdb::Slice *val) + MY_ATTRIBUTE((__nonnull__)); - int merge_heap_pop_and_get_next(rocksdb::Slice* const key, - rocksdb::Slice* const val) - __attribute__((__nonnull__, __warn_unused_result__)); + int merge_heap_pop_and_get_next(rocksdb::Slice *const key, + rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void merge_reset(); }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index 5b1c9ba4c22..4a50af0c13d 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -17,43 +17,35 @@ /* This C++ file's header file */ #include "./rdb_mutex_wrapper.h" +/* MySQL header files */ +#include "../sql/sql_class.h" +#include "../sql/replication.h" + /* MyRocks header files */ #include "./ha_rocksdb.h" #include "./rdb_utils.h" -// Internal MySQL APIs not exposed in any header. 
-extern "C" -{ -void thd_enter_cond(MYSQL_THD thd, mysql_cond_t *cond, mysql_mutex_t *mutex, - const PSI_stage_info *stage, PSI_stage_info *old_stage); -void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage); -} - using namespace rocksdb; namespace myrocks { -static -PSI_stage_info stage_waiting_on_row_lock2= { 0, "Waiting for row lock", 0}; +static PSI_stage_info stage_waiting_on_row_lock2 = {0, "Waiting for row lock", + 0}; -static const int64_t MICROSECS= 1000*1000; +static const int64_t ONE_SECOND_IN_MICROSECS = 1000 * 1000; // A timeout as long as one full non-leap year worth of microseconds is as // good as infinite timeout. -static const int64_t BIG_TIMEOUT= MICROSECS * 60 * 60 * 24 * 365; +static const int64_t ONE_YEAR_IN_MICROSECS = + ONE_SECOND_IN_MICROSECS * 60 * 60 * 24 * 365; -Rdb_cond_var::Rdb_cond_var() { - mysql_cond_init(0, &m_cond, nullptr); -} +Rdb_cond_var::Rdb_cond_var() { mysql_cond_init(0, &m_cond, nullptr); } -Rdb_cond_var::~Rdb_cond_var() { - mysql_cond_destroy(&m_cond); -} +Rdb_cond_var::~Rdb_cond_var() { mysql_cond_destroy(&m_cond); } Status Rdb_cond_var::Wait(const std::shared_ptr mutex_arg) { - return WaitFor(mutex_arg, BIG_TIMEOUT); + return WaitFor(mutex_arg, ONE_YEAR_IN_MICROSECS); } - /* @brief Wait on condition variable. 
The caller must make sure that we own @@ -70,32 +62,30 @@ Status Rdb_cond_var::Wait(const std::shared_ptr mutex_arg) { Status Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, - int64_t timeout_micros) -{ - auto *mutex_obj= reinterpret_cast(mutex_arg.get()); + int64_t timeout_micros) { + auto *mutex_obj = reinterpret_cast(mutex_arg.get()); DBUG_ASSERT(mutex_obj != nullptr); - mysql_mutex_t * const mutex_ptr= &mutex_obj->m_mutex; + mysql_mutex_t *const mutex_ptr = &mutex_obj->m_mutex; - int res= 0; + int res = 0; struct timespec wait_timeout; if (timeout_micros < 0) - timeout_micros= BIG_TIMEOUT; - set_timespec_nsec(wait_timeout, timeout_micros*1000); + timeout_micros = ONE_YEAR_IN_MICROSECS; + set_timespec_nsec(wait_timeout, timeout_micros * 1000); #ifndef STANDALONE_UNITTEST PSI_stage_info old_stage; mysql_mutex_assert_owner(mutex_ptr); - if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0) - { - my_core::thd_enter_cond(current_thd, &m_cond, mutex_ptr, - &stage_waiting_on_row_lock2, &old_stage); + if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0) { + THD_ENTER_COND(current_thd, &m_cond, mutex_ptr, &stage_waiting_on_row_lock2, + &old_stage); /* After the mysql_cond_timedwait we need make this call - my_core::thd_exit_cond(thd, &old_stage); + THD_EXIT_COND(thd, &old_stage); to inform the SQL layer that KILLable wait has ended. However, that will cause mutex to be released. 
Defer the release until the mutex @@ -105,15 +95,14 @@ Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, } #endif - bool killed= false; + bool killed = false; - do - { - res= mysql_cond_timedwait(&m_cond, mutex_ptr, &wait_timeout); + do { + res = mysql_cond_timedwait(&m_cond, mutex_ptr, &wait_timeout); #ifndef STANDALONE_UNITTEST if (current_thd) - killed= my_core::thd_killed(current_thd); + killed = my_core::thd_killed(current_thd); #endif } while (!killed && res == EINTR); @@ -123,7 +112,6 @@ Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, return Status::OK(); } - /* @note @@ -154,32 +142,21 @@ Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, None of this looks like a problem for our use case. */ -void Rdb_cond_var::Notify() -{ - mysql_cond_signal(&m_cond); -} - +void Rdb_cond_var::Notify() { mysql_cond_signal(&m_cond); } /* @note This is called without holding the mutex that's used for waiting on the condition. See ::Notify(). */ -void Rdb_cond_var::NotifyAll() -{ - mysql_cond_broadcast(&m_cond); -} +void Rdb_cond_var::NotifyAll() { mysql_cond_broadcast(&m_cond); } - -Rdb_mutex::Rdb_mutex() -{ +Rdb_mutex::Rdb_mutex() { mysql_mutex_init(0 /* Don't register in P_S. */, &m_mutex, MY_MUTEX_INIT_FAST); } -Rdb_mutex::~Rdb_mutex() { - mysql_mutex_destroy(&m_mutex); -} +Rdb_mutex::~Rdb_mutex() { mysql_mutex_destroy(&m_mutex); } Status Rdb_mutex::Lock() { mysql_mutex_lock(&m_mutex); @@ -192,8 +169,7 @@ Status Rdb_mutex::Lock() { // If implementing a custom version of this class, the implementation may // choose to ignore the timeout. // Return OK on success, or other Status on failure. -Status Rdb_mutex::TryLockFor(int64_t timeout_time __attribute__((__unused__))) -{ +Status Rdb_mutex::TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) { /* Note: PThreads API has pthread_mutex_timedlock(), but mysql's mysql_mutex_* wrappers do not wrap that function. 
@@ -202,10 +178,8 @@ Status Rdb_mutex::TryLockFor(int64_t timeout_time __attribute__((__unused__))) return Status::OK(); } - #ifndef STANDALONE_UNITTEST -void Rdb_mutex::set_unlock_action(const PSI_stage_info* const old_stage_arg) -{ +void Rdb_mutex::set_unlock_action(const PSI_stage_info *const old_stage_arg) { DBUG_ASSERT(old_stage_arg != nullptr); mysql_mutex_assert_owner(&m_mutex); @@ -219,17 +193,16 @@ void Rdb_mutex::set_unlock_action(const PSI_stage_info* const old_stage_arg) // Unlock Mutex that was successfully locked by Lock() or TryLockUntil() void Rdb_mutex::UnLock() { #ifndef STANDALONE_UNITTEST - if (m_old_stage_info.count(current_thd) > 0) - { + if (m_old_stage_info.count(current_thd) > 0) { const std::shared_ptr old_stage = - m_old_stage_info[current_thd]; + m_old_stage_info[current_thd]; m_old_stage_info.erase(current_thd); /* The following will call mysql_mutex_unlock */ - my_core::thd_exit_cond(current_thd, old_stage.get()); + THD_EXIT_COND(current_thd, old_stage.get()); return; } #endif mysql_mutex_unlock(&m_mutex); } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h index 6edd78a1167..96ebd77a723 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.h +++ b/storage/rocksdb/rdb_mutex_wrapper.h @@ -32,10 +32,11 @@ namespace myrocks { -class Rdb_mutex: public rocksdb::TransactionDBMutex { - Rdb_mutex(const Rdb_mutex& p) = delete; - Rdb_mutex& operator = (const Rdb_mutex& p)=delete; - public: +class Rdb_mutex : public rocksdb::TransactionDBMutex { + Rdb_mutex(const Rdb_mutex &p) = delete; + Rdb_mutex &operator=(const Rdb_mutex &p) = delete; + +public: Rdb_mutex(); virtual ~Rdb_mutex(); @@ -53,27 +54,27 @@ class Rdb_mutex: public rocksdb::TransactionDBMutex { // TimedOut if timed out, // or other Status on failure. // If returned status is OK, TransactionDB will eventually call UnLock(). 
- virtual rocksdb::Status TryLockFor( - int64_t timeout_time __attribute__((__unused__))) override; + virtual rocksdb::Status + TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) override; // Unlock Mutex that was successfully locked by Lock() or TryLockUntil() virtual void UnLock() override; - private: +private: mysql_mutex_t m_mutex; friend class Rdb_cond_var; #ifndef STANDALONE_UNITTEST - void set_unlock_action(const PSI_stage_info* const old_stage_arg); - std::unordered_map> m_old_stage_info; + void set_unlock_action(const PSI_stage_info *const old_stage_arg); + std::unordered_map> m_old_stage_info; #endif }; +class Rdb_cond_var : public rocksdb::TransactionDBCondVar { + Rdb_cond_var(const Rdb_cond_var &) = delete; + Rdb_cond_var &operator=(const Rdb_cond_var &) = delete; -class Rdb_cond_var: public rocksdb::TransactionDBCondVar { - Rdb_cond_var(const Rdb_cond_var&) = delete; - Rdb_cond_var& operator=(const Rdb_cond_var&) = delete; - public: +public: Rdb_cond_var(); virtual ~Rdb_cond_var(); @@ -112,33 +113,30 @@ class Rdb_cond_var: public rocksdb::TransactionDBCondVar { // Unblocks all threads waiting on *this. virtual void NotifyAll() override; - private: +private: mysql_cond_t m_cond; }; - class Rdb_mutex_factory : public rocksdb::TransactionDBMutexFactory { - public: - Rdb_mutex_factory(const Rdb_mutex_factory&) = delete; - Rdb_mutex_factory& operator=(const Rdb_mutex_factory&) = delete; +public: + Rdb_mutex_factory(const Rdb_mutex_factory &) = delete; + Rdb_mutex_factory &operator=(const Rdb_mutex_factory &) = delete; Rdb_mutex_factory() {} - /* - Override parent class's virtual methods of interrest. - */ + /* + Override parent class's virtual methods of interrest. 
+ */ virtual std::shared_ptr AllocateMutex() override { - return - std::make_shared(); + return std::make_shared(); } virtual std::shared_ptr AllocateCondVar() override { - return - std::make_shared(); + return std::make_shared(); } virtual ~Rdb_mutex_factory() {} }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index 88d84061789..5ebc6cdab5c 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -35,71 +35,68 @@ namespace myrocks { // 2. Update sections (A), (B), and (C) below // 3. Update perf_context.test and show_engine.test -std::string rdb_pc_stat_types[]= -{ - // (A) These should be in the same order as the PC enum - "USER_KEY_COMPARISON_COUNT", - "BLOCK_CACHE_HIT_COUNT", - "BLOCK_READ_COUNT", - "BLOCK_READ_BYTE", - "BLOCK_READ_TIME", - "BLOCK_CHECKSUM_TIME", - "BLOCK_DECOMPRESS_TIME", - "INTERNAL_KEY_SKIPPED_COUNT", - "INTERNAL_DELETE_SKIPPED_COUNT", - "GET_SNAPSHOT_TIME", - "GET_FROM_MEMTABLE_TIME", - "GET_FROM_MEMTABLE_COUNT", - "GET_POST_PROCESS_TIME", - "GET_FROM_OUTPUT_FILES_TIME", - "SEEK_ON_MEMTABLE_TIME", - "SEEK_ON_MEMTABLE_COUNT", - "SEEK_CHILD_SEEK_TIME", - "SEEK_CHILD_SEEK_COUNT", - "SEEK_IN_HEAP_TIME", - "SEEK_INTERNAL_SEEK_TIME", - "FIND_NEXT_USER_ENTRY_TIME", - "WRITE_WAL_TIME", - "WRITE_MEMTABLE_TIME", - "WRITE_DELAY_TIME", - "WRITE_PRE_AND_POST_PROCESS_TIME", - "DB_MUTEX_LOCK_NANOS", - "DB_CONDITION_WAIT_NANOS", - "MERGE_OPERATOR_TIME_NANOS", - "READ_INDEX_BLOCK_NANOS", - "READ_FILTER_BLOCK_NANOS", - "NEW_TABLE_BLOCK_ITER_NANOS", - "NEW_TABLE_ITERATOR_NANOS", - "BLOCK_SEEK_NANOS", - "FIND_TABLE_NANOS", - "IO_THREAD_POOL_ID", - "IO_BYTES_WRITTEN", - "IO_BYTES_READ", - "IO_OPEN_NANOS", - "IO_ALLOCATE_NANOS", - "IO_WRITE_NANOS", - "IO_READ_NANOS", - "IO_RANGE_SYNC_NANOS", - "IO_LOGGER_NANOS" -}; +std::string rdb_pc_stat_types[] = { + // (A) These should be in the same order as the PC enum + 
"USER_KEY_COMPARISON_COUNT", + "BLOCK_CACHE_HIT_COUNT", + "BLOCK_READ_COUNT", + "BLOCK_READ_BYTE", + "BLOCK_READ_TIME", + "BLOCK_CHECKSUM_TIME", + "BLOCK_DECOMPRESS_TIME", + "INTERNAL_KEY_SKIPPED_COUNT", + "INTERNAL_DELETE_SKIPPED_COUNT", + "GET_SNAPSHOT_TIME", + "GET_FROM_MEMTABLE_TIME", + "GET_FROM_MEMTABLE_COUNT", + "GET_POST_PROCESS_TIME", + "GET_FROM_OUTPUT_FILES_TIME", + "SEEK_ON_MEMTABLE_TIME", + "SEEK_ON_MEMTABLE_COUNT", + "SEEK_CHILD_SEEK_TIME", + "SEEK_CHILD_SEEK_COUNT", + "SEEK_IN_HEAP_TIME", + "SEEK_INTERNAL_SEEK_TIME", + "FIND_NEXT_USER_ENTRY_TIME", + "WRITE_WAL_TIME", + "WRITE_MEMTABLE_TIME", + "WRITE_DELAY_TIME", + "WRITE_PRE_AND_POST_PROCESS_TIME", + "DB_MUTEX_LOCK_NANOS", + "DB_CONDITION_WAIT_NANOS", + "MERGE_OPERATOR_TIME_NANOS", + "READ_INDEX_BLOCK_NANOS", + "READ_FILTER_BLOCK_NANOS", + "NEW_TABLE_BLOCK_ITER_NANOS", + "NEW_TABLE_ITERATOR_NANOS", + "BLOCK_SEEK_NANOS", + "FIND_TABLE_NANOS", + "IO_THREAD_POOL_ID", + "IO_BYTES_WRITTEN", + "IO_BYTES_READ", + "IO_OPEN_NANOS", + "IO_ALLOCATE_NANOS", + "IO_WRITE_NANOS", + "IO_READ_NANOS", + "IO_RANGE_SYNC_NANOS", + "IO_LOGGER_NANOS"}; -#define IO_PERF_RECORD(_field_) \ - do { \ - if (rocksdb::perf_context._field_ > 0) \ - counters->m_value[idx] += rocksdb::perf_context._field_; \ - idx++; \ +#define IO_PERF_RECORD(_field_) \ + do { \ + if (rocksdb::perf_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::perf_context._field_; \ + idx++; \ } while (0) -#define IO_STAT_RECORD(_field_) \ - do { \ - if (rocksdb::iostats_context._field_ > 0) \ - counters->m_value[idx] += rocksdb::iostats_context._field_; \ - idx++; \ +#define IO_STAT_RECORD(_field_) \ + do { \ + if (rocksdb::iostats_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::iostats_context._field_; \ + idx++; \ } while (0) -static void harvest_diffs(Rdb_atomic_perf_counters * const counters) -{ +static void harvest_diffs(Rdb_atomic_perf_counters *const counters) { // (C) These should be in the same order as the PC enum - size_t idx= 
0; + size_t idx = 0; IO_PERF_RECORD(user_key_comparison_count); IO_PERF_RECORD(block_cache_hit_count); IO_PERF_RECORD(block_read_count); @@ -148,35 +145,29 @@ static void harvest_diffs(Rdb_atomic_perf_counters * const counters) #undef IO_PERF_DIFF #undef IO_STAT_DIFF - static Rdb_atomic_perf_counters rdb_global_perf_counters; -void rdb_get_global_perf_counters(Rdb_perf_counters* const counters) -{ +void rdb_get_global_perf_counters(Rdb_perf_counters *const counters) { DBUG_ASSERT(counters != nullptr); counters->load(rdb_global_perf_counters); } -void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters) -{ - for (int i= 0; i < PC_MAX_IDX; i++) { - m_value[i]= atomic_counters.m_value[i].load(std::memory_order_relaxed); +void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters) { + for (int i = 0; i < PC_MAX_IDX; i++) { + m_value[i] = atomic_counters.m_value[i].load(std::memory_order_relaxed); } } -bool Rdb_io_perf::start(const uint32_t perf_context_level) -{ - const rocksdb::PerfLevel perf_level= - static_cast(perf_context_level); +bool Rdb_io_perf::start(const uint32_t perf_context_level) { + const rocksdb::PerfLevel perf_level = + static_cast(perf_context_level); - if (rocksdb::GetPerfLevel() != perf_level) - { + if (rocksdb::GetPerfLevel() != perf_level) { rocksdb::SetPerfLevel(perf_level); } - if (perf_level == rocksdb::kDisable) - { + if (perf_level == rocksdb::kDisable) { return false; } @@ -185,38 +176,33 @@ bool Rdb_io_perf::start(const uint32_t perf_context_level) return true; } -void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) -{ - const rocksdb::PerfLevel perf_level= - static_cast(perf_context_level); +void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) { + const rocksdb::PerfLevel perf_level = + static_cast(perf_context_level); - if (perf_level == rocksdb::kDisable) - { + if (perf_level == rocksdb::kDisable) { return; } - if (m_atomic_counters) - { + if (m_atomic_counters) { 
harvest_diffs(m_atomic_counters); } harvest_diffs(&rdb_global_perf_counters); - if (m_shared_io_perf_read && - (rocksdb::perf_context.block_read_byte != 0 || - rocksdb::perf_context.block_read_count != 0 || - rocksdb::perf_context.block_read_time != 0)) - { + if (m_shared_io_perf_read && (rocksdb::perf_context.block_read_byte != 0 || + rocksdb::perf_context.block_read_count != 0 || + rocksdb::perf_context.block_read_time != 0)) { my_io_perf_t io_perf_read; io_perf_read.init(); - io_perf_read.bytes= rocksdb::perf_context.block_read_byte; - io_perf_read.requests= rocksdb::perf_context.block_read_count; + io_perf_read.bytes = rocksdb::perf_context.block_read_byte; + io_perf_read.requests = rocksdb::perf_context.block_read_count; /* Rocksdb does not distinguish between I/O service and wait time, so just use svc time. */ - io_perf_read.svc_time_max= io_perf_read.svc_time= + io_perf_read.svc_time_max = io_perf_read.svc_time = rocksdb::perf_context.block_read_time; m_shared_io_perf_read->sum(io_perf_read); @@ -224,17 +210,15 @@ void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) } if (m_stats) { - if (rocksdb::perf_context.internal_key_skipped_count != 0) - { + if (rocksdb::perf_context.internal_key_skipped_count != 0) { m_stats->key_skipped += rocksdb::perf_context.internal_key_skipped_count; } - if (rocksdb::perf_context.internal_delete_skipped_count != 0) - { + if (rocksdb::perf_context.internal_delete_skipped_count != 0) { m_stats->delete_skipped += rocksdb::perf_context.internal_delete_skipped_count; } } } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h index e6439c2e613..87c6426342e 100644 --- a/storage/rocksdb/rdb_perf_context.h +++ b/storage/rocksdb/rdb_perf_context.h @@ -80,8 +80,7 @@ class Rdb_perf_counters; A collection of performance counters that can be safely incremented by multiple threads since it stores atomic datapoints. 
*/ -struct Rdb_atomic_perf_counters -{ +struct Rdb_atomic_perf_counters { std::atomic_ullong m_value[PC_MAX_IDX]; }; @@ -89,11 +88,11 @@ struct Rdb_atomic_perf_counters A collection of performance counters that is meant to be incremented by a single thread. */ -class Rdb_perf_counters -{ - Rdb_perf_counters(const Rdb_perf_counters&) = delete; - Rdb_perf_counters& operator=(const Rdb_perf_counters&) = delete; - public: +class Rdb_perf_counters { + Rdb_perf_counters(const Rdb_perf_counters &) = delete; + Rdb_perf_counters &operator=(const Rdb_perf_counters &) = delete; + +public: Rdb_perf_counters() = default; uint64_t m_value[PC_MAX_IDX]; @@ -105,36 +104,34 @@ extern std::string rdb_pc_stat_types[PC_MAX_IDX]; /* Perf timers for data reads */ -class Rdb_io_perf -{ +class Rdb_io_perf { // Context management - Rdb_atomic_perf_counters *m_atomic_counters= nullptr; - my_io_perf_atomic_t *m_shared_io_perf_read= nullptr; - ha_statistics *m_stats= nullptr; + Rdb_atomic_perf_counters *m_atomic_counters = nullptr; + my_io_perf_atomic_t *m_shared_io_perf_read = nullptr; + ha_statistics *m_stats = nullptr; - public: - Rdb_io_perf(const Rdb_io_perf&) = delete; - Rdb_io_perf& operator=(const Rdb_io_perf&) = delete; +public: + Rdb_io_perf(const Rdb_io_perf &) = delete; + Rdb_io_perf &operator=(const Rdb_io_perf &) = delete; - void init(Rdb_atomic_perf_counters* const atomic_counters, - my_io_perf_atomic_t* const shared_io_perf_read, - ha_statistics* const stats) - { + void init(Rdb_atomic_perf_counters *const atomic_counters, + my_io_perf_atomic_t *const shared_io_perf_read, + ha_statistics *const stats) { DBUG_ASSERT(atomic_counters != nullptr); DBUG_ASSERT(shared_io_perf_read != nullptr); DBUG_ASSERT(stats != nullptr); - m_atomic_counters= atomic_counters; - m_shared_io_perf_read= shared_io_perf_read; - m_stats= stats; + m_atomic_counters = atomic_counters; + m_shared_io_perf_read = shared_io_perf_read; + m_stats = stats; } bool start(const uint32_t perf_context_level); void 
end_and_record(const uint32_t perf_context_level); - explicit Rdb_io_perf() : m_atomic_counters(nullptr), - m_shared_io_perf_read(nullptr), - m_stats(nullptr) {} + explicit Rdb_io_perf() + : m_atomic_counters(nullptr), m_shared_io_perf_read(nullptr), + m_stats(nullptr) {} }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index ce457cc73a7..cfbefb2ce6d 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -37,26 +37,20 @@ namespace myrocks { -Rdb_sst_file::Rdb_sst_file(rocksdb::DB* const db, - rocksdb::ColumnFamilyHandle* const cf, - const rocksdb::DBOptions& db_options, - const std::string& name, const bool tracing) : - m_db(db), - m_cf(cf), - m_db_options(db_options), - m_sst_file_writer(nullptr), - m_name(name), - m_tracing(tracing) -{ +Rdb_sst_file::Rdb_sst_file(rocksdb::DB *const db, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, + const std::string &name, const bool tracing) + : m_db(db), m_cf(cf), m_db_options(db_options), m_sst_file_writer(nullptr), + m_name(name), m_tracing(tracing) { DBUG_ASSERT(db != nullptr); DBUG_ASSERT(cf != nullptr); } -Rdb_sst_file::~Rdb_sst_file() -{ +Rdb_sst_file::~Rdb_sst_file() { // Make sure we clean up delete m_sst_file_writer; - m_sst_file_writer= nullptr; + m_sst_file_writer = nullptr; // In case something went wrong attempt to delete the temporary file. 
// If everything went fine that file will have been renamed and this @@ -64,98 +58,86 @@ Rdb_sst_file::~Rdb_sst_file() std::remove(m_name.c_str()); } -rocksdb::Status Rdb_sst_file::open() -{ +rocksdb::Status Rdb_sst_file::open() { DBUG_ASSERT(m_sst_file_writer == nullptr); rocksdb::ColumnFamilyDescriptor cf_descr; - rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); - if (!s.ok()) - { + rocksdb::Status s = m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) { return s; } // Create an sst file writer with the current options and comparator - const rocksdb::Comparator* comparator= m_cf->GetComparator(); + const rocksdb::Comparator *comparator = m_cf->GetComparator(); const rocksdb::EnvOptions env_options(m_db_options); const rocksdb::Options options(m_db_options, cf_descr.options); - m_sst_file_writer= + m_sst_file_writer = new rocksdb::SstFileWriter(env_options, options, comparator, m_cf); - s= m_sst_file_writer->Open(m_name); - if (m_tracing) - { + s = m_sst_file_writer->Open(m_name); + if (m_tracing) { // NO_LINT_DEBUG sql_print_information("SST Tracing: Open(%s) returned %s", m_name.c_str(), s.ok() ? 
"ok" : "not ok"); } - if (!s.ok()) - { + if (!s.ok()) { delete m_sst_file_writer; - m_sst_file_writer= nullptr; + m_sst_file_writer = nullptr; } return s; } -rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice& key, - const rocksdb::Slice& value) -{ +rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice &key, + const rocksdb::Slice &value) { DBUG_ASSERT(m_sst_file_writer != nullptr); // Add the specified key/value to the sst file writer return m_sst_file_writer->Add(key, value); } -std::string Rdb_sst_file::generateKey(const std::string& key) -{ - static char const hexdigit[]= { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' - }; +std::string Rdb_sst_file::generateKey(const std::string &key) { + static char const hexdigit[] = {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; std::string res; res.reserve(key.size() * 2); - for (auto ch : key) - { - res += hexdigit[((uint8_t) ch) >> 4]; - res += hexdigit[((uint8_t) ch) & 0x0F]; + for (auto ch : key) { + res += hexdigit[((uint8_t)ch) >> 4]; + res += hexdigit[((uint8_t)ch) & 0x0F]; } return res; } // This function is run by the background thread -rocksdb::Status Rdb_sst_file::commit() -{ +rocksdb::Status Rdb_sst_file::commit() { DBUG_ASSERT(m_sst_file_writer != nullptr); rocksdb::Status s; - rocksdb::ExternalSstFileInfo fileinfo; ///Finish may should be modified + rocksdb::ExternalSstFileInfo fileinfo; /// Finish may should be modified // Close out the sst file - s= m_sst_file_writer->Finish(&fileinfo); - if (m_tracing) - { + s = m_sst_file_writer->Finish(&fileinfo); + if (m_tracing) { // NO_LINT_DEBUG sql_print_information("SST Tracing: Finish returned %s", s.ok() ? 
"ok" : "not ok"); } - if (s.ok()) - { - if (m_tracing) - { + if (s.ok()) { + if (m_tracing) { // NO_LINT_DEBUG sql_print_information("SST Tracing: Adding file %s, smallest key: %s, " "largest key: %s, file size: %" PRIu64 ", " - "num_entries: %" PRIu64, fileinfo.file_path.c_str(), + "num_entries: %" PRIu64, + fileinfo.file_path.c_str(), generateKey(fileinfo.smallest_key).c_str(), generateKey(fileinfo.largest_key).c_str(), fileinfo.file_size, fileinfo.num_entries); @@ -169,10 +151,9 @@ rocksdb::Status Rdb_sst_file::commit() opts.snapshot_consistency = false; opts.allow_global_seqno = false; opts.allow_blocking_flush = false; - s= m_db->IngestExternalFile(m_cf, { m_name }, opts); + s = m_db->IngestExternalFile(m_cf, {m_name}, opts); - if (m_tracing) - { + if (m_tracing) { // NO_LINT_DEBUG sql_print_information("SST Tracing: AddFile(%s) returned %s", fileinfo.file_path.c_str(), @@ -181,106 +162,84 @@ rocksdb::Status Rdb_sst_file::commit() } delete m_sst_file_writer; - m_sst_file_writer= nullptr; + m_sst_file_writer = nullptr; return s; } -Rdb_sst_info::Rdb_sst_info(rocksdb::DB* const db, const std::string& tablename, - const std::string& indexname, - rocksdb::ColumnFamilyHandle* const cf, - const rocksdb::DBOptions& db_options, - const bool& tracing) : - m_db(db), - m_cf(cf), - m_db_options(db_options), - m_curr_size(0), - m_sst_count(0), - m_error_msg(""), +Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, + const std::string &indexname, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, + const bool &tracing) + : m_db(db), m_cf(cf), m_db_options(db_options), m_curr_size(0), + m_sst_count(0), m_error_msg(""), #if defined(RDB_SST_INFO_USE_THREAD) - m_queue(), - m_mutex(), - m_cond(), - m_thread(nullptr), - m_finished(false), + m_queue(), m_mutex(), m_cond(), m_thread(nullptr), m_finished(false), #endif - m_sst_file(nullptr), - m_tracing(tracing) -{ - m_prefix= db->GetName() + "/"; + m_sst_file(nullptr), 
m_tracing(tracing) { + m_prefix = db->GetName() + "/"; std::string normalized_table; - if (rdb_normalize_tablename(tablename.c_str(), &normalized_table)) - { + if (rdb_normalize_tablename(tablename.c_str(), &normalized_table)) { // We failed to get a normalized table name. This should never happen, // but handle it anyway. - m_prefix += "fallback_" + - std::to_string( - reinterpret_cast(reinterpret_cast(this))) + "_" + - indexname + "_"; - } - else - { + m_prefix += "fallback_" + std::to_string(reinterpret_cast( + reinterpret_cast(this))) + + "_" + indexname + "_"; + } else { m_prefix += normalized_table + "_" + indexname + "_"; } rocksdb::ColumnFamilyDescriptor cf_descr; - const rocksdb::Status s= m_cf->GetDescriptor(&cf_descr); - if (!s.ok()) - { + const rocksdb::Status s = m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) { // Default size if we can't get the cf's target size - m_max_size= 64*1024*1024; - } - else - { + m_max_size = 64 * 1024 * 1024; + } else { // Set the maximum size to 3 times the cf's target size - m_max_size= cf_descr.options.target_file_size_base * 3; + m_max_size = cf_descr.options.target_file_size_base * 3; } } -Rdb_sst_info::~Rdb_sst_info() -{ +Rdb_sst_info::~Rdb_sst_info() { DBUG_ASSERT(m_sst_file == nullptr); #if defined(RDB_SST_INFO_USE_THREAD) DBUG_ASSERT(m_thread == nullptr); #endif } -int Rdb_sst_info::open_new_sst_file() -{ +int Rdb_sst_info::open_new_sst_file() { DBUG_ASSERT(m_sst_file == nullptr); // Create the new sst file's name - const std::string name= m_prefix + std::to_string(m_sst_count++) + m_suffix; + const std::string name = m_prefix + std::to_string(m_sst_count++) + m_suffix; // Create the new sst file object - m_sst_file= new Rdb_sst_file(m_db, m_cf, m_db_options, name, m_tracing); + m_sst_file = new Rdb_sst_file(m_db, m_cf, m_db_options, name, m_tracing); // Open the sst file - const rocksdb::Status s= m_sst_file->open(); - if (!s.ok()) - { + const rocksdb::Status s = m_sst_file->open(); + if (!s.ok()) { 
set_error_msg(s.ToString()); delete m_sst_file; - m_sst_file= nullptr; - return 1; + m_sst_file = nullptr; + return HA_EXIT_FAILURE; } - m_curr_size= 0; + m_curr_size = 0; - return 0; + return HA_EXIT_SUCCESS; } -void Rdb_sst_info::close_curr_sst_file() -{ +void Rdb_sst_info::close_curr_sst_file() { DBUG_ASSERT(m_sst_file != nullptr); DBUG_ASSERT(m_curr_size > 0); #if defined(RDB_SST_INFO_USE_THREAD) - if (m_thread == nullptr) - { + if (m_thread == nullptr) { // We haven't already started a background thread, so start one - m_thread= new std::thread(thread_fcn, this); + m_thread = new std::thread(thread_fcn, this); } DBUG_ASSERT(m_thread != nullptr); @@ -294,9 +253,8 @@ void Rdb_sst_info::close_curr_sst_file() // Notify the background thread that there is a new entry in the queue m_cond.notify_one(); #else - const rocksdb::Status s= m_sst_file->commit(); - if (!s.ok()) - { + const rocksdb::Status s = m_sst_file->commit(); + if (!s.ok()) { set_error_msg(s.ToString()); } @@ -304,34 +262,28 @@ void Rdb_sst_info::close_curr_sst_file() #endif // Reset for next sst file - m_sst_file= nullptr; - m_curr_size= 0; + m_sst_file = nullptr; + m_curr_size = 0; } -int Rdb_sst_info::put(const rocksdb::Slice& key, - const rocksdb::Slice& value) -{ +int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { int rc; - if (m_curr_size >= m_max_size) - { + if (m_curr_size >= m_max_size) { // The current sst file has reached its maximum, close it out close_curr_sst_file(); // While we are here, check to see if we have had any errors from the // background thread - we don't want to wait for the end to report them - if (!m_error_msg.empty()) - { - return 1; + if (!m_error_msg.empty()) { + return HA_EXIT_FAILURE; } } - if (m_curr_size == 0) - { + if (m_curr_size == 0) { // We don't have an sst file open - open one - rc= open_new_sst_file(); - if (rc != 0) - { + rc = open_new_sst_file(); + if (rc != 0) { return rc; } } @@ -339,51 +291,45 @@ int Rdb_sst_info::put(const 
rocksdb::Slice& key, DBUG_ASSERT(m_sst_file != nullptr); // Add the key/value to the current sst file - const rocksdb::Status s= m_sst_file->put(key, value); - if (!s.ok()) - { + const rocksdb::Status s = m_sst_file->put(key, value); + if (!s.ok()) { set_error_msg(s.ToString()); - return 1; + return HA_EXIT_FAILURE; } m_curr_size += key.size() + value.size(); - return 0; + return HA_EXIT_SUCCESS; } -int Rdb_sst_info::commit() -{ - if (m_curr_size > 0) - { +int Rdb_sst_info::commit() { + if (m_curr_size > 0) { // Close out any existing files close_curr_sst_file(); } #if defined(RDB_SST_INFO_USE_THREAD) - if (m_thread != nullptr) - { + if (m_thread != nullptr) { // Tell the background thread we are done - m_finished= true; + m_finished = true; m_cond.notify_one(); // Wait for the background thread to finish m_thread->join(); delete m_thread; - m_thread= nullptr; + m_thread = nullptr; } #endif // Did we get any errors? - if (!m_error_msg.empty()) - { - return 1; + if (!m_error_msg.empty()) { + return HA_EXIT_FAILURE; } - return 0; + return HA_EXIT_SUCCESS; } -void Rdb_sst_info::set_error_msg(const std::string& msg) -{ +void Rdb_sst_info::set_error_msg(const std::string &msg) { #if defined(RDB_SST_INFO_USE_THREAD) // Both the foreground and background threads can set the error message // so lock the mutex to protect it. 
We only want the first error that @@ -391,41 +337,35 @@ void Rdb_sst_info::set_error_msg(const std::string& msg) const std::lock_guard guard(m_mutex); #endif my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str()); - if (m_error_msg.empty()) - { - m_error_msg= msg; + if (m_error_msg.empty()) { + m_error_msg = msg; } } #if defined(RDB_SST_INFO_USE_THREAD) // Static thread function - the Rdb_sst_info object is in 'object' -void Rdb_sst_info::thread_fcn(void* object) -{ - reinterpret_cast(object)->run_thread(); +void Rdb_sst_info::thread_fcn(void *object) { + reinterpret_cast(object)->run_thread(); } -void Rdb_sst_info::run_thread() -{ +void Rdb_sst_info::run_thread() { const std::unique_lock lk(m_mutex); - do - { + do { // Wait for notification or 1 second to pass m_cond.wait_for(lk, std::chrono::seconds(1)); // Inner loop pulls off all Rdb_sst_file entries and processes them - while (!m_queue.empty()) - { - const Rdb_sst_file* const sst_file= m_queue.front(); + while (!m_queue.empty()) { + const Rdb_sst_file *const sst_file = m_queue.front(); m_queue.pop(); // Release the lock - we don't want to hold it while committing the file lk.unlock(); // Close out the sst file and add it to the database - const rocksdb::Status s= sst_file->commit(); - if (!s.ok()) - { + const rocksdb::Status s = sst_file->commit(); + if (!s.ok()) { set_error_msg(s.ToString()); } @@ -443,14 +383,12 @@ void Rdb_sst_info::run_thread() } #endif -void Rdb_sst_info::init(const rocksdb::DB* const db) -{ - const std::string path= db->GetName() + FN_DIRSEP; - struct st_my_dir* const dir_info= my_dir(path.c_str(), MYF(MY_DONT_SORT)); +void Rdb_sst_info::init(const rocksdb::DB *const db) { + const std::string path = db->GetName() + FN_DIRSEP; + struct st_my_dir *const dir_info = my_dir(path.c_str(), MYF(MY_DONT_SORT)); // Access the directory - if (dir_info == nullptr) - { + if (dir_info == nullptr) { // NO_LINT_DEBUG sql_print_warning("RocksDB: Could not access database directory: 
%s", path.c_str()); @@ -458,16 +396,14 @@ void Rdb_sst_info::init(const rocksdb::DB* const db) } // Scan through the files in the directory - const struct fileinfo* file_info= dir_info->dir_entry; - for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++) - { + const struct fileinfo *file_info = dir_info->dir_entry; + for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++) { // find any files ending with m_suffix ... - const std::string name= file_info->name; - const size_t pos= name.find(m_suffix); - if (pos != std::string::npos && name.size() - pos == m_suffix.size()) - { + const std::string name = file_info->name; + const size_t pos = name.find(m_suffix); + if (pos != std::string::npos && name.size() - pos == m_suffix.size()) { // ... and remove them - const std::string fullname= path + name; + const std::string fullname = path + name; my_delete(fullname.c_str(), MYF(0)); } } @@ -476,5 +412,5 @@ void Rdb_sst_info::init(const rocksdb::DB* const db) my_dirend(dir_info); } -std::string Rdb_sst_info::m_suffix= ".bulk_load.tmp"; -} // namespace myrocks +std::string Rdb_sst_info::m_suffix = ".bulk_load.tmp"; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 933357c8f08..45d44fc848b 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h @@ -33,78 +33,77 @@ namespace myrocks { class Rdb_sst_file { - private: - Rdb_sst_file(const Rdb_sst_file& p)= delete; - Rdb_sst_file& operator=(const Rdb_sst_file& p)= delete; +private: + Rdb_sst_file(const Rdb_sst_file &p) = delete; + Rdb_sst_file &operator=(const Rdb_sst_file &p) = delete; - rocksdb::DB* const m_db; - rocksdb::ColumnFamilyHandle* const m_cf; - const rocksdb::DBOptions& m_db_options; - rocksdb::SstFileWriter* m_sst_file_writer; - const std::string m_name; - const bool m_tracing; + rocksdb::DB *const m_db; + rocksdb::ColumnFamilyHandle *const m_cf; + const rocksdb::DBOptions &m_db_options; + 
rocksdb::SstFileWriter *m_sst_file_writer; + const std::string m_name; + const bool m_tracing; - std::string generateKey(const std::string& key); + std::string generateKey(const std::string &key); - public: - Rdb_sst_file(rocksdb::DB* const db, - rocksdb::ColumnFamilyHandle* const cf, - const rocksdb::DBOptions& db_options, const std::string& name, +public: + Rdb_sst_file(rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const std::string &name, const bool tracing); ~Rdb_sst_file(); rocksdb::Status open(); - rocksdb::Status put(const rocksdb::Slice& key, const rocksdb::Slice& value); + rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value); rocksdb::Status commit(); }; class Rdb_sst_info { - private: - Rdb_sst_info(const Rdb_sst_info& p)= delete; - Rdb_sst_info& operator=(const Rdb_sst_info& p)= delete; +private: + Rdb_sst_info(const Rdb_sst_info &p) = delete; + Rdb_sst_info &operator=(const Rdb_sst_info &p) = delete; - rocksdb::DB* const m_db; - rocksdb::ColumnFamilyHandle* const m_cf; - const rocksdb::DBOptions& m_db_options; - uint64_t m_curr_size; - uint64_t m_max_size; - uint m_sst_count; - std::string m_error_msg; - std::string m_prefix; - static std::string m_suffix; + rocksdb::DB *const m_db; + rocksdb::ColumnFamilyHandle *const m_cf; + const rocksdb::DBOptions &m_db_options; + uint64_t m_curr_size; + uint64_t m_max_size; + uint m_sst_count; + std::string m_error_msg; + std::string m_prefix; + static std::string m_suffix; #if defined(RDB_SST_INFO_USE_THREAD) - std::queue m_queue; - std::mutex m_mutex; - std::condition_variable m_cond; - std::thread* m_thread; - bool m_finished; + std::queue m_queue; + std::mutex m_mutex; + std::condition_variable m_cond; + std::thread *m_thread; + bool m_finished; #endif - Rdb_sst_file* m_sst_file; - const bool m_tracing; + Rdb_sst_file *m_sst_file; + const bool m_tracing; int open_new_sst_file(); void close_curr_sst_file(); - void set_error_msg(const 
std::string& msg); + void set_error_msg(const std::string &msg); #if defined(RDB_SST_INFO_USE_THREAD) void run_thread(); - static void thread_fcn(void* object); + static void thread_fcn(void *object); #endif - public: - Rdb_sst_info(rocksdb::DB* const db, const std::string& tablename, - const std::string& indexname, - rocksdb::ColumnFamilyHandle* const cf, - const rocksdb::DBOptions& db_options, const bool &tracing); +public: + Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, + const std::string &indexname, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const bool &tracing); ~Rdb_sst_info(); - int put(const rocksdb::Slice& key, const rocksdb::Slice& value); + int put(const rocksdb::Slice &key, const rocksdb::Slice &value); int commit(); - const std::string& error_message() const { return m_error_msg; } + const std::string &error_message() const { return m_error_msg; } - static void init(const rocksdb::DB* const db); + static void init(const rocksdb::DB *const db); }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc index 3f00bc13325..0bc590e4cf8 100644 --- a/storage/rocksdb/rdb_threads.cc +++ b/storage/rocksdb/rdb_threads.cc @@ -16,7 +16,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation +#pragma implementation // gcc: Class implementation #endif /* The C++ file's header */ @@ -24,58 +24,64 @@ namespace myrocks { -void* Rdb_thread::thread_func(void* const thread_ptr) -{ +void *Rdb_thread::thread_func(void *const thread_ptr) { DBUG_ASSERT(thread_ptr != nullptr); - Rdb_thread* const thread= static_cast(thread_ptr); - if (!thread->m_run_once.exchange(true)) - { + Rdb_thread *const thread = static_cast(thread_ptr); + if (!thread->m_run_once.exchange(true)) { thread->run(); thread->uninit(); } return nullptr; } - void 
Rdb_thread::init( #ifdef HAVE_PSI_INTERFACE - my_core::PSI_mutex_key stop_bg_psi_mutex_key, - my_core::PSI_cond_key stop_bg_psi_cond_key + my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key #endif - ) -{ + ) { DBUG_ASSERT(!m_run_once); mysql_mutex_init(stop_bg_psi_mutex_key, &m_signal_mutex, MY_MUTEX_INIT_FAST); mysql_cond_init(stop_bg_psi_cond_key, &m_signal_cond, nullptr); } - -void Rdb_thread::uninit() -{ +void Rdb_thread::uninit() { mysql_mutex_destroy(&m_signal_mutex); mysql_cond_destroy(&m_signal_cond); } - -int Rdb_thread::create_thread( +int Rdb_thread::create_thread(const std::string &thread_name #ifdef HAVE_PSI_INTERFACE - PSI_thread_key background_psi_thread_key + , + PSI_thread_key background_psi_thread_key #endif - ) -{ - return mysql_thread_create(background_psi_thread_key, - &m_handle, nullptr, thread_func, this); + ) { + DBUG_ASSERT(!thread_name.empty()); + + int err = mysql_thread_create(background_psi_thread_key, &m_handle, nullptr, + thread_func, this); + + if (!err) { + /* + mysql_thread_create() ends up doing some work underneath and setting the + thread name as "my-func". This isn't what we want. Our intent is to name + the threads according to their purpose so that when displayed under the + debugger then they'll be more easily identifiable. Therefore we'll reset + the name if thread was successfully created. 
+ */ + err = pthread_setname_np(m_handle, thread_name.c_str()); + } + + return err; } - -void Rdb_thread::signal(const bool &stop_thread) -{ +void Rdb_thread::signal(const bool &stop_thread) { mysql_mutex_lock(&m_signal_mutex); if (stop_thread) { - m_stop= true; + m_stop = true; } mysql_cond_signal(&m_signal_cond); mysql_mutex_unlock(&m_signal_mutex); } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index c06dba438c0..b7890b03576 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -16,6 +16,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #pragma once +/* C++ standard header files */ +#include + /* MySQL includes */ #include "./my_global.h" #include @@ -26,91 +29,81 @@ namespace myrocks { -class Rdb_thread -{ - private: +class Rdb_thread { +private: // Disable Copying - Rdb_thread(const Rdb_thread&); - Rdb_thread& operator=(const Rdb_thread&); + Rdb_thread(const Rdb_thread &); + Rdb_thread &operator=(const Rdb_thread &); // Make sure we run only once std::atomic_bool m_run_once; - pthread_t m_handle; + pthread_t m_handle; - protected: - mysql_mutex_t m_signal_mutex; - mysql_cond_t m_signal_cond; - bool m_stop= false; +protected: + mysql_mutex_t m_signal_mutex; + mysql_cond_t m_signal_cond; + bool m_stop = false; - public: +public: Rdb_thread() : m_run_once(false) {} #ifdef HAVE_PSI_INTERFACE - void init(my_core::PSI_mutex_key stop_bg_psi_mutex_key, - my_core::PSI_cond_key stop_bg_psi_cond_key); - int create_thread( - my_core::PSI_thread_key background_psi_thread_key); + void init(my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key); + int create_thread(const std::string &thread_name, + my_core::PSI_thread_key background_psi_thread_key); #else void init(); - int create_thread(); + int create_thread(const std::string &thread_name); #endif virtual void run(void) = 0; - void signal(const bool 
&stop_thread= false); + void signal(const bool &stop_thread = false); - int join() - { - return pthread_join(m_handle, nullptr); - } + int join() { return pthread_join(m_handle, nullptr); } void uninit(); virtual ~Rdb_thread() {} - private: - static void* thread_func(void* const thread_ptr); +private: + static void *thread_func(void *const thread_ptr); }; - /** MyRocks background thread control N.B. This is on top of RocksDB's own background threads (@see rocksdb::CancelAllBackgroundWork()) */ -class Rdb_background_thread : public Rdb_thread -{ - private: - bool m_save_stats= false; +class Rdb_background_thread : public Rdb_thread { +private: + bool m_save_stats = false; - void reset() - { + void reset() { mysql_mutex_assert_owner(&m_signal_mutex); - m_stop= false; - m_save_stats= false; + m_stop = false; + m_save_stats = false; } - public: +public: virtual void run() override; - void request_save_stats() - { + void request_save_stats() { mysql_mutex_lock(&m_signal_mutex); - m_save_stats= true; + m_save_stats = true; mysql_mutex_unlock(&m_signal_mutex); } }; - /* Drop index thread control */ -struct Rdb_drop_index_thread : public Rdb_thread -{ +struct Rdb_drop_index_thread : public Rdb_thread { virtual void run() override; }; -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index 599f11c5681..900d0f9be19 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -32,14 +32,12 @@ namespace myrocks { /* Skip past any spaces in the input */ -const char* rdb_skip_spaces(const struct charset_info_st* const cs, - const char *str) -{ +const char *rdb_skip_spaces(const struct charset_info_st *const cs, + const char *str) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); - while (my_isspace(cs, *str)) - { + while (my_isspace(cs, *str)) { str++; } @@ -51,18 +49,15 @@ const char* rdb_skip_spaces(const struct charset_info_st* const cs, Note that str1 can be longer but we only 
compare up to the number of characters in str2. */ -bool rdb_compare_strings_ic(const char* const str1, const char* const str2) -{ +bool rdb_compare_strings_ic(const char *const str1, const char *const str2) { DBUG_ASSERT(str1 != nullptr); DBUG_ASSERT(str2 != nullptr); // Scan through the strings size_t ii; - for (ii = 0; str2[ii]; ii++) - { + for (ii = 0; str2[ii]; ii++) { if (toupper(static_cast(str1[ii])) != - toupper(static_cast(str2[ii]))) - { + toupper(static_cast(str2[ii]))) { return false; } } @@ -74,11 +69,10 @@ bool rdb_compare_strings_ic(const char* const str1, const char* const str2) Scan through an input string looking for pattern, ignoring case and skipping all data enclosed in quotes. */ -const char* rdb_find_in_string(const char *str, const char *pattern, - bool * const succeeded) -{ - char quote = '\0'; - bool escape = false; +const char *rdb_find_in_string(const char *str, const char *pattern, + bool *const succeeded) { + char quote = '\0'; + bool escape = false; DBUG_ASSERT(str != nullptr); DBUG_ASSERT(pattern != nullptr); @@ -86,38 +80,30 @@ const char* rdb_find_in_string(const char *str, const char *pattern, *succeeded = false; - for ( ; *str; str++) - { + for (; *str; str++) { /* If we found a our starting quote character */ - if (*str == quote) - { + if (*str == quote) { /* If it was escaped ignore it */ - if (escape) - { + if (escape) { escape = false; } /* Otherwise we are now outside of the quoted string */ - else - { + else { quote = '\0'; } } /* Else if we are currently inside a quoted string? 
*/ - else if (quote != '\0') - { + else if (quote != '\0') { /* If so, check for the escape character */ escape = !escape && *str == '\\'; } /* Else if we found a quote we are starting a quoted string */ - else if (*str == '"' || *str == '\'' || *str == '`') - { + else if (*str == '"' || *str == '\'' || *str == '`') { quote = *str; } /* Else we are outside of a quoted string - look for our pattern */ - else - { - if (rdb_compare_strings_ic(str, pattern)) - { + else { + if (rdb_compare_strings_ic(str, pattern)) { *succeeded = true; return str; } @@ -132,10 +118,9 @@ const char* rdb_find_in_string(const char *str, const char *pattern, /* See if the next valid token matches the specified string */ -const char* rdb_check_next_token(const struct charset_info_st* const cs, - const char *str, const char* const pattern, - bool* const succeeded) -{ +const char *rdb_check_next_token(const struct charset_info_st *const cs, + const char *str, const char *const pattern, + bool *const succeeded) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); DBUG_ASSERT(pattern != nullptr); @@ -145,8 +130,7 @@ const char* rdb_check_next_token(const struct charset_info_st* const cs, str = rdb_skip_spaces(cs, str); // See if the next characters match the pattern - if (rdb_compare_strings_ic(str, pattern)) - { + if (rdb_compare_strings_ic(str, pattern)) { *succeeded = true; return str + strlen(pattern); } @@ -158,43 +142,35 @@ const char* rdb_check_next_token(const struct charset_info_st* const cs, /* Parse id */ -const char* rdb_parse_id(const struct charset_info_st* const cs, - const char *str, std::string * const id) -{ +const char *rdb_parse_id(const struct charset_info_st *const cs, + const char *str, std::string *const id) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); // Move past any spaces str = rdb_skip_spaces(cs, str); - if (*str == '\0') - { + if (*str == '\0') { return str; } char quote = '\0'; - if (*str == '`' || *str == '"') - { + if (*str == '`' || *str == 
'"') { quote = *str++; } - size_t len = 0; - const char* start = str; + size_t len = 0; + const char *start = str; - if (quote != '\0') - { - for ( ; ; ) - { - if (*str == '\0') - { + if (quote != '\0') { + for (;;) { + if (*str == '\0') { return str; } - if (*str == quote) - { + if (*str == quote) { str++; - if (*str != quote) - { + if (*str != quote) { break; } } @@ -202,27 +178,21 @@ const char* rdb_parse_id(const struct charset_info_st* const cs, str++; len++; } - } - else - { - while (!my_isspace(cs, *str) && *str != '(' && *str != ')' && - *str != '.' && *str != ',' && *str != '\0') - { + } else { + while (!my_isspace(cs, *str) && *str != '(' && *str != ')' && *str != '.' && + *str != ',' && *str != '\0') { str++; len++; } } // If the user requested the id create it and return it - if (id != nullptr) - { + if (id != nullptr) { *id = std::string(""); id->reserve(len); - while (len--) - { + while (len--) { *id += *start; - if (*start++ == quote) - { + if (*start++ == quote) { start++; } } @@ -234,8 +204,8 @@ const char* rdb_parse_id(const struct charset_info_st* const cs, /* Skip id */ -const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) -{ +const char *rdb_skip_id(const struct charset_info_st *const cs, + const char *str) { DBUG_ASSERT(cs != nullptr); DBUG_ASSERT(str != nullptr); @@ -243,19 +213,16 @@ const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) } static const std::size_t rdb_hex_bytes_per_char = 2; -static const std::array rdb_hexdigit = -{ - { '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' } -}; +static const std::array rdb_hexdigit = {{'0', '1', '2', '3', '4', '5', + '6', '7', '8', '9', 'a', 'b', + 'c', 'd', 'e', 'f'}}; /* Convert data into a hex string with optional maximum length. If the data is larger than the maximum length trancate it and append "..". 
*/ std::string rdb_hexdump(const char *data, const std::size_t data_len, - const std::size_t maxsize) -{ + const std::size_t maxsize) { DBUG_ASSERT(data != nullptr); // Count the elements in the string @@ -264,8 +231,7 @@ std::string rdb_hexdump(const char *data, const std::size_t data_len, std::size_t len = elems * rdb_hex_bytes_per_char; std::string str; - if (maxsize != 0 && len > maxsize) - { + if (maxsize != 0 && len > maxsize) { // If the amount of output is too large adjust the settings // and leave room for the ".." at the end elems = (maxsize - 2) / rdb_hex_bytes_per_char; @@ -276,34 +242,29 @@ std::string rdb_hexdump(const char *data, const std::size_t data_len, str.reserve(len); // Loop through the input data and build the output string - for (std::size_t ii = 0; ii < elems; ii++, data++) - { - uint8_t ch = (uint8_t) *data; + for (std::size_t ii = 0; ii < elems; ii++, data++) { + uint8_t ch = (uint8_t)*data; str += rdb_hexdigit[ch >> 4]; str += rdb_hexdigit[ch & 0x0F]; } // If we can't fit it all add the ".." 
- if (elems != data_len) - { + if (elems != data_len) { str += ".."; } return str; } - /* Attempt to access the database subdirectory to see if it exists */ -bool rdb_database_exists(const std::string& db_name) -{ - const std::string dir = std::string(mysql_real_data_home) + FN_DIRSEP - + db_name; - struct st_my_dir* const dir_info = my_dir(dir.c_str(), - MYF(MY_DONT_SORT | MY_WANT_STAT)); - if (dir_info == nullptr) - { +bool rdb_database_exists(const std::string &db_name) { + const std::string dir = + std::string(mysql_real_data_home) + FN_DIRSEP + db_name; + struct st_my_dir *const dir_info = + my_dir(dir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT)); + if (dir_info == nullptr) { return false; } @@ -311,4 +272,4 @@ bool rdb_database_exists(const std::string& db_name) return true; } -} // namespace myrocks +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 7d63ff9c220..b337ed108d3 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -26,7 +26,7 @@ #include "rocksdb/slice.h" #ifdef HAVE_JEMALLOC - #include +#include #endif namespace myrocks { @@ -38,7 +38,7 @@ namespace myrocks { #ifndef interface #define interface struct -#endif // interface +#endif // interface /* Introduce C-style pseudo-namespaces, a handy way to make code more readble @@ -56,7 +56,7 @@ namespace myrocks { // to non-obvious MySQL functions, like the ones that do not start with well // known prefixes: "my_", "sql_", and "mysql_". 
#define my_core -#endif // my_core +#endif // my_core /* The intent behind a SHIP_ASSERT() macro is to have a mechanism for validating @@ -74,14 +74,14 @@ namespace myrocks { */ #ifndef SHIP_ASSERT -#define SHIP_ASSERT(expr) \ - do { \ - if (!(expr)) { \ - my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \ - abort_with_stack_traces(); \ - } \ +#define SHIP_ASSERT(expr) \ + do { \ + if (!(expr)) { \ + my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \ + abort_with_stack_traces(); \ + } \ } while (0) -#endif // SHIP_ASSERT +#endif // SHIP_ASSERT /* Assert a implies b. @@ -97,23 +97,50 @@ namespace myrocks { a and b must be both true or both false. */ #ifndef DBUG_ASSERT_IFF -#define DBUG_ASSERT_IFF(a, b) \ +#define DBUG_ASSERT_IFF(a, b) \ DBUG_ASSERT(static_cast(a) == static_cast(b)) #endif +/* + Intent behind this macro is to avoid manually typing the function name every + time we want to add the debugging statement and use the compiler for this + work. This avoids typical refactoring problems when one renames a function, + but the tracing message doesn't get updated. + + We could use __func__ or __FUNCTION__ macros, but __PRETTY_FUNCTION__ + contains the signature of the function as well as its bare name and provides + therefore more context when interpreting the logs. +*/ +#define DBUG_ENTER_FUNC() DBUG_ENTER(__PRETTY_FUNCTION__) + +/* + Error handling pattern used across MySQL abides by the following rules: "All + functions that can report an error (usually an allocation error), should + return 0/FALSE/false on success, 1/TRUE/true on failure." + + https://dev.mysql.com/doc/internals/en/additional-suggestions.html has more + details. + + To increase the comprehension and readability of MyRocks codebase we'll use + constants similar to ones from C standard (EXIT_SUCCESS and EXIT_FAILURE) to + make sure that both failure and success paths are clearly identifiable. The + definitions of FALSE and TRUE come from . 
+*/ +#define HA_EXIT_SUCCESS FALSE +#define HA_EXIT_FAILURE TRUE + /* Generic constant. */ -const size_t RDB_MAX_HEXDUMP_LEN= 1000; +const size_t RDB_MAX_HEXDUMP_LEN = 1000; /* Helper function to get an NULL terminated uchar* out of a given MySQL String. */ -inline uchar* rdb_mysql_str_to_uchar_str(my_core::String *str) -{ +inline uchar *rdb_mysql_str_to_uchar_str(my_core::String *str) { DBUG_ASSERT(str != nullptr); - return reinterpret_cast(str->c_ptr()); + return reinterpret_cast(str->c_ptr()); } /* @@ -121,17 +148,15 @@ inline uchar* rdb_mysql_str_to_uchar_str(my_core::String *str) given STL string. */ -inline const uchar* rdb_std_str_to_uchar_ptr(const std::string &str) -{ - return reinterpret_cast(str.data()); +inline const uchar *rdb_std_str_to_uchar_ptr(const std::string &str) { + return reinterpret_cast(str.data()); } /* Helper function to convert seconds to milliseconds. */ -constexpr int rdb_convert_sec_to_ms(int sec) -{ +constexpr int rdb_convert_sec_to_ms(int sec) { return std::chrono::milliseconds(std::chrono::seconds(sec)).count(); } @@ -140,10 +165,9 @@ constexpr int rdb_convert_sec_to_ms(int sec) given RocksDB item. */ -inline const uchar* rdb_slice_to_uchar_ptr(const rocksdb::Slice *item) -{ +inline const uchar *rdb_slice_to_uchar_ptr(const rocksdb::Slice *item) { DBUG_ASSERT(item != nullptr); - return reinterpret_cast(item->data()); + return reinterpret_cast(item->data()); } /* @@ -152,12 +176,11 @@ inline const uchar* rdb_slice_to_uchar_ptr(const rocksdb::Slice *item) scenario for cases where it has been verified that this intervention has noticeable benefits. */ -inline int purge_all_jemalloc_arenas() -{ +inline int purge_all_jemalloc_arenas() { #ifdef HAVE_JEMALLOC unsigned narenas = 0; size_t sz = sizeof(unsigned); - char name[25] = { 0 }; + char name[25] = {0}; // Get the number of arenas first. Please see `jemalloc` documentation for // all the various options. 
@@ -184,28 +207,28 @@ inline int purge_all_jemalloc_arenas() Helper functions to parse strings. */ -const char* rdb_skip_spaces(const struct charset_info_st* const cs, +const char *rdb_skip_spaces(const struct charset_info_st *const cs, const char *str) - __attribute__((__nonnull__, __warn_unused_result__)); + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); -bool rdb_compare_strings_ic(const char* const str1, const char* const str2) - __attribute__((__nonnull__, __warn_unused_result__)); +bool rdb_compare_strings_ic(const char *const str1, const char *const str2) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); -const char* rdb_find_in_string(const char *str, const char *pattern, - bool * const succeeded) - __attribute__((__nonnull__, __warn_unused_result__)); +const char *rdb_find_in_string(const char *str, const char *pattern, + bool *const succeeded) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); -const char* rdb_check_next_token(const struct charset_info_st* const cs, - const char *str, const char* const pattern, - bool * const succeeded) - __attribute__((__nonnull__, __warn_unused_result__)); +const char *rdb_check_next_token(const struct charset_info_st *const cs, + const char *str, const char *const pattern, + bool *const succeeded) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); -const char* rdb_parse_id(const struct charset_info_st* const cs, - const char *str, std::string * const id) - __attribute__((__nonnull__(1, 2), __warn_unused_result__)); +const char *rdb_parse_id(const struct charset_info_st *const cs, + const char *str, std::string *const id) + MY_ATTRIBUTE((__nonnull__(1, 2), __warn_unused_result__)); -const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) - __attribute__((__nonnull__, __warn_unused_result__)); +const char *rdb_skip_id(const struct charset_info_st *const cs, const char *str) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); /* Helper functions to populate strings. 
@@ -213,11 +236,11 @@ const char* rdb_skip_id(const struct charset_info_st* const cs, const char *str) std::string rdb_hexdump(const char *data, const std::size_t data_len, const std::size_t maxsize = 0) - __attribute__((__nonnull__)); + MY_ATTRIBUTE((__nonnull__)); /* Helper function to see if a database exists */ -bool rdb_database_exists(const std::string& db_name); +bool rdb_database_exists(const std::string &db_name); -} // namespace myrocks +} // namespace myrocks From 884fd9ac2ba59a8d6a86a53bb52df175e130f38b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 7 Feb 2017 20:40:55 +0200 Subject: [PATCH 137/233] Make RocksDB run git submodule init and update if rocksdb is not fetched --- storage/rocksdb/CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index e9e63dcfcd7..e14cbd7f31a 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -1,5 +1,10 @@ # TODO: Copyrights - +IF(NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile AND GIT_EXECUTABLE) + EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule init + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") + EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") +ENDIF() IF (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile") MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. 
Try \"git submodule update\".") From 840f8eab71bfccaef147d54445e33735e07932b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 7 Feb 2017 22:06:59 +0200 Subject: [PATCH 138/233] Make mysql_ldb work on clang --- storage/rocksdb/tools/mysql_ldb.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc index a88d99a54c7..4664aa3f3d1 100644 --- a/storage/rocksdb/tools/mysql_ldb.cc +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -9,7 +9,7 @@ int main(int argc, char** argv) { rocksdb::Options db_options; - const myrocks::Rdb_pk_comparator pk_comparator; + myrocks::Rdb_pk_comparator pk_comparator; db_options.comparator= &pk_comparator; rocksdb::LDBTool tool; From f46176cbefa7d0feae6c434f3b75308cc2e6b2a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 7 Feb 2017 22:10:38 +0200 Subject: [PATCH 139/233] Make rocksdb build with clang We need to provide -fPIC for ROCKSDB files as well as -frtti if we compile with Clang --- storage/rocksdb/CMakeLists.txt | 7 ++++--- storage/rocksdb/build_rocksdb.cmake | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index e14cbd7f31a..106330a2c30 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -32,7 +32,7 @@ ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3)) SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") ENDIF() - SET(CXX11_FLAGS "-stdlib=libc++ -std=c++11") + SET(CXX11_FLAGS "-std=c++11 -stdlib=libstdc++") ELSEIF(MSVC) IF (MSVC_VERSION LESS 1900) SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") @@ -96,7 +96,8 @@ ADD_DEPENDENCIES(rocksdb_aux_lib GenError) TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) TARGET_LINK_LIBRARIES(rocksdb_se rocksdb_aux_lib) -IF(CMAKE_COMPILER_IS_GNUCXX) +IF(CMAKE_CXX_COMPILER_ID MATCHES 
"GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + # MARIAROCKS_NOT_YET: Add -frtti flag when compiling RocksDB files. # TODO: is this the right way to do this? # - SQL layer and storage/rocksdb/*.cc are compiled with -fnortti @@ -139,7 +140,7 @@ TARGET_LINK_LIBRARIES(ldb rocksdb_tools rocksdblib) MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc) TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) -IF(CMAKE_COMPILER_IS_GNUCXX) +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") SET_TARGET_PROPERTIES(rocksdb_tools sst_dump ldb mysql_ldb PROPERTIES COMPILE_FLAGS -frtti) ENDIF() IF(MSVC) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 35e83af73dc..14dfe746ae1 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -332,6 +332,6 @@ list(APPEND SOURCES ${CMAKE_CURRENT_BINARY_DIR}/build_version.cc) ADD_CONVENIENCE_LIBRARY(rocksdblib STATIC ${SOURCES}) target_link_libraries(rocksdblib ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) -if(CMAKE_COMPILER_IS_GNUCXX) - set_target_properties(rocksdblib PROPERTIES COMPILE_FLAGS "-fno-builtin-memcmp -frtti") +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + set_target_properties(rocksdblib PROPERTIES COMPILE_FLAGS "-fPIC -fno-builtin-memcmp -frtti") endif() From e2f3739aa1eb86c1fd3fb15589b28aefe9b51c61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 7 Feb 2017 22:12:06 +0200 Subject: [PATCH 140/233] Ignore dynamically generated build_version.cc file --- storage/rocksdb/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 storage/rocksdb/.gitignore diff --git a/storage/rocksdb/.gitignore b/storage/rocksdb/.gitignore new file mode 100644 index 00000000000..23457b2d17b --- /dev/null +++ b/storage/rocksdb/.gitignore @@ -0,0 +1 @@ +build_version.cc From 241e8a15a29902e72060d2e74b544453ddaa056a Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 7 Feb 2017 22:12:36 +0200 Subject: [PATCH 141/233] Ignore "invisible" files --- storage/rocksdb/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/.gitignore b/storage/rocksdb/.gitignore index 23457b2d17b..adf3e154c36 100644 --- a/storage/rocksdb/.gitignore +++ b/storage/rocksdb/.gitignore @@ -1 +1,2 @@ build_version.cc +.* From de49fd842a8132abb94a6ee25a84fb95d7d9aaaa Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Thu, 9 Feb 2017 16:55:02 +0000 Subject: [PATCH 142/233] RocksDB : Add lookup / compiling with optional compression libraries. This change add WITH_ROCKSDB_{LZ4,BZIP2,ZSTD,snappy} CMake variables that can be set to ON/OFF/AUTO. If variable has default value AUTO, rocksdb links with corresponding compression library. OFF disables compiling/linking with specific compression library, ON forces compiling with it (cmake would throw error if library is not available) Support for ZLIB is added unconditionally, as it is always there. 
--- cmake/FindLZ4.cmake | 9 ++++++ cmake/FindZSTD.cmake | 18 +++++++++++ storage/rocksdb/CMakeLists.txt | 31 ------------------ storage/rocksdb/build_rocksdb.cmake | 49 +++++++++++++++++++++++------ 4 files changed, 66 insertions(+), 41 deletions(-) create mode 100644 cmake/FindLZ4.cmake create mode 100644 cmake/FindZSTD.cmake diff --git a/cmake/FindLZ4.cmake b/cmake/FindLZ4.cmake new file mode 100644 index 00000000000..e97dd63e2b0 --- /dev/null +++ b/cmake/FindLZ4.cmake @@ -0,0 +1,9 @@ +find_path(LZ4_INCLUDE_DIR NAMES lz4.h) +find_library(LZ4_LIBRARY NAMES lz4) + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS( + LZ4 DEFAULT_MSG + LZ4_LIBRARY LZ4_INCLUDE_DIR) + +mark_as_advanced(LZ4_INCLUDE_DIR LZ4_LIBRARY) diff --git a/cmake/FindZSTD.cmake b/cmake/FindZSTD.cmake new file mode 100644 index 00000000000..0fd73501327 --- /dev/null +++ b/cmake/FindZSTD.cmake @@ -0,0 +1,18 @@ +find_path( + ZSTD_INCLUDE_DIR + NAMES "zstd.h" +) + +find_library( + ZSTD_LIBRARY + NAMES zstd +) + +set(ZSTD_LIBRARIES ${ZSTD_LIBRARY}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args( + ZSTD DEFAULT_MSG ZSTD_INCLUDE_DIR ZSTD_LIBRARIES) + +mark_as_advanced(ZSTD_INCLUDE_DIR ZSTD_LIBRARIES ZSTD_FOUND) + diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 106330a2c30..0c8025d3d18 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -152,34 +152,3 @@ IF(MSVC) # Some checks in C++ runtime that make debug build much slower ADD_DEFINITIONS(-D_ITERATOR_DEBUG_LEVEL=0) ENDIF() - -# Optional compression libraries. -# -# TODO: search compression libraries properly. 
-# Use FIND_PACKAGE, CHECK_LIBRARY_EXISTS etc -IF(MARIAROCKS_NOT_YET) -IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "") - SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_SNAPPY}/libsnappy${PIC_EXT}.a) - ADD_DEFINITIONS(-DSNAPPY) -ENDIF() - -IF (NOT "$ENV{WITH_LZ4}" STREQUAL "") - SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_LZ4}/liblz4${PIC_EXT}.a) - ADD_DEFINITIONS(-DLZ4) -ENDIF() - -IF (NOT "$ENV{WITH_BZ2}" STREQUAL "") - SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_BZ2}/libbz2${PIC_EXT}.a) - ADD_DEFINITIONS(-DBZIP2) -ENDIF() - -# link ZSTD only if instructed -IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") - SET(rocksdb_static_libs ${rocksdb_static_libs} - $ENV{WITH_ZSTD}/libzstd${PIC_EXT}.a) - ADD_DEFINITIONS(-DZSTD) -ENDIF() -ENDIF(MARIAROCKS_NOT_YET) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 14dfe746ae1..a482172f000 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -11,8 +11,6 @@ INCLUDE_DIRECTORIES( ${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src ) - - list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") if(WIN32) @@ -29,18 +27,49 @@ else() add_definitions(-DROCKSDB_JEMALLOC) set(WITH_JEMALLOC ON) endif() - option(WITH_ROCKSDB_SNAPPY "build RocksDB with SNAPPY" OFF) - if(WITH_ROCKSDB_SNAPPY) - find_package(snappy REQUIRED) - add_definitions(-DSNAPPY) - include_directories(${SNAPPY_INCLUDE_DIR}) - list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES}) - endif() endif() +# Optional compression libraries. +foreach(compression_lib LZ4 BZIP2 ZSTD snappy) + FIND_PACKAGE(${compression_lib} QUIET) + SET(WITH_ROCKSDB_${compression_lib} AUTO CACHE STRING + "Build RocksDB with ${compression_lib} compression. 
Possible values are 'ON', 'OFF', 'AUTO' and default is 'AUTO'") + if(${WITH_ROCKSDB_${compression_lib}} STREQUAL "ON" AND NOT ${${compression_lib}_FOUND}) + MESSAGE(FATAL_ERROR + "${compression_lib} library was not found, but WITH_ROCKSDB${compression_lib} option is ON.\ + Either set WITH_ROCKSDB${compression_lib} to OFF, or make sure ${compression_lib} is installed") + endif() +endforeach() + +if(LZ4_FOUND AND (NOT WITH_ROCKSDB_LZ4 STREQUAL "OFF")) + add_definitions(-DLZ4) + include_directories(${LZ4_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${LZ4_LIBRARY}) +endif() + +if(BZIP2_FOUND AND (NOT WITH_ROCKSDB_BZIP2 STREQUAL "OFF")) + add_definitions(-DBZIP2) + include_directories(${BZIP2_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${BZIP2_LIBRARIES}) +endif() + +if(SNAPPY_FOUND AND (NOT WITH_ROCKSDB_SNAPPY STREQUAL "OFF")) + add_definitions(-DSNAPPY) + include_directories(${SNAPPY_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES}) +endif() + +if(ZSTD_FOUND AND (NOT WITH_ROCKSDB_ZSTD STREQUAL "OFF")) + add_definitions(-DZSTD) + include_directories(${ZSTD_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARY}) +endif() + +add_definitions(-DZLIB) +list(APPEND THIRDPARTY_LIBS ${ZLIB_LIBRARY}) if(CMAKE_SYSTEM_NAME MATCHES "Cygwin") add_definitions(-fno-builtin-memcmp -DCYGWIN) @@ -330,7 +359,7 @@ CONFIGURE_FILE(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in build_version.cc @ INCLUDE_DIRECTORIES(${ROCKSDB_SOURCE_DIR}/util) list(APPEND SOURCES ${CMAKE_CURRENT_BINARY_DIR}/build_version.cc) -ADD_CONVENIENCE_LIBRARY(rocksdblib STATIC ${SOURCES}) +ADD_CONVENIENCE_LIBRARY(rocksdblib ${SOURCES}) target_link_libraries(rocksdblib ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") set_target_properties(rocksdblib PROPERTIES COMPILE_FLAGS "-fPIC -fno-builtin-memcmp -frtti") From 7facbc548db6761d60955982bd40dd83cbd00c85 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 19 Feb 2017 18:51:26 
+0300 Subject: [PATCH 143/233] MariaRocks: fix a few tests Test suite parameters for 'rocksdb' test suite were disabled in order to get mysqld to start at all when ha_rocksdb is a dynamic plugin. A lot of tests depend on these parameters being enabled, though. Put them back by using the loose- form. --- storage/rocksdb/mysql-test/rocksdb/my.cnf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index 5f85ab8b54d..c6817df9d18 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -7,5 +7,5 @@ default-storage-engine=rocksdb sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 -#rocksdb_lock_wait_timeout=1 -#rocksdb_strict_collation_check=0 +loose-rocksdb_lock_wait_timeout=1 +loose-rocksdb_strict_collation_check=0 From e57ab94cce630799c4a52ac7a44abe7415755bac Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 21 Feb 2017 03:51:09 +0300 Subject: [PATCH 144/233] Update rocksdb submodule to match the rocksdb version used in upstream This change should have been a part of Merge 'merge-myrocks' into 'bb-10.2-mariarocks' Merged cset: Copy of commit d1bb19b8f751875472211312c8e810143a7ba4b6 We probably should make submodule info a part of the mergetree process. 
--- storage/rocksdb/rocksdb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/rocksdb b/storage/rocksdb/rocksdb index 335981d4737..bc5d7b70299 160000 --- a/storage/rocksdb/rocksdb +++ b/storage/rocksdb/rocksdb @@ -1 +1 @@ -Subproject commit 335981d47371cef0c48ebb58af53f2c8b8d8bd35 +Subproject commit bc5d7b70299b763127f3714055a63ebe7e04ad47 From ea5cc017e97f02d5010dcb35d26b759978758edf Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 21 Feb 2017 16:49:18 +0300 Subject: [PATCH 145/233] Test fixes in rocksdb_sys_vars test suite - Get the suite to work with dynamically-linked plugin (ha_rocksdb.so) - Due to the push to keep everything MyRocks-related in storage/rocksdb, there is no mysql-test/include/have_rocksdb.* anymore. Make a copy of storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb*, hopefully these files wont be changed [often]. - Maria-fication of rocksdb_persistent_cache_path test. --- .../rocksdb_sys_vars/include/have_rocksdb.inc | 10 ++++++++++ .../rocksdb_sys_vars/include/have_rocksdb.opt | 12 ++++++++++++ storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf | 3 +-- .../rocksdb/mysql-test/rocksdb_sys_vars/suite.opt | 2 +- .../t/rocksdb_persistent_cache_path_basic.test | 2 +- 5 files changed, 25 insertions(+), 4 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc new file mode 100644 index 00000000000..1f762d38c64 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc @@ -0,0 +1,10 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('YES', 'DEFAULT', 'ENABLED')`) +{ + --skip Test requires engine RocksDB. 
+} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt new file mode 100644 index 00000000000..36d7dda1609 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt @@ -0,0 +1,12 @@ +--loose-enable-rocksdb +--loose-enable-rocksdb_global_info +--loose-enable-rocksdb_ddl +--loose-enable-rocksdb_cf_options +--loose-enable_rocksdb_perf_context +--loose-enable_rocksdb_perf_context_global +--loose-enable-rocksdb_index_file_map +--loose-enable-rocksdb_dbstats +--loose-enable-rocksdb_cfstats +--loose-enable-rocksdb_lock_info +--loose-enable-rocksdb_trx +--loose-enable-rocksdb_locks diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf index 5bb50e1da28..1e9b0a9d3bb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf @@ -1,11 +1,10 @@ !include include/default_my.cnf [server] -rocksdb skip-innodb default-storage-engine=rocksdb sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 -rocksdb_lock_wait_timeout=1 +loose-rocksdb_lock_wait_timeout=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt index 8907deed6d8..431fc331458 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt @@ -1,2 +1,2 @@ ---ignore-db-dirs=.rocksdb +--ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test index c0840274253..1a1146a17cc 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test @@ -10,7 +10,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=1 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; From f080c93dcd8d5e0bc2c874efaf949a2097967494 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 6 Mar 2017 14:50:14 +0300 Subject: [PATCH 146/233] MariaRocks: (Temporarily?) disable MTR tests that run RQG --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 27f4dabdf1f..0e6aaa3f553 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -14,3 +14,13 @@ optimizer_loose_index_scans: MariaDB doesnt support Skip Scan gap_lock_issue254: MDEV-11735: MyRocks: Gap Lock detector support gap_lock_raise_error: MDEV-11735: MyRocks: Gap Lock detector support +# +# The idea of including RQG as a submodule and running RQG as part of +# MTR tests doesn't seem to be a good fit in MariaDB atm. +# +# The objection is that MTR tests are deterministic and can be run in +# a constrained environment. 
+# +rqg_examples +rqg_runtime +rqg_transactions From e7948e34ee76cb1525126561f6e812facc1af22a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 6 Mar 2017 23:59:29 +0300 Subject: [PATCH 147/233] Fix the previous cset: use a proper disabled.def syntax --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 0e6aaa3f553..0efe609ae63 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -21,6 +21,6 @@ gap_lock_raise_error: MDEV-11735: MyRocks: Gap Lock detector support # The objection is that MTR tests are deterministic and can be run in # a constrained environment. # -rqg_examples -rqg_runtime -rqg_transactions +rqg_examples : Test that use RQG are disabled +rqg_runtime : Test that use RQG are disabled +rqg_transactions : Test that use RQG are disabled From 48a5dd945b8f22ba4bceda7558b9f650fbc0fa43 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 7 Mar 2017 11:08:41 +0300 Subject: [PATCH 148/233] MariaRocks: fix compilation on kvm-rpm-centos7-amd64 Define __STDC_FORMAT_MACROS before using PRIu64. 
RocksDB does this in many files --- storage/rocksdb/rdb_sst_info.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index 8c5a7aa022b..aedb9fdfefe 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -14,6 +14,9 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* For PRIu64 use below: */ +#define __STDC_FORMAT_MACROS + #include /* This C++ file's header file */ From f9e63b7c59557efd36dfa4b3254cbe8b0af4ff06 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 7 Mar 2017 11:38:26 +0300 Subject: [PATCH 149/233] MariaRocks port: __PRETTY_FUNCTION__ is a gcc extension. Use __func__ when it is not available For example, on Windows. --- storage/rocksdb/rdb_utils.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index a1d630da41f..6cd47405f48 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -113,7 +113,11 @@ namespace myrocks { contains the signature of the function as well as its bare name and provides therefore more context when interpreting the logs. */ -#define DBUG_ENTER_FUNC() DBUG_ENTER(__PRETTY_FUNCTION__) +#ifdef __GNUC__ +# define DBUG_ENTER_FUNC() DBUG_ENTER(__PRETTY_FUNCTION__) +#else +# define DBUG_ENTER_FUNC() DBUG_ENTER(__func__) +#endif /* Error handling pattern used across MySQL abides by the following rules: "All From e19f1dd61e7cd59efca746a204ecf05648b21c48 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 7 Mar 2017 14:11:19 +0000 Subject: [PATCH 150/233] Fix build on Windows. Remove some GCC specific pragmas, use #ifdef __GNUC__ in other places. Only use pthread_setname_np on Linux. 
Fix a mismerge --- storage/rocksdb/ha_rocksdb.cc | 13 ++++--------- storage/rocksdb/rdb_threads.cc | 3 ++- storage/rocksdb/rdb_threads.h | 7 +++++-- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 724d8cc495b..b6818234fad 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -7992,12 +7992,7 @@ int ha_rocksdb::info(uint flag) { uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; auto r = get_range(pk_index(table, m_tbl_def), buf); uint64_t sz = 0; - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, true); -#pragma GCC diagnostic pop - stats.records = sz / ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; stats.data_file_length = sz; @@ -8838,11 +8833,8 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key, disk_size = ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" // Getting statistics, including from Memtables rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, true); -#pragma GCC diagnostic pop ret = rows * sz / disk_size; @@ -10160,8 +10152,10 @@ const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type) { // In case of core dump generation we want this function NOT to be optimized // so that we can capture as much data as possible to debug the root cause // more efficiently. 
+#ifdef __GNUC__ #pragma GCC push_options #pragma GCC optimize("O0") +#endif void rdb_handle_io_error(const rocksdb::Status status, const RDB_IO_ERROR_TYPE err_type) { @@ -10234,8 +10228,9 @@ void rdb_handle_io_error(const rocksdb::Status status, } } } - +#ifdef __GNUC__ #pragma GCC pop_options +#endif Rdb_dict_manager *rdb_get_dict_manager(void) { return &dict_manager; } diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc index 1dd99ba596a..91da0ae818c 100644 --- a/storage/rocksdb/rdb_threads.cc +++ b/storage/rocksdb/rdb_threads.cc @@ -62,7 +62,7 @@ int Rdb_thread::create_thread(const std::string &thread_name int err = mysql_thread_create(background_psi_thread_key, &m_handle, nullptr, thread_func, this); - +#ifdef __linux__ if (!err) { /* mysql_thread_create() ends up doing some work underneath and setting the @@ -73,6 +73,7 @@ int Rdb_thread::create_thread(const std::string &thread_name */ err = pthread_setname_np(m_handle, thread_name.c_str()); } +#endif return err; } diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index 662b78c9608..2a421ec3c53 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -75,8 +75,10 @@ public: void signal(const bool &stop_thread = false); - int join() { return pthread_join(m_handle, nullptr); } + int join() + { #ifndef _WIN32 + return pthread_join(m_handle, nullptr); #else /* mysys on Windows creates "detached" threads in pthread_create(). @@ -88,9 +90,10 @@ public: If thread is already finished before pthread_join(), we get EINVAL, and it is safe to ignore and handle this as success. */ - (void)pthread_join(m_handle, nullptr); + pthread_join(m_handle, nullptr); return 0; #endif + } void uninit(); From fd39f25ca70d548a4ec1ed36c4c6c30072d4e47b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 10 Mar 2017 14:05:17 +0300 Subject: [PATCH 151/233] MariaRocks: fix compilation on Oracle Linux Server 7.3. #define __STDC_FORMAT_MACROS. 
Unfortunately there is no single location that would be #includ'ed before everything else. Have to put the #define into each .cc file --- storage/rocksdb/ha_rocksdb.cc | 3 +++ storage/rocksdb/rdb_cf_manager.cc | 3 +++ storage/rocksdb/rdb_datadic.cc | 3 +++ 3 files changed, 9 insertions(+) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index b6818234fad..2a8d5f8513c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -20,6 +20,9 @@ #define MYSQL_SERVER 1 +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + #include #include diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 710365a0b25..22f60ef3e61 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -18,6 +18,9 @@ #pragma implementation // gcc: Class implementation #endif +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + #include #include diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 79229561939..ba13b52f1b4 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -18,6 +18,9 @@ #pragma implementation // gcc: Class implementation #endif +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + #include /* This C++ file's header file */ #include "./rdb_datadic.h" From 65d01da29c03c44f3c059a999c7c1cc6e001ead8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 11 Mar 2017 07:17:42 +0300 Subject: [PATCH 152/233] Copy of commit ba00e640f658ad8d0a4dff09a497a51b8a4de935 Author: Herman Lee Date: Wed Feb 22 06:30:06 2017 -0800 Improve add_index_alter_cardinality test Summary: Split add_index_inplace_cardinality test out and add a debug_sync point to it so that the flush of the memtable occurs while the alter is running. 
Closes https://github.com/facebook/mysql-5.6/pull/539 Reviewed By: alxyang Differential Revision: D4597887 Pulled By: hermanlee fbshipit-source-id: faedda2 --- storage/rocksdb/.clang-format | 93 ++ storage/rocksdb/CMakeLists.txt | 6 + storage/rocksdb/ha_rocksdb.cc | 861 +++++++++++------- storage/rocksdb/ha_rocksdb.h | 73 +- storage/rocksdb/logger.h | 19 +- .../rocksdb/r/2pc_group_commit.result | 8 +- .../r/add_index_inplace_cardinality.result | 21 + .../r/blind_delete_without_tx_api.result | 85 ++ .../mysql-test/rocksdb/r/bulk_load.result | 32 +- .../mysql-test/rocksdb/r/collation.result | 1 + .../rocksdb/r/fail_system_cf.result | 2 +- .../mysql-test/rocksdb/r/handler_basic.result | 2 +- .../mysql-test/rocksdb/r/issue290.result | 4 +- .../mysql-test/rocksdb/r/issue495.result | 32 + .../mysql-test/rocksdb/r/mysqldump.result | 45 + .../mysql-test/rocksdb/r/partition.result | 663 +++++++++++++- .../mysql-test/rocksdb/r/rocksdb.result | 31 +- .../rocksdb/r/rocksdb_cf_per_partition.result | 409 +++++++++ .../mysql-test/rocksdb/r/show_engine.result | 74 -- .../rocksdb/r/tbl_opt_data_index_dir.result | 41 +- .../mysql-test/rocksdb/r/transaction.result | 210 ++--- .../mysql-test/rocksdb/r/unique_check.result | 12 + .../mysql-test/rocksdb/r/write_sync.result | 8 +- .../rocksdb/t/2pc_group_commit.test | 8 +- .../rocksdb/t/add_index_inplace.test | 2 - .../add_index_inplace_cardinality-master.opt | 1 + .../t/add_index_inplace_cardinality.test | 44 + .../rocksdb/t/blind_delete_without_tx_api.cnf | 11 + .../t/blind_delete_without_tx_api.test | 129 +++ .../mysql-test/rocksdb/t/bulk_load.test | 4 +- .../mysql-test/rocksdb/t/collation.test | 3 + .../mysql-test/rocksdb/t/handler_basic.test | 3 +- .../mysql-test/rocksdb/t/issue290.test | 2 +- .../mysql-test/rocksdb/t/issue495.test | 29 + .../mysql-test/rocksdb/t/mysqldump.test | 3 + .../mysql-test/rocksdb/t/partition.test | 723 ++++++++++++++- .../rocksdb/t/persistent_cache.test | 4 +- .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 
14 + .../rocksdb/t/rocksdb_cf_per_partition.test | 494 ++++++++++ .../mysql-test/rocksdb/t/show_engine.test | 4 + .../rocksdb/t/tbl_opt_data_index_dir.test | 54 +- .../rocksdb/t/transaction_select.inc | 13 +- .../mysql-test/rocksdb/t/unique_check.test | 26 + .../mysql-test/rocksdb/t/write_sync.test | 8 +- .../t/rpl_crash_safe_wal_corrupt.cnf | 2 + .../t/rpl_gtid_crash_safe-master.opt | 2 +- .../t/rpl_gtid_crash_safe-slave.opt | 2 +- .../t/rpl_gtid_crash_safe_wal_corrupt.cnf | 2 + .../rpl_rocksdb_2pc_crash_recover-master.opt | 2 +- ...ksdb_blind_delete_primary_key_basic.result | 100 ++ .../r/rocksdb_compact_cf_basic.result | 1 + .../r/rocksdb_delayed_write_rate_basic.result | 85 ++ .../r/rocksdb_disabledatasync_basic.result | 7 - ...cksdb_flush_log_at_trx_commit_basic.result | 93 ++ ...sdb_flush_memtable_on_analyze_basic.result | 6 +- ..._force_compute_memtable_stats_basic.result | 15 + .../r/rocksdb_master_skip_tx_api_basic.result | 100 ++ ...sdb_persistent_cache_size_mb_basic.result} | 8 +- .../r/rocksdb_wal_recovery_mode_basic.result | 10 +- .../r/rocksdb_write_sync_basic.result | 114 --- ...ocksdb_blind_delete_primary_key_basic.test | 18 + .../t/rocksdb_compact_cf_basic.test | 4 + .../t/rocksdb_delayed_write_rate_basic.test | 22 + .../t/rocksdb_disabledatasync_basic.test | 6 - ...rocksdb_flush_log_at_trx_commit_basic.test | 18 + ...cksdb_flush_memtable_on_analyze_basic.test | 2 + ...db_force_compute_memtable_stats_basic.test | 23 + ... 
=> rocksdb_master_skip_tx_api_basic.test} | 4 +- ...cksdb_persistent_cache_size_mb_basic.test} | 2 +- storage/rocksdb/properties_collector.cc | 22 +- storage/rocksdb/rdb_cf_manager.cc | 67 +- storage/rocksdb/rdb_datadic.cc | 248 +++-- storage/rocksdb/rdb_datadic.h | 40 +- storage/rocksdb/rdb_i_s.cc | 10 +- storage/rocksdb/rdb_mutex_wrapper.cc | 6 +- storage/rocksdb/rdb_psi.cc | 113 +++ storage/rocksdb/rdb_psi.h | 55 ++ storage/rocksdb/rdb_sst_info.cc | 21 +- storage/rocksdb/rdb_sst_info.h | 5 +- storage/rocksdb/rdb_threads.cc | 29 +- storage/rocksdb/rdb_threads.h | 33 +- storage/rocksdb/rdb_utils.cc | 17 + storage/rocksdb/rdb_utils.h | 38 + storage/rocksdb/tools/mysql_ldb.cc | 6 +- .../unittest/test_properties_collector.cc | 44 +- 85 files changed, 4754 insertions(+), 959 deletions(-) create mode 100644 storage/rocksdb/.clang-format create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue495.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue495.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result delete mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result rename storage/rocksdb/mysql-test/rocksdb_sys_vars/r/{rocksdb_persistent_cache_size_basic.result => rocksdb_persistent_cache_size_mb_basic.result} (61%) delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_write_sync_basic.test => rocksdb_master_skip_tx_api_basic.test} (83%) rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_persistent_cache_size_basic.test => rocksdb_persistent_cache_size_mb_basic.test} (89%) create mode 100644 storage/rocksdb/rdb_psi.cc create mode 100644 storage/rocksdb/rdb_psi.h diff --git a/storage/rocksdb/.clang-format b/storage/rocksdb/.clang-format new file mode 100644 index 00000000000..d80b012dd4b --- /dev/null +++ b/storage/rocksdb/.clang-format @@ -0,0 +1,93 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: false +AlignOperands: true 
+AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: false +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + - Regex: '^(<|"(gtest|isl|json)/)' + Priority: 3 + - Regex: '.*' + Priority: 1 +IncludeIsMainRegex: '$' +IndentCaseLabels: false +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 
+PointerAlignment: Right +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never +JavaScriptQuotes: Leave +... diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index a5d07a12d96..12e88b24b49 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -28,6 +28,11 @@ INCLUDE_DIRECTORIES( ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX -DZLIB) +CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE) +IF(HAVE_FALLOCATE) + ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT) +ENDIF() + SET(ROCKSDB_SOURCES ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h logger.h @@ -41,6 +46,7 @@ SET(ROCKSDB_SOURCES rdb_index_merge.cc rdb_index_merge.h rdb_perf_context.cc rdb_perf_context.h rdb_mutex_wrapper.cc rdb_mutex_wrapper.h + rdb_psi.h rdb_psi.cc rdb_sst_info.cc rdb_sst_info.h rdb_utils.cc rdb_utils.h rdb_buff.h rdb_threads.cc rdb_threads.h diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 59c6e1ab33b..d3157b0b800 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -59,6 +59,7 @@ #include "./rdb_i_s.h" #include "./rdb_index_merge.h" #include "./rdb_mutex_wrapper.h" +#include "./rdb_psi.h" #include "./rdb_threads.h" // Internal MySQL APIs not exposed in any header. 
@@ -302,7 +303,7 @@ static void rocksdb_set_pause_background_work( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { - mysql_mutex_lock(&rdb_sysvars_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const bool pause_requested = *static_cast(save); if (rocksdb_pause_background_work != pause_requested) { if (pause_requested) { @@ -312,7 +313,7 @@ static void rocksdb_set_pause_background_work( } rocksdb_pause_background_work = pause_requested; } - mysql_mutex_unlock(&rdb_sysvars_mutex); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static void rocksdb_set_compaction_options(THD *thd, @@ -329,6 +330,10 @@ static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd, void *var_ptr, const void *save); +static void rocksdb_set_delayed_write_rate(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save); + static void rdb_set_collation_exception_list(const char *exception_list); static void rocksdb_set_collation_exception_list(THD *thd, struct st_mysql_sys_var *var, @@ -350,14 +355,16 @@ static long long rocksdb_block_cache_size; /* Use unsigned long long instead of uint64_t because of MySQL compatibility */ static unsigned long long // NOLINT(runtime/int) rocksdb_rate_limiter_bytes_per_sec; +static unsigned long long rocksdb_delayed_write_rate; static unsigned long // NOLINT(runtime/int) - rocksdb_persistent_cache_size; + rocksdb_persistent_cache_size_mb; static uint64_t rocksdb_info_log_level; static char *rocksdb_wal_dir; static char *rocksdb_persistent_cache_path; static uint64_t rocksdb_index_type; static char rocksdb_background_sync; static uint32_t rocksdb_debug_optimizer_n_rows; +static my_bool rocksdb_force_compute_memtable_stats; static my_bool rocksdb_debug_optimizer_no_zero_cardinality; static uint32_t rocksdb_wal_recovery_mode; static uint32_t rocksdb_access_hint_on_compaction_start; @@ -413,11 +420,11 
@@ static void rocksdb_set_rocksdb_info_log_level( const void *const save) { DBUG_ASSERT(save != nullptr); - mysql_mutex_lock(&rdb_sysvars_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); rocksdb_info_log_level = *static_cast(save); rocksdb_db_options.info_log->SetInfoLogLevel( static_cast(rocksdb_info_log_level)); - mysql_mutex_unlock(&rdb_sysvars_mutex); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; @@ -478,6 +485,12 @@ static MYSQL_THDVAR_BOOL( "update and delete", nullptr, nullptr, FALSE); +static MYSQL_THDVAR_BOOL( + blind_delete_primary_key, PLUGIN_VAR_RQCMDARG, + "Deleting rows by primary key lookup, without reading rows (Blind Deletes)." + " Blind delete is disabled if the table has secondary key", + nullptr, nullptr, FALSE); + static MYSQL_THDVAR_STR( read_free_rpl_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, "List of tables that will use read-free replication on the slave " @@ -561,6 +574,13 @@ static MYSQL_SYSVAR_ULONGLONG( nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); +static MYSQL_SYSVAR_ULONGLONG(delayed_write_rate, rocksdb_delayed_write_rate, + PLUGIN_VAR_RQCMDARG, + "DBOptions::delayed_write_rate", nullptr, + rocksdb_set_delayed_write_rate, + rocksdb_db_options.delayed_write_rate, 0, + UINT64_MAX, 0); + static MYSQL_SYSVAR_ENUM( info_log_level, rocksdb_info_log_level, PLUGIN_VAR_RQCMDARG, "Filter level for info logs to be written mysqld error log. " @@ -579,8 +599,9 @@ static MYSQL_THDVAR_INT( static MYSQL_SYSVAR_UINT( wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG, - "DBOptions::wal_recovery_mode for RocksDB", nullptr, nullptr, - /* default */ (uint)rocksdb::WALRecoveryMode::kPointInTimeRecovery, + "DBOptions::wal_recovery_mode for RocksDB. 
Default is kAbsoluteConsistency", + nullptr, nullptr, + /* default */ (uint)rocksdb::WALRecoveryMode::kAbsoluteConsistency, /* min */ (uint)rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, /* max */ (uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); @@ -637,13 +658,6 @@ static MYSQL_SYSVAR_ULONG(max_total_wal_size, nullptr, rocksdb_db_options.max_total_wal_size, /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_BOOL( - disabledatasync, - *reinterpret_cast(&rocksdb_db_options.disableDataSync), - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::disableDataSync for RocksDB", nullptr, nullptr, - rocksdb_db_options.disableDataSync); - static MYSQL_SYSVAR_BOOL( use_fsync, *reinterpret_cast(&rocksdb_db_options.use_fsync), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -662,10 +676,10 @@ static MYSQL_SYSVAR_STR( nullptr, ""); static MYSQL_SYSVAR_ULONG( - persistent_cache_size, rocksdb_persistent_cache_size, + persistent_cache_size_mb, rocksdb_persistent_cache_size_mb, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Size of cache for BlockBasedTableOptions::persistent_cache for RocksDB", - nullptr, nullptr, rocksdb_persistent_cache_size, + "Size of cache in MB for BlockBasedTableOptions::persistent_cache " + "for RocksDB", nullptr, nullptr, rocksdb_persistent_cache_size_mb, /* min */ 0L, /* max */ ULONG_MAX, 0); static MYSQL_SYSVAR_ULONG( @@ -946,9 +960,11 @@ static MYSQL_SYSVAR_BOOL(background_sync, rocksdb_background_sync, "turns on background syncs for RocksDB", nullptr, nullptr, FALSE); -static MYSQL_THDVAR_BOOL(write_sync, PLUGIN_VAR_RQCMDARG, - "WriteOptions::sync for RocksDB", nullptr, nullptr, - rocksdb::WriteOptions().sync); +static MYSQL_THDVAR_UINT(flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG, + "Sync on transaction commit. Similar to " + "innodb_flush_log_at_trx_commit. 
1: sync on commit, " + "0,2: not sync on commit", + nullptr, nullptr, 1, 0, 2, 0); static MYSQL_THDVAR_BOOL(write_disable_wal, PLUGIN_VAR_RQCMDARG, "WriteOptions::disableWAL for RocksDB", nullptr, @@ -986,6 +1002,12 @@ static MYSQL_SYSVAR_UINT( "Test only to override rocksdb estimates of table size in a memtable", nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0); +static MYSQL_SYSVAR_BOOL(force_compute_memtable_stats, + rocksdb_force_compute_memtable_stats, + PLUGIN_VAR_RQCMDARG, + "Force to always compute memtable stats", + nullptr, nullptr, TRUE); + static MYSQL_SYSVAR_BOOL( debug_optimizer_no_zero_cardinality, rocksdb_debug_optimizer_no_zero_cardinality, PLUGIN_VAR_RQCMDARG, @@ -1085,6 +1107,7 @@ static MYSQL_SYSVAR_BOOL( "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); + static MYSQL_SYSVAR_BOOL( print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries, PLUGIN_VAR_RQCMDARG, @@ -1104,6 +1127,11 @@ static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG, "Verify checksums when reading index/table records", nullptr, nullptr, false /* default value */); +static MYSQL_THDVAR_BOOL(master_skip_tx_api, PLUGIN_VAR_RQCMDARG, + "Skipping holding any lock on row access. 
" + "Not effective on slave.", + nullptr, nullptr, false); + static MYSQL_SYSVAR_UINT( validate_tables, rocksdb_validate_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -1154,6 +1182,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(skip_unique_check_tables), MYSQL_SYSVAR(trace_sst_api), MYSQL_SYSVAR(commit_in_the_middle), + MYSQL_SYSVAR(blind_delete_primary_key), MYSQL_SYSVAR(read_free_rpl_tables), MYSQL_SYSVAR(bulk_load_size), MYSQL_SYSVAR(merge_buf_size), @@ -1167,14 +1196,14 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(error_if_exists), MYSQL_SYSVAR(paranoid_checks), MYSQL_SYSVAR(rate_limiter_bytes_per_sec), + MYSQL_SYSVAR(delayed_write_rate), MYSQL_SYSVAR(info_log_level), MYSQL_SYSVAR(max_open_files), MYSQL_SYSVAR(max_total_wal_size), - MYSQL_SYSVAR(disabledatasync), MYSQL_SYSVAR(use_fsync), MYSQL_SYSVAR(wal_dir), MYSQL_SYSVAR(persistent_cache_path), - MYSQL_SYSVAR(persistent_cache_size), + MYSQL_SYSVAR(persistent_cache_size_mb), MYSQL_SYSVAR(delete_obsolete_files_period_micros), MYSQL_SYSVAR(base_background_compactions), MYSQL_SYSVAR(max_background_compactions), @@ -1224,7 +1253,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(background_sync), - MYSQL_SYSVAR(write_sync), + MYSQL_SYSVAR(flush_log_at_trx_commit), MYSQL_SYSVAR(write_disable_wal), MYSQL_SYSVAR(write_ignore_missing_column_families), @@ -1234,6 +1263,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(records_in_range), MYSQL_SYSVAR(force_index_records_in_range), MYSQL_SYSVAR(debug_optimizer_n_rows), + MYSQL_SYSVAR(force_compute_memtable_stats), MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), MYSQL_SYSVAR(compact_cf), @@ -1259,6 +1289,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(checksums_pct), MYSQL_SYSVAR(store_row_debug_checksums), MYSQL_SYSVAR(verify_row_debug_checksums), + MYSQL_SYSVAR(master_skip_tx_api), 
MYSQL_SYSVAR(validate_tables), MYSQL_SYSVAR(table_stats_sampling_pct), @@ -1268,7 +1299,7 @@ static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD *const thd) { rocksdb::WriteOptions opt; - opt.sync = THDVAR(thd, write_sync); + opt.sync = THDVAR(thd, flush_log_at_trx_commit) == 1; opt.disableWAL = THDVAR(thd, write_disable_wal); opt.ignore_missing_column_families = THDVAR(thd, write_ignore_missing_column_families); @@ -1291,85 +1322,6 @@ Rdb_open_tables_map::get_hash_key(Rdb_table_handler *const table_handler, return reinterpret_cast(table_handler->m_table_name); } -/* - The following is needed as an argument for mysql_stage_register, - irrespectively of whether we're compiling with P_S or not. -*/ -PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock", 0}; - -#ifdef HAVE_PSI_INTERFACE -static PSI_thread_key rdb_background_psi_thread_key; -static PSI_thread_key rdb_drop_idx_psi_thread_key; - -static PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock}; - -static my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, - rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, - rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, - key_mutex_tx_list, rdb_sysvars_psi_mutex_key; - -static PSI_mutex_info all_rocksdb_mutexes[] = { - {&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, - {&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, - {&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, - {&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, - {&rdb_mem_cmp_space_mutex_key, "collation space char data init", - PSI_FLAG_GLOBAL}, - {&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, - {&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, -}; - -static PSI_rwlock_key key_rwlock_collation_exception_list; -static PSI_rwlock_key key_rwlock_read_free_rpl_tables; -static PSI_rwlock_key key_rwlock_skip_unique_check_tables; - -static 
PSI_rwlock_info all_rocksdb_rwlocks[] = { - {&key_rwlock_collation_exception_list, "collation_exception_list", - PSI_FLAG_GLOBAL}, - {&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, - {&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", - PSI_FLAG_GLOBAL}, -}; - -PSI_cond_key rdb_signal_bg_psi_cond_key, rdb_signal_drop_idx_psi_cond_key; - -static PSI_cond_info all_rocksdb_conds[] = { - {&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, - {&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", - PSI_FLAG_GLOBAL}, -}; - -static PSI_thread_info all_rocksdb_threads[] = { - {&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, - {&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, -}; - -static void init_rocksdb_psi_keys() { - const char *const category = "rocksdb"; - int count; - - if (PSI_server == nullptr) - return; - - count = array_elements(all_rocksdb_mutexes); - PSI_server->register_mutex(category, all_rocksdb_mutexes, count); - - count = array_elements(all_rocksdb_rwlocks); - PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count); - - count = array_elements(all_rocksdb_conds); - // TODO Disabling PFS for conditions due to the bug - // https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 - // PSI_server->register_cond(category, all_rocksdb_conds, count); - - count = array_elements(all_rocksdb_stages); - mysql_stage_register(category, all_rocksdb_stages, count); - - count = array_elements(all_rocksdb_threads); - mysql_thread_register(category, all_rocksdb_threads, count); -} -#endif - /* Drop index thread's control */ @@ -1503,10 +1455,12 @@ public: static void walk_tx_list(Rdb_tx_list_walker *walker) { DBUG_ASSERT(walker != nullptr); - mysql_mutex_lock(&s_tx_list_mutex); + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); + for (auto it : s_tx_list) walker->process_tran(it); - mysql_mutex_unlock(&s_tx_list_mutex); + + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); } int 
set_status_error(THD *const thd, const rocksdb::Status &s, @@ -1850,15 +1804,15 @@ public: explicit Rdb_transaction(THD *const thd) : m_thd(thd), m_tbl_io_perf(nullptr) { - mysql_mutex_lock(&s_tx_list_mutex); + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); s_tx_list.insert(this); - mysql_mutex_unlock(&s_tx_list_mutex); + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); } virtual ~Rdb_transaction() { - mysql_mutex_lock(&s_tx_list_mutex); + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); s_tx_list.erase(this); - mysql_mutex_unlock(&s_tx_list_mutex); + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); } }; @@ -2075,7 +2029,7 @@ public: tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec); tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect); - write_opts.sync = THDVAR(m_thd, write_sync); + write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1; write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); @@ -2288,7 +2242,7 @@ public: void start_tx() override { reset(); - write_opts.sync = THDVAR(m_thd, write_sync); + write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1; write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); @@ -2367,10 +2321,15 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) { Rdb_transaction *&tx = get_tx_from_thd(thd); // TODO: this is called too many times.. 
O(#rows) if (tx == nullptr) { - if (rpl_skip_tx_api && thd->rli_slave) + if ((rpl_skip_tx_api && thd->rli_slave) || + (THDVAR(thd, master_skip_tx_api) && !thd->rli_slave)) + { tx = new Rdb_writebatch_impl(thd); + } else + { tx = new Rdb_transaction_impl(thd); + } tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); tx->start_tx(); } else { @@ -2474,7 +2433,7 @@ static int rocksdb_prepare(handlerton *const hton, THD *const thd, return HA_EXIT_FAILURE; } if (thd->durability_property == HA_IGNORE_DURABILITY && - THDVAR(thd, write_sync)) { + (THDVAR(thd, flush_log_at_trx_commit) == 1)) { /** we set the log sequence as '1' just to trigger hton->flush_logs */ @@ -3232,9 +3191,7 @@ static int rocksdb_init_func(void *const p) { // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN. static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes."); -#ifdef HAVE_PSI_INTERFACE init_rocksdb_psi_keys(); -#endif rocksdb_hton = (handlerton *)p; mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex, @@ -3298,6 +3255,8 @@ static int rocksdb_init_func(void *const p) { rocksdb_db_options.rate_limiter = rocksdb_rate_limiter; } + rocksdb_db_options.delayed_write_rate = rocksdb_delayed_write_rate; + std::shared_ptr myrocks_logger = std::make_shared(); rocksdb::Status s = rocksdb::CreateLoggerFromOptions( rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log); @@ -3383,24 +3342,25 @@ static int rocksdb_init_func(void *const p) { rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr); - mysql_mutex_lock(&rdb_sysvars_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); DBUG_ASSERT(rocksdb_table_stats_sampling_pct <= RDB_TBL_STATS_SAMPLE_PCT_MAX); properties_collector_factory->SetTableStatsSamplingPct( rocksdb_table_stats_sampling_pct); - mysql_mutex_unlock(&rdb_sysvars_mutex); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } - if (rocksdb_persistent_cache_size > 0) { + if 
(rocksdb_persistent_cache_size_mb > 0) { std::shared_ptr pcache; + uint64_t cache_size_bytes= rocksdb_persistent_cache_size_mb * 1024 * 1024; rocksdb::NewPersistentCache( rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path), - rocksdb_persistent_cache_size, myrocks_logger, true, &pcache); + cache_size_bytes, myrocks_logger, true, &pcache); rocksdb_tbl_options.persistent_cache = pcache; } else if (strlen(rocksdb_persistent_cache_path)) { - sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size"); + sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb"); DBUG_RETURN(1); } @@ -3581,7 +3541,7 @@ static int rocksdb_done_func(void *const p) { // signal the drop index thread to stop rdb_drop_idx_thread.signal(true); - // Flush all memtables for not lose data, even if WAL is disabled. + // Flush all memtables for not losing data, even if WAL is disabled. rocksdb_flush_all_memtables(); // Stop all rocksdb background work @@ -3652,6 +3612,16 @@ static int rocksdb_done_func(void *const p) { DBUG_RETURN(error); } +static inline void rocksdb_smart_seek(bool seek_backward, + rocksdb::Iterator *const iter, + const rocksdb::Slice &key_slice) { + if (seek_backward) { + iter->SeekForPrev(key_slice); + } else { + iter->Seek(key_slice); + } +} + /** @brief Example of simple lock controls. The "table_handler" it creates is a @@ -3670,7 +3640,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { length = (uint)strlen(table_name); // First, look up the table in the hash map. 
- mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); if (!(table_handler = reinterpret_cast(my_hash_search( &m_hash, reinterpret_cast(table_name), length)))) { // Since we did not find it in the hash map, attempt to create and add it @@ -3679,7 +3649,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { MYF(MY_WME | MY_ZEROFILL), &table_handler, sizeof(*table_handler), &tmp_name, length + 1, NullS)))) { // Allocating a new Rdb_table_handler and a new table name failed. - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return nullptr; } @@ -3690,7 +3660,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { if (my_hash_insert(&m_hash, reinterpret_cast(table_handler))) { // Inserting into the hash map failed. - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); my_free(table_handler); return nullptr; } @@ -3701,7 +3671,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { DBUG_ASSERT(table_handler->m_ref_count >= 0); table_handler->m_ref_count++; - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return table_handler; } @@ -3715,7 +3685,7 @@ std::vector Rdb_open_tables_map::get_table_names(void) const { const Rdb_table_handler *table_handler; std::vector names; - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); for (i = 0; (table_handler = reinterpret_cast( my_hash_const_element(&m_hash, i))); i++) { @@ -3723,7 +3693,7 @@ std::vector Rdb_open_tables_map::get_table_names(void) const { names.push_back(table_handler->m_table_name); } DBUG_ASSERT(i == m_hash.records); - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return names; } @@ -3872,7 +3842,7 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) { void Rdb_open_tables_map::release_table_handler( Rdb_table_handler *const table_handler) { - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); DBUG_ASSERT(table_handler != nullptr); 
DBUG_ASSERT(table_handler->m_ref_count > 0); @@ -3885,7 +3855,7 @@ void Rdb_open_tables_map::release_table_handler( my_free(table_handler); } - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); } static handler *rocksdb_create_handler(my_core::handlerton *const hton, @@ -4137,6 +4107,92 @@ int ha_rocksdb::convert_record_from_storage_format( return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); } +int ha_rocksdb::convert_blob_from_storage_format( + my_core::Field_blob *const blob, + Rdb_string_reader *const reader, + bool decode) +{ + /* Get the number of bytes needed to store length*/ + const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; + + const char *data_len_str; + if (!(data_len_str = reader->read(length_bytes))) { + return HA_ERR_INTERNAL_ERROR; + } + + memcpy(blob->ptr, data_len_str, length_bytes); + + const uint32 data_len = blob->get_length( + reinterpret_cast(data_len_str), length_bytes, + table->s->db_low_byte_first); + const char *blob_ptr; + if (!(blob_ptr = reader->read(data_len))) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) { + // set 8-byte pointer to 0, like innodb does (relevant for 32-bit + // platforms) + memset(blob->ptr + length_bytes, 0, 8); + memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **)); + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::convert_varchar_from_storage_format( + my_core::Field_varstring *const field_var, + Rdb_string_reader *const reader, + bool decode) +{ + const char *data_len_str; + if (!(data_len_str = reader->read(field_var->length_bytes))) + return HA_ERR_INTERNAL_ERROR; + + uint data_len; + /* field_var->length_bytes is 1 or 2 */ + if (field_var->length_bytes == 1) { + data_len = (uchar)data_len_str[0]; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + data_len = uint2korr(data_len_str); + } + + if (data_len > field_var->field_length) { + /* The data on disk is longer than table DDL allows? 
*/ + return HA_ERR_INTERNAL_ERROR; + } + + if (!reader->read(data_len)) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) { + memcpy(field_var->ptr, data_len_str, field_var->length_bytes + data_len); + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::convert_field_from_storage_format( + my_core::Field *const field, + Rdb_string_reader *const reader, + bool decode, + uint len) +{ + const char *data_bytes; + if (len > 0) { + if ((data_bytes = reader->read(len)) == nullptr) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) + memcpy(field->ptr, data_bytes, len); + } + + return HA_EXIT_SUCCESS; +} + /* @brief Unpack the record in this->m_retrieved_record and this->m_last_rowkey from @@ -4168,7 +4224,6 @@ int ha_rocksdb::convert_record_from_storage_format( DBUG_ASSERT(buf != nullptr); Rdb_string_reader reader(value); - const my_ptrdiff_t ptr_diff = buf - table->record[0]; /* Decode PK fields from the key @@ -4208,6 +4263,7 @@ int ha_rocksdb::convert_record_from_storage_format( return HA_ERR_INTERNAL_ERROR; } + int err = HA_EXIT_SUCCESS; for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) { const Rdb_field_encoder *const field_dec = it->m_field_enc; const bool decode = it->m_decode; @@ -4221,89 +4277,49 @@ int ha_rocksdb::convert_record_from_storage_format( if (it->m_skip && !reader.read(it->m_skip)) return HA_ERR_INTERNAL_ERROR; + uint field_offset = field->ptr - table->record[0]; + uint null_offset = field->null_offset(); + bool maybe_null = field->real_maybe_null(); + field->move_field(buf + field_offset, + maybe_null ? buf + null_offset : nullptr, + field->null_bit); + // WARNING! - Don't return before restoring field->ptr and field->null_ptr! + if (isNull) { if (decode) { /* This sets the NULL-bit of this record */ - field->set_null(ptr_diff); + field->set_null(); /* Besides that, set the field value to default value. CHECKSUM TABLE depends on this. 
*/ - uint field_offset = field->ptr - table->record[0]; - memcpy(buf + field_offset, table->s->default_values + field_offset, + memcpy(field->ptr, table->s->default_values + field_offset, field->pack_length()); } - continue; } else { - if (decode) - field->set_notnull(ptr_diff); + if (decode) { + field->set_notnull(); + } + + if (field_dec->m_field_type == MYSQL_TYPE_BLOB) { + err = convert_blob_from_storage_format( + (my_core::Field_blob *) field, &reader, decode); + } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) { + err = convert_varchar_from_storage_format( + (my_core::Field_varstring *) field, &reader, decode); + } else { + err = convert_field_from_storage_format( + field, &reader, decode, field_dec->m_pack_length_in_rec); + } } - if (field_dec->m_field_type == MYSQL_TYPE_BLOB) { - my_core::Field_blob *const blob = (my_core::Field_blob *)field; - /* Get the number of bytes needed to store length*/ - const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; + // Restore field->ptr and field->null_ptr + field->move_field(table->record[0] + field_offset, + maybe_null ? 
table->record[0] + null_offset : nullptr, + field->null_bit); - blob->move_field_offset(ptr_diff); - - const char *data_len_str; - if (!(data_len_str = reader.read(length_bytes))) { - blob->move_field_offset(-ptr_diff); - return HA_ERR_INTERNAL_ERROR; - } - - memcpy(blob->ptr, data_len_str, length_bytes); - - const uint32 data_len = blob->get_length( - (uchar *)data_len_str, length_bytes, table->s->db_low_byte_first); - const char *blob_ptr; - if (!(blob_ptr = reader.read(data_len))) { - blob->move_field_offset(-ptr_diff); - return HA_ERR_INTERNAL_ERROR; - } - - if (decode) { - // set 8-byte pointer to 0, like innodb does (relevant for 32-bit - // platforms) - memset(blob->ptr + length_bytes, 0, 8); - memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **)); - blob->move_field_offset(-ptr_diff); - } - } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) { - Field_varstring *const field_var = (Field_varstring *)field; - const char *data_len_str; - if (!(data_len_str = reader.read(field_var->length_bytes))) - return HA_ERR_INTERNAL_ERROR; - - uint data_len; - /* field_var->length_bytes is 1 or 2 */ - if (field_var->length_bytes == 1) { - data_len = (uchar)data_len_str[0]; - } else { - DBUG_ASSERT(field_var->length_bytes == 2); - data_len = uint2korr(data_len_str); - } - if (data_len > field->field_length) { - /* The data on disk is longer than table DDL allows? 
*/ - return HA_ERR_INTERNAL_ERROR; - } - if (!reader.read(data_len)) - return HA_ERR_INTERNAL_ERROR; - - if (decode) { - memcpy(field_var->ptr + ptr_diff, data_len_str, - field_var->length_bytes + data_len); - } - } else { - const char *data_bytes; - const uint len = field_dec->m_pack_length_in_rec; - if (len > 0) { - if ((data_bytes = reader.read(len)) == nullptr) { - return HA_ERR_INTERNAL_ERROR; - } - if (decode) - memcpy(field->ptr + ptr_diff, data_bytes, len); - } + if (err != HA_EXIT_SUCCESS) { + return err; } } @@ -4723,6 +4739,14 @@ bool ha_rocksdb::get_error_message(const int error, String *const buf) { buf->append(tx->m_detailed_error); temp_error = true; break; + case HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED: + buf->append("Specifying DATA DIRECTORY for an individual table is not " + "supported."); + break; + case HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED: + buf->append("Specifying INDEX DIRECTORY for an individual table is not " + "supported."); + break; default: // We can be called with the values which are < HA_ERR_FIRST because most // MySQL internal functions will just return HA_EXIT_FAILURE in case of @@ -4791,7 +4815,7 @@ int ha_rocksdb::create_key_defs( */ if (create_cfs(table_arg, tbl_def_arg, &cfs)) { DBUG_RETURN(HA_EXIT_FAILURE); - }; + } if (!old_tbl_def_arg) { /* @@ -4845,6 +4869,7 @@ int ha_rocksdb::create_cfs( DBUG_ASSERT(table_arg != nullptr); DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); char tablename_sys[NAME_LEN + 1]; @@ -4884,35 +4909,53 @@ int ha_rocksdb::create_cfs( } } - /* - index comment has Column Family name. If there was no comment, we get - NULL, and it means use the default column family. - */ - const char *const comment = get_key_comment(i, table_arg, tbl_def_arg); + // Internal consistency check to make sure that data in TABLE and + // Rdb_tbl_def structures matches. Either both are missing or both are + // specified. 
Yes, this is critical enough to make it into SHIP_ASSERT. + SHIP_ASSERT(!table_arg->part_info == tbl_def_arg->base_partition().empty()); + + // Generate the name for the column family to use. + bool per_part_match_found = false; + std::string cf_name = generate_cf_name(i, table_arg, tbl_def_arg, + &per_part_match_found); + const char *const key_name = get_key_name(i, table_arg, tbl_def_arg); - if (looks_like_per_index_cf_typo(comment)) { + if (looks_like_per_index_cf_typo(cf_name.c_str())) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "column family name looks like a typo of $per_index_cf"); + "column family name looks like a typo of $per_index_cf."); DBUG_RETURN(HA_EXIT_FAILURE); } - /* Prevent create from using the system column family */ - if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0) { + + // Prevent create from using the system column family. + if (!cf_name.empty() && strcmp(DEFAULT_SYSTEM_CF_NAME, + cf_name.c_str()) == 0) { my_error(ER_WRONG_ARGUMENTS, MYF(0), - "column family not valid for storing index data"); + "column family not valid for storing index data."); DBUG_RETURN(HA_EXIT_FAILURE); } + bool is_auto_cf_flag; + + // Here's how `get_or_create_cf` will use the input parameters: + // + // `cf_name` - will be used as a CF name. + // `key_name` - will be only used in case of "$per_index_cf". 
cf_handle = - cf_manager.get_or_create_cf(rdb, comment, tbl_def_arg->full_tablename(), - key_name, &is_auto_cf_flag); - if (!cf_handle) + cf_manager.get_or_create_cf(rdb, cf_name.c_str(), + tbl_def_arg->full_tablename(), key_name, + &is_auto_cf_flag); + + if (!cf_handle) { DBUG_RETURN(HA_EXIT_FAILURE); + } auto &cf = (*cfs)[i]; + cf.cf_handle = cf_handle; - cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(comment); + cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str()); cf.is_auto_cf = is_auto_cf_flag; + cf.is_per_partition_cf = per_part_match_found; } DBUG_RETURN(HA_EXIT_SUCCESS); @@ -4982,7 +5025,8 @@ int ha_rocksdb::create_inplace_key_defs( new_key_descr[i] = std::make_shared( okd.get_index_number(), i, okd.get_cf(), index_dict_version, index_type, kv_version, okd.m_is_reverse_cf, okd.m_is_auto_cf, - okd.m_name.c_str(), dict_manager.get_stats(gl_index_id)); + okd.m_is_per_partition_cf, okd.m_name.c_str(), + dict_manager.get_stats(gl_index_id)); } else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i], cfs[i])) { DBUG_RETURN(HA_EXIT_FAILURE); @@ -5118,7 +5162,8 @@ int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i, const char *const key_name = get_key_name(i, table_arg, m_tbl_def); *new_key_def = std::make_shared( index_id, i, cf_info.cf_handle, index_dict_version, index_type, - kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name); + kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, + cf_info.is_per_partition_cf, key_name); DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -5271,6 +5316,22 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, HA_CREATE_INFO *const create_info) { DBUG_ENTER_FUNC(); + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(create_info != nullptr); + + if (create_info->data_file_name) { + // DATA DIRECTORY is used to create tables under a specific location + // outside the MySQL data directory. We don't support this for MyRocks. 
+ // The `rocksdb_datadir` setting should be used to configure RocksDB data + // directory. + DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED); + } + + if (create_info->index_file_name) { + // Similar check for INDEX DIRECTORY as well. + DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED); + } + int res; std::string str; @@ -5441,15 +5502,7 @@ int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, index_tuple= lookup_tuple. lookup_tuple may be a prefix of the index. */ - if (kd.m_is_reverse_cf) { - if (!full_key_match) { - if (!iter->Valid()) - iter->SeekToLast(); - else - iter->Prev(); - } - } - + rocksdb_smart_seek(kd.m_is_reverse_cf, iter, key_slice); if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) { /* Got a record that is not equal to the lookup value, or even a record @@ -5467,24 +5520,20 @@ int ha_rocksdb::read_before_key(const Rdb_key_def &kd, We are looking for record with the biggest t.key such that t.key < lookup_tuple. */ - if (kd.m_is_reverse_cf) { - if (m_scan_it->Valid() && full_key_match && - kd.value_matches_prefix(m_scan_it->key(), key_slice)) { - /* We are using full key and we've hit an exact match */ + rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice); + if (m_scan_it->Valid() && full_key_match && + kd.value_matches_prefix(m_scan_it->key(), key_slice)) { + /* We are using full key and we've hit an exact match */ + if (kd.m_is_reverse_cf) { m_scan_it->Next(); - } - } else { - if (m_scan_it->Valid()) + } else { m_scan_it->Prev(); - else - m_scan_it->SeekToLast(); + } } - return m_scan_it->Valid() ? 
HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::read_after_key(const Rdb_key_def &kd, - const bool &full_key_match, const rocksdb::Slice &key_slice) { /* We are looking for the first record such that @@ -5494,22 +5543,7 @@ int ha_rocksdb::read_after_key(const Rdb_key_def &kd, with HA_READ_AFTER_KEY, $GT = '>', with HA_READ_KEY_OR_NEXT, $GT = '>=' */ - if (kd.m_is_reverse_cf) { - if (!m_scan_it->Valid()) { - m_scan_it->SeekToLast(); - } else { - /* - We should step back - - when not using full extended key - - when using full extended key and when we've got an exact match - */ - if (!full_key_match || - !kd.value_matches_prefix(m_scan_it->key(), key_slice)) { - m_scan_it->Prev(); - } - } - } - + rocksdb_smart_seek(kd.m_is_reverse_cf, m_scan_it, key_slice); return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } @@ -5538,7 +5572,7 @@ int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, break; case HA_READ_AFTER_KEY: case HA_READ_KEY_OR_NEXT: - rc = read_after_key(kd, full_key_match, key_slice); + rc = read_after_key(kd, key_slice); if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; @@ -5905,8 +5939,10 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, */ const uint size = kd.pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple, key, keypart_map); - rc = get_row_by_rowid(buf, m_pk_packed_tuple, size); - if (!rc) { + bool skip_lookup = is_blind_delete_enabled(); + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size, + skip_lookup); + if (!rc && !skip_lookup) { stats.rows_read++; stats.rows_index_first++; update_row_stats(ROWS_READ); @@ -6339,6 +6375,17 @@ rocksdb::Status ha_rocksdb::get_for_update( return s; } +bool ha_rocksdb::is_blind_delete_enabled() +{ + THD *thd = ha_thd(); + return (THDVAR(thd, blind_delete_primary_key) && + thd->lex->sql_command == SQLCOM_DELETE && + thd->lex->table_count == 1 && + table->s->keys == 1 && 
+ !has_hidden_pk(table) && + !thd->rli_slave); +} + /* Given a rowid (i.e. packed PK) as a parameter, get the record. @@ -6348,7 +6395,8 @@ rocksdb::Status ha_rocksdb::get_for_update( */ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, - const uint rowid_size) { + const uint rowid_size, + const bool skip_lookup) { DBUG_ENTER_FUNC(); DBUG_ASSERT(buf != nullptr); @@ -6374,6 +6422,16 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, bool found; rocksdb::Status s; + /* Pretend row found without looking up */ + if (skip_lookup) + { + stats.rows_deleted_blind++; + update_row_stats(ROWS_DELETED_BLIND); + m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin); + table->status = 0; + DBUG_RETURN(0); + } + if (m_lock_rows == RDB_LOCK_NONE) { tx->acquire_snapshot(true); s = tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); @@ -6554,6 +6612,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { for (;;) { setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); + m_scan_it->Seek(index_key); m_skip_scan_it_next_call = true; rc = index_next_with_direction(buf, true); @@ -6649,21 +6708,7 @@ int ha_rocksdb::index_last_intern(uchar *const buf) { for (;;) { setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf, Rdb_key_def::INDEX_NUMBER_SIZE); - - if (!m_scan_it->Valid()) { - /* - RocksDB keyspace has no records that follow our index. It could be, our - index is the last in the keyspace. Navigate to last record in the - keyspace. - */ - m_scan_it->SeekToLast(); - } else { - /* - Iterator is pointing at some record right after the end of the index. - Move one record back. 
- */ - m_scan_it->Prev(); - } + m_scan_it->SeekForPrev(index_key); m_skip_scan_it_next_call = false; if (is_pk(active_index, table, m_tbl_def)) { @@ -6726,11 +6771,15 @@ bool ha_rocksdb::skip_unique_check() const { 2) this table is in the whitelist of tables to skip and the replication lag has reached a large enough value (see unique_check_lag_threshold and unique_check_lage_reset_threshold) - 3) the user set unique_checks option to 0 + 3) the user set unique_checks option to 0, and the table does not have + any indexes. If the table has secondary keys, then those might becomes + inconsisted/corrupted */ return THDVAR(table->in_use, bulk_load) || (m_force_skip_unique_check && m_skip_unique_check) || - my_core::thd_test_options(table->in_use, OPTION_RELAXED_UNIQUE_CHECKS); + (my_core::thd_test_options(table->in_use, + OPTION_RELAXED_UNIQUE_CHECKS) && + m_tbl_def->m_key_count == 1); } void ha_rocksdb::set_force_skip_unique_check(bool skip) { @@ -6805,15 +6854,31 @@ bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg, is_hidden_pk(index, table_arg, tbl_def_arg); } +/* + Formats the string and returns the column family name assignment part for a + specific partition. 
+*/ +const std::string ha_rocksdb::gen_cf_name_qualifier_for_partition( + const std::string& prefix) { + DBUG_ASSERT(!prefix.empty()); + + return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + RDB_CF_NAME_QUALIFIER + + RDB_PER_PARTITION_QUALIFIER_VALUE_SEP; +} + const char *ha_rocksdb::get_key_name(const uint index, const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); if (is_hidden_pk(index, table_arg, tbl_def_arg)) { return HIDDEN_PK_NAME; } + DBUG_ASSERT(table_arg->key_info != nullptr); + DBUG_ASSERT(table_arg->key_info[index].name != nullptr); + return table_arg->key_info[index].name; } @@ -6821,14 +6886,84 @@ const char *ha_rocksdb::get_key_comment(const uint index, const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg) { DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); if (is_hidden_pk(index, table_arg, tbl_def_arg)) { return nullptr; } + DBUG_ASSERT(table_arg->key_info != nullptr); + return table_arg->key_info[index].comment.str; } +const std::string ha_rocksdb::generate_cf_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool *per_part_match_found) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + DBUG_ASSERT(per_part_match_found != nullptr); + + // When creating CF-s the caller needs to know if there was a custom CF name + // specified for a given paritition. + *per_part_match_found = false; + + // Index comment is used to define the column family name specification(s). + // If there was no comment, we get an emptry string, and it means "use the + // default column family". + const char *const comment = get_key_comment(index, table_arg, tbl_def_arg); + + // `get_key_comment` can return `nullptr`, that's why this. + std::string key_comment = comment ? 
comment : ""; + + // If table has partitions then we need to check if user has requested to + // create a column family with a specific name on a per partition basis. + if (table_arg->part_info != nullptr) { + std::string partition_name = tbl_def_arg->base_partition(); + DBUG_ASSERT(!partition_name.empty()); + + // Let's fetch the comment for a index and check if there's a custom key + // name specified for a partition we are handling. + std::vector v = myrocks::parse_into_tokens(key_comment, + RDB_QUALIFIER_SEP); + std::string part_to_search = gen_cf_name_qualifier_for_partition( + partition_name); + DBUG_ASSERT(!part_to_search.empty()); + + // Basic O(N) search for a matching assignment. At most we expect maybe + // ten or so elements here. + for (const auto &it : v) { + if (it.substr(0, part_to_search.length()) == part_to_search) { + // We found a prefix match. Try to parse it as an assignment. + std::vector tokens = myrocks::parse_into_tokens(it, + RDB_PER_PARTITION_QUALIFIER_VALUE_SEP); + + // We found a custom name, it was in the form we expected it to be. + // Return that instead of whatever we initially wanted to return. In + // a case below the `foo` part will be returned to the caller. + // + // p3_cfname=foo + // + // If no value was specified then we'll return an empty string which + // later gets translated into using a default CF. + if (tokens.size() == 2) { + *per_part_match_found = true; + return tokens[1]; + } else { + return ""; + } + } + } + + // At this point we tried to search for a custom CF name for a partition, + // but none was specified. Therefore default one will be used. 
+ return ""; + } + + return key_comment; +} + int ha_rocksdb::write_row(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -7188,7 +7323,7 @@ int ha_rocksdb::finalize_bulk_load() { return rc; } - mysql_mutex_lock(&m_bulk_load_mutex); + RDB_MUTEX_LOCK_CHECK(m_bulk_load_mutex); /* We need this check because it's possible that m_sst_info has been @@ -7203,7 +7338,8 @@ int ha_rocksdb::finalize_bulk_load() { mysql prints via my_printf_error. */ sql_print_error("Failed to commit bulk loaded sst file to the " - "data store (%s)", m_sst_info->error_message().c_str()); + "data store (%s)", + m_sst_info->error_message().c_str()); my_printf_error(ER_UNKNOWN_ERROR, "Failed to commit bulk loaded sst file to the " @@ -7217,7 +7353,8 @@ int ha_rocksdb::finalize_bulk_load() { m_bulk_load_tx = nullptr; } - mysql_mutex_unlock(&m_bulk_load_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_bulk_load_mutex); + return rc; } @@ -7462,7 +7599,7 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, } /* - Open a cursor and position it at the passed record + Open a cursor */ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, @@ -7522,12 +7659,6 @@ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, } m_scan_it_skips_bloom = skip_bloom; } - /* - Seek() will "Position at the first key in the source that at or past - target". - The operation cannot fail. 
- */ - m_scan_it->Seek(*slice); } void ha_rocksdb::release_scan_iterator() { @@ -7551,6 +7682,7 @@ void ha_rocksdb::setup_iterator_for_rnd_scan() { rocksdb::Slice table_key((const char *)m_pk_packed_tuple, key_size); setup_scan_iterator(*m_pk_descr, &table_key); + m_scan_it->Seek(table_key); m_skip_scan_it_next_call = true; } @@ -7859,20 +7991,33 @@ int ha_rocksdb::info(uint flag) { update_stats(); } - if (stats.records == 0) { - // most likely, the table is in memtable - // try to deduce from GetApproximateSizes + // if number of records is hardcoded, we do not want to force computation + // of memtable cardinalities + if (stats.records == 0 || + (rocksdb_force_compute_memtable_stats && + rocksdb_debug_optimizer_n_rows == 0)) + { + // First, compute SST files stats uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; auto r = get_range(pk_index(table, m_tbl_def), buf); uint64_t sz = 0; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, true); -#pragma GCC diagnostic pop + uint8_t include_flags = rocksdb::DB::INCLUDE_FILES; + // recompute SST files stats only if records count is 0 + if (stats.records == 0) { + rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, + include_flags); + stats.records+= sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + stats.data_file_length+= sz; + } - stats.records = sz / ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; - stats.data_file_length = sz; + // Second, compute memtable stats + uint64_t memtableCount; + uint64_t memtableSize; + rdb->GetApproximateMemTableStats(m_pk_descr->get_cf(), r, + &memtableCount, &memtableSize); + stats.records += memtableCount; + stats.data_file_length += memtableSize; if (rocksdb_debug_optimizer_n_rows > 0) stats.records = rocksdb_debug_optimizer_n_rows; @@ -8301,12 +8446,36 @@ ha_rocksdb::get_range(const int &i, return myrocks::get_range(*m_key_descr_arr[i], buf); } +static bool is_myrocks_index_empty( + 
rocksdb::ColumnFamilyHandle *cfh, const bool is_reverse_cf, + const rocksdb::ReadOptions &read_opts, + const uint index_id) +{ + bool index_removed = false; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; + rdb_netbuf_store_uint32(key_buf, index_id); + const rocksdb::Slice key = + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); + std::unique_ptr it(rdb->NewIterator(read_opts, cfh)); + rocksdb_smart_seek(is_reverse_cf, it.get(), key); + if (!it->Valid()) { + index_removed = true; + } else { + if (memcmp(it->key().data(), key_buf, + Rdb_key_def::INDEX_NUMBER_SIZE)) { + // Key does not have same prefix + index_removed = true; + } + } + return index_removed; +} + /* Drop index thread's main logic */ void Rdb_drop_index_thread::run() { - mysql_mutex_lock(&m_signal_mutex); + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); for (;;) { // The stop flag might be set by shutdown command @@ -8331,7 +8500,7 @@ void Rdb_drop_index_thread::run() { } // make sure, no program error is returned DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); - mysql_mutex_unlock(&m_signal_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); std::unordered_set indices; dict_manager.get_ongoing_drop_indexes(&indices); @@ -8353,11 +8522,11 @@ void Rdb_drop_index_thread::run() { DBUG_ASSERT(cfh); const bool is_reverse_cf = cf_flags & Rdb_key_def::REVERSE_CF_FLAG; - bool index_removed = false; - uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; - rdb_netbuf_store_uint32(key_buf, d.index_id); - const rocksdb::Slice key = - rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id)) + { + finished.insert(d); + continue; + } uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf ? 1 : 0, is_reverse_cf ? 
0 : 1); @@ -8381,25 +8550,8 @@ void Rdb_drop_index_thread::run() { } rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); } - std::unique_ptr it(rdb->NewIterator(read_opts, cfh)); - it->Seek(key); - if (is_reverse_cf) { - if (!it->Valid()) { - it->SeekToLast(); - } else { - it->Prev(); - } - } - if (!it->Valid()) { - index_removed = true; - } else { - if (memcmp(it->key().data(), key_buf, - Rdb_key_def::INDEX_NUMBER_SIZE)) { - // Key does not have same prefix - index_removed = true; - } - } - if (index_removed) { + if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id)) + { finished.insert(d); } } @@ -8408,10 +8560,10 @@ void Rdb_drop_index_thread::run() { dict_manager.finish_drop_indexes(finished); } } - mysql_mutex_lock(&m_signal_mutex); + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); } - mysql_mutex_unlock(&m_signal_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); } Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) { @@ -8683,10 +8835,12 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key, #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" // Getting statistics, including from Memtables - rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, true); -#pragma GCC diagnostic pop - + uint8_t include_flags = rocksdb::DB::INCLUDE_FILES; + rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, include_flags); ret = rows * sz / disk_size; + uint64_t memTableCount; + rdb->GetApproximateMemTableStats(kd.get_cf(), r, &memTableCount, &sz); + ret += memTableCount; /* GetApproximateSizes() gives estimates so ret might exceed stats.records. 
@@ -8764,6 +8918,7 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, std::unordered_map> ranges; std::unordered_set ids_to_check; + std::unordered_map ids_to_keyparts; std::vector buf(table_arg->s->keys * 2 * Rdb_key_def::INDEX_NUMBER_SIZE); for (uint i = 0; i < table_arg->s->keys; i++) { @@ -8771,6 +8926,7 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, const Rdb_key_def &kd = *m_key_descr_arr[i]; ranges[kd.get_cf()].push_back(get_range(i, bufp)); ids_to_check.insert(kd.get_gl_index_id()); + ids_to_keyparts[kd.get_gl_index_id()] = kd.get_key_parts(); } // for analyze statements, force flush on memtable to get accurate cardinality @@ -8800,6 +8956,8 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, // Initialize the stats to 0. If there are no files that contain // this gl_index_id, then 0 should be stored for the cached stats. stats[it] = Rdb_index_stats(it); + DBUG_ASSERT(ids_to_keyparts.count(it) > 0); + stats[it].m_distinct_keys_per_prefix.resize(ids_to_keyparts[it]); } for (const auto &it : props) { std::vector sst_stats; @@ -9271,6 +9429,15 @@ int ha_rocksdb::inplace_populate_sk( dict_manager.add_create_index(create_index_ids, batch); dict_manager.commit(batch); + /* + Add uncommitted key definitons to ddl_manager. We need to do this + so that the property collector can find this keydef when it needs to + update stats. The property collector looks for the keydef in the + data dictionary, but it won't be there yet since this key definition + is still in the creation process. 
+ */ + ddl_manager.add_uncommitted_keydefs(indexes); + const bool hidden_pk_exists = has_hidden_pk(table); int res = 0; @@ -9489,6 +9656,8 @@ bool ha_rocksdb::commit_inplace_alter_table( Rdb_inplace_alter_ctx *const ctx0 = static_cast(ha_alter_info->handler_ctx); + DEBUG_SYNC(ha_thd(), "rocksdb.commit_in_place_alter_table"); + /* IMPORTANT: When rollback is requested, mysql will abort with an assertion failure. That means every failed commit during inplace alter @@ -9523,6 +9692,9 @@ bool ha_rocksdb::commit_inplace_alter_table( delete ctx0->m_new_tdef; } + /* Remove uncommitted key definitons from ddl_manager */ + ddl_manager.remove_uncommitted_keydefs(ctx0->m_added_indexes); + /* Rollback any partially created indexes */ dict_manager.rollback_ongoing_index_creation(); @@ -9582,6 +9754,12 @@ bool ha_rocksdb::commit_inplace_alter_table( */ DBUG_ASSERT(0); } + + /* + Remove uncommitted key definitons from ddl_manager, as they are now + committed into the data dictionary. + */ + ddl_manager.remove_uncommitted_keydefs(ctx->m_added_indexes); } if (dict_manager.commit(batch)) { @@ -9750,6 +9928,7 @@ static void myrocks_update_status() { export_stats.rows_inserted = global_stats.rows[ROWS_INSERTED]; export_stats.rows_read = global_stats.rows[ROWS_READ]; export_stats.rows_updated = global_stats.rows[ROWS_UPDATED]; + export_stats.rows_deleted_blind = global_stats.rows[ROWS_DELETED_BLIND]; export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED]; export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED]; @@ -9765,6 +9944,8 @@ static SHOW_VAR myrocks_status_variables[] = { DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_deleted_blind", + &export_stats.rows_deleted_blind, SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted, SHOW_LONGLONG), 
DEF_STATUS_VAR_FUNC("system_rows_inserted", @@ -9870,7 +10051,7 @@ void Rdb_background_thread::run() { // Wait until the next timeout or until we receive a signal to stop the // thread. Request to stop the thread should only be triggered when the // storage engine is being unloaded. - mysql_mutex_lock(&m_signal_mutex); + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); const auto ret MY_ATTRIBUTE((__unused__)) = mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts_next_sync); @@ -9879,7 +10060,7 @@ void Rdb_background_thread::run() { const bool local_stop = m_stop; const bool local_save_stats = m_save_stats; reset(); - mysql_mutex_unlock(&m_signal_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); if (local_stop) { // If we're here then that's because condition variable was signaled by @@ -9963,11 +10144,8 @@ bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, shorter require all parts of the key to be available for the short key match. */ - if (use_all_keys && prefix_extractor->InRange(eq_cond)) - can_use = true; - else if (!is_ascending) - can_use = false; - else if (prefix_extractor->SameResultWhenAppended(eq_cond)) + if ((use_all_keys && prefix_extractor->InRange(eq_cond)) + || prefix_extractor->SameResultWhenAppended(eq_cond)) can_use = true; else can_use = false; @@ -10138,7 +10316,7 @@ void rocksdb_set_table_stats_sampling_pct( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { - mysql_mutex_lock(&rdb_sysvars_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint32_t new_val = *static_cast(save); @@ -10151,7 +10329,7 @@ void rocksdb_set_table_stats_sampling_pct( } } - mysql_mutex_unlock(&rdb_sysvars_mutex); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } /* @@ -10185,6 +10363,15 @@ void rocksdb_set_rate_limiter_bytes_per_sec( } } +void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, + void 
*var_ptr, const void *save) { + const uint64_t new_val = *static_cast(save); + if (rocksdb_delayed_write_rate != new_val) { + rocksdb_delayed_write_rate = new_val; + rocksdb_db_options.delayed_write_rate = new_val; + } +} + void rdb_set_collation_exception_list(const char *const exception_list) { DBUG_ASSERT(rdb_collation_exceptions != nullptr); @@ -10200,7 +10387,7 @@ void rocksdb_set_collation_exception_list(THD *const thd, const void *const save) { const char *const val = *static_cast(save); - rdb_set_collation_exception_list(val); + rdb_set_collation_exception_list(val == nullptr ? "" : val); *static_cast(var_ptr) = val; } @@ -10229,13 +10416,15 @@ static void rocksdb_set_max_background_compactions( const void *const save) { DBUG_ASSERT(save != nullptr); - mysql_mutex_lock(&rdb_sysvars_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + rocksdb_db_options.max_background_compactions = *static_cast(save); rocksdb_db_options.env->SetBackgroundThreads( rocksdb_db_options.max_background_compactions, rocksdb::Env::Priority::LOW); - mysql_mutex_unlock(&rdb_sysvars_mutex); + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rdb_queue_save_stats_request() { rdb_bg_thread.request_save_stats(); } diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index db95a4d919a..afb9edd9e1f 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -121,6 +121,34 @@ const char *const BG_THREAD_NAME = "myrocks-bg"; */ const char *const INDEX_THREAD_NAME = "myrocks-index"; +/* + Separator between partition name and the qualifier. Sample usage: + + - p0_cfname=foo + - p3_tts_col=bar +*/ +const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_'; + +/* + Separator between qualifier name and value. Sample usage: + + - p0_cfname=foo + - p3_tts_col=bar +*/ +const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '='; + +/* + Separator between multiple qualifier assignments. 
Sample usage: + + - p0_cfname=foo;p1_cfname=bar;p2_cfname=baz +*/ +const char RDB_QUALIFIER_SEP = ';'; + +/* + Qualifier name for a custom per partition column family. +*/ +const char *const RDB_CF_NAME_QUALIFIER = "cfname"; + /* Default, minimal valid, and maximum valid sampling rate values when collecting statistics about table. @@ -192,7 +220,9 @@ const char *const INDEX_THREAD_NAME = "myrocks-index"; #define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) #define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) #define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) -#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS +#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4) +#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5) +#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED inline bool looks_like_per_index_cf_typo(const char *const name) { return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME)); @@ -258,6 +288,7 @@ enum operation_type { ROWS_INSERTED, ROWS_READ, ROWS_UPDATED, + ROWS_DELETED_BLIND, ROWS_MAX }; @@ -282,6 +313,7 @@ struct st_export_stats { ulonglong rows_inserted; ulonglong rows_read; ulonglong rows_updated; + ulonglong rows_deleted_blind; ulonglong system_rows_deleted; ulonglong system_rows_inserted; @@ -474,13 +506,13 @@ class ha_rocksdb : public my_core::handler { const rocksdb::Slice &key, std::string *const value) const; int get_row_by_rowid(uchar *const buf, const char *const rowid, - const uint rowid_size) + const uint rowid_size, const bool skip_lookup = false) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int get_row_by_rowid(uchar *const buf, const uchar *const rowid, - const uint rowid_size) + const uint rowid_size, const bool skip_lookup = false) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) { return get_row_by_rowid(buf, reinterpret_cast(rowid), - rowid_size); + rowid_size, skip_lookup); } void update_auto_incr_val(); @@ -491,6 
+523,7 @@ class ha_rocksdb : public my_core::handler { MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); bool can_use_single_delete(const uint &index) const MY_ATTRIBUTE((__warn_unused_result__)); + bool is_blind_delete_enabled(); bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__)); void set_force_skip_unique_check(bool skip) override; bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__)); @@ -667,6 +700,21 @@ public: int rename_table(const char *const from, const char *const to) override MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int convert_blob_from_storage_format(my_core::Field_blob *const blob, + Rdb_string_reader *const reader, + bool decode) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_varchar_from_storage_format( + my_core::Field_varstring *const field_var, + Rdb_string_reader *const reader, bool decode) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_field_from_storage_format(my_core::Field *const field, + Rdb_string_reader *const reader, + bool decode, uint len) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int convert_record_from_storage_format(const rocksdb::Slice *const key, const rocksdb::Slice *const value, uchar *const buf) @@ -681,6 +729,17 @@ public: rocksdb::Slice *const packed_rec) MY_ATTRIBUTE((__nonnull__)); + static const std::string gen_cf_name_qualifier_for_partition( + const std::string &s); + + static const std::vector parse_into_tokens(const std::string &s, + const char delim); + + static const std::string generate_cf_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool *per_part_match_found); + static const char *get_key_name(const uint index, const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg) @@ -702,7 +761,6 @@ public: static bool is_pk(const uint index, const TABLE *table_arg, const Rdb_tbl_def *tbl_def_arg) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - 
/** @brief unireg.cc will call max_supported_record_length(), max_supported_keys(), max_supported_key_parts(), uint max_supported_key_length() @@ -827,6 +885,7 @@ private: rocksdb::ColumnFamilyHandle *cf_handle; bool is_reverse_cf; bool is_auto_cf; + bool is_per_partition_cf; }; struct update_row_info { @@ -946,10 +1005,8 @@ private: int read_before_key(const Rdb_key_def &kd, const bool &using_full_key, const rocksdb::Slice &key_slice) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int read_after_key(const Rdb_key_def &kd, const bool &using_full_key, - const rocksdb::Slice &key_slice) + int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int position_to_correct_key( const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, const bool &full_key_match, const uchar *const key, diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h index f639f807549..ca75caf9df5 100644 --- a/storage/rocksdb/logger.h +++ b/storage/rocksdb/logger.h @@ -23,6 +23,10 @@ namespace myrocks { class Rdb_logger : public rocksdb::Logger { public: + explicit Rdb_logger(const rocksdb::InfoLogLevel log_level = + rocksdb::InfoLogLevel::ERROR_LEVEL) + : m_mysql_log_level(log_level) {} + void Logv(const rocksdb::InfoLogLevel log_level, const char *format, va_list ap) override { DBUG_ASSERT(format != nullptr); @@ -33,7 +37,7 @@ public: m_logger->Logv(log_level, format, ap); } - if (log_level < GetInfoLogLevel()) { + if (log_level < m_mysql_log_level) { return; } @@ -61,8 +65,21 @@ public: m_logger = logger; } + void SetInfoLogLevel(const rocksdb::InfoLogLevel log_level) override { + // The InfoLogLevel for the logger is used by rocksdb to filter + // messages, so it needs to be the lower of the two loggers + rocksdb::InfoLogLevel base_level = log_level; + + if (m_logger && m_logger->GetInfoLogLevel() < base_level) { + base_level = m_logger->GetInfoLogLevel(); + } + 
rocksdb::Logger::SetInfoLogLevel(base_level); + m_mysql_log_level = log_level; + } + private: std::shared_ptr m_logger; + rocksdb::InfoLogLevel m_mysql_log_level; }; } // namespace myrocks diff --git a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result index 97238282ebe..06452a5437f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result @@ -5,7 +5,7 @@ USE mysqlslap; CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; # 2PC enabled, MyRocks durability enabled SET GLOBAL rocksdb_enable_2pc=0; -SET GLOBAL rocksdb_write_sync=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; ## 2PC + durability + single thread select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; @@ -18,7 +18,7 @@ case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else ' false # 2PC enabled, MyRocks durability disabled SET GLOBAL rocksdb_enable_2pc=0; -SET GLOBAL rocksdb_write_sync=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c = 0 then 'true' else 'false' end @@ -29,7 +29,7 @@ case when variable_value-@c = 0 then 'true' else 'false' end true # 2PC disabled, MyRocks durability enabled SET GLOBAL rocksdb_enable_2pc=1; -SET GLOBAL rocksdb_write_sync=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; select variable_value into @c from information_schema.global_status where 
variable_name='rocksdb_wal_group_syncs'; select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; case when variable_value-@c = 0 then 'true' else 'false' end @@ -39,6 +39,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa case when variable_value-@c = 0 then 'true' else 'false' end false SET GLOBAL rocksdb_enable_2pc=1; -SET GLOBAL rocksdb_write_sync=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; DROP TABLE t1; DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result new file mode 100644 index 00000000000..f1ccff01e16 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed'; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SET debug_sync= 'now SIGNAL flushed'; +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5,5 +SET debug_sync='RESET'; +DROP 
TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result b/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result new file mode 100644 index 00000000000..a3fc25cc81b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result @@ -0,0 +1,85 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key; +set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api; +DROP TABLE IF EXISTS t1,t2; +create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +SET session rocksdb_blind_delete_primary_key=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +1000 +SELECT count(*) FROM t1; +count(*) +9000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +9000 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +0 +SELECT count(*) FROM t2; +count(*) +9000 +SET session rocksdb_master_skip_tx_api=1; +select variable_value into @c from information_schema.global_status where 
variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +1000 +SELECT count(*) FROM t1; +count(*) +8000 +SELECT count(*) FROM t2; +count(*) +8000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +8000 +SELECT count(*) FROM t2; +count(*) +8000 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000; +DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +0 +SELECT count(*) FROM t1; +count(*) +7000 +SELECT count(*) FROM t2; +count(*) +7000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +7000 +SELECT count(*) FROM t2; +count(*) +7000 +DELETE FROM t1 WHERE id = 10; +SELECT count(*) FROM t1; +count(*) +7000 +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 't1'.*"); +include/wait_for_slave_sql_error.inc [errno=1032] +set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables; +set global rocksdb_read_free_rpl_tables="t.*"; +START SLAVE; +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +7000 +set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables; +SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key; +SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api; +DROP TABLE t1, t2; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result index 50b73a98111..d859c8551b2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -1,4 +1,4 @@ -DROP TABLE IF EXISTS t1, t2; +DROP TABLE IF EXISTS t1, t2, t3; CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' @@ -19,9 +19,9 @@ LOAD DATA INFILE INTO TABLE t3; set rocksdb_bulk_load=0; SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL -t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL -t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned ANALYZE TABLE t1, t2, t3; Table Op Msg_type Msg_text test.t1 analyze status OK @@ -29,36 +29,36 @@ test.t2 analyze status OK test.t3 analyze status OK SHOW TABLE STATUS WHERE name LIKE 't%'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL -t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL -t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 
5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned select count(pk) from t1; count(pk) -10000000 +5000000 select count(a) from t1; count(a) -10000000 +5000000 select count(b) from t1; count(b) -10000000 +5000000 select count(pk) from t2; count(pk) -10000000 +5000000 select count(a) from t2; count(a) -10000000 +5000000 select count(b) from t2; count(b) -10000000 +5000000 select count(pk) from t3; count(pk) -10000000 +5000000 select count(a) from t3; count(a) -10000000 +5000000 select count(b) from t3; count(b) -10000000 +5000000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result index b6bde05cc70..edd3fd1f187 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -125,4 +125,5 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin). DROP TABLE abc; +SET GLOBAL rocksdb_strict_collation_exceptions=null; SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result index 1ae56ae5f05..df90f2b3670 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result @@ -1,4 +1,4 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB; -ERROR HY000: Incorrect arguments to column family not valid for storing index data +ERROR HY000: Incorrect arguments to column family not valid for storing index data. 
DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result index 1ab8bd7678c..f2e2a8316b7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result @@ -66,7 +66,7 @@ Handler_read_prev 0 Handler_read_rnd 0 Handler_read_rnd_next 10 FLUSH STATUS; -SELECT * FROM t1 WHERE b <=5 ORDER BY b; +SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b; id a b 4 NULL 4 5 NULL 5 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue290.result b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result index 8b1a35648c0..1a83a93bcbb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue290.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result @@ -22,7 +22,7 @@ insert into linktable (id1, link_type, id2) values (2, 1, 7); insert into linktable (id1, link_type, id2) values (2, 1, 8); insert into linktable (id1, link_type, id2) values (2, 1, 9); insert into linktable (id1, link_type, id2) values (2, 1, 10); -explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); +explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where +1 SIMPLE linktable range PRIMARY PRIMARY 24 NULL # Using where drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result new file mode 100644 index 00000000000..2560ec577ed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result @@ -0,0 +1,32 @@ +drop table if exists t; +Warnings: +Note 1051 Unknown table 'test.t' +create table t ( +a int, +b int, +c varchar(12249) collate 
latin1_bin, +d datetime, +e int, +f int, +g blob, +h int, +i int, +key (b,e), +key (h,b) +) engine=rocksdb +partition by linear hash (i) partitions 8 ; +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +select i from t group by h; +i +1 +select i from t group by h; +i +1 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result index 849257d08fa..1cc90c61c01 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result @@ -124,6 +124,51 @@ UNLOCK TABLES; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; 2 +==== mysqldump with --innodb-stats-on-metadata ==== + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL DEFAULT '0', + `id2` int(11) NOT NULL DEFAULT '0', + `id3` varchar(100) NOT 
NULL DEFAULT '', + `id4` int(11) NOT NULL DEFAULT '0', + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY : (null) */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + SET GLOBAL binlog_format=statement; SET GLOBAL binlog_format=row; drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result index 76085cc1d27..4175ca7db46 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/partition.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result @@ -2,7 +2,55 @@ DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS VAR_POP; DROP TABLE IF EXISTS TEMP0; DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE 
IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +Table Op Msg_type Msg_text +test.t1 optimize status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 repair status OK +Table Op Msg_type Msg_text +test.t1 check status OK SHOW TABLES; Tables_in_test TEMP0 @@ -24,7 +72,614 @@ i j k SELECT COUNT(*) FROM t1; COUNT(*) 1000 -DROP TABLE t1; -DROP TABLE VAR_POP; -DROP TABLE TEMP0; -DROP TABLE VAR_SAMP; +CREATE TABLE ti( +id INT, +amount DECIMAL(7,2), +tr_date DATE +) ENGINE=ROCKSDB +PARTITION BY HASH(MONTH(tr_date)) +PARTITIONS 6; +CREATE TABLE members ( +firstname VARCHAR(25) NOT NULL, +lastname VARCHAR(25) NOT NULL, +username VARCHAR(16) NOT NULL, +email VARCHAR(35), +joined DATE NOT NULL +) ENGINE=ROCKSDB +PARTITION BY KEY(joined) +PARTITIONS 6; +CREATE TABLE members_2 ( +firstname VARCHAR(25) NOT NULL, +lastname VARCHAR(25) NOT NULL, +username 
VARCHAR(16) NOT NULL, +email VARCHAR(35), +joined DATE NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE(YEAR(joined)) ( +PARTITION p0 VALUES LESS THAN (1960), +PARTITION p1 VALUES LESS THAN (1970), +PARTITION p2 VALUES LESS THAN (1980), +PARTITION p3 VALUES LESS THAN (1990), +PARTITION p4 VALUES LESS THAN MAXVALUE +); +CREATE TABLE t2 (val INT) +ENGINE=ROCKSDB +PARTITION BY LIST(val)( +PARTITION mypart VALUES IN (1,3,5), +PARTITION MyPart VALUES IN (2,4,6) +); +ERROR HY000: Duplicate partition name MyPart +CREATE TABLE employees ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE (store_id) ( +PARTITION p0 VALUES LESS THAN (6), +PARTITION p1 VALUES LESS THAN (11), +PARTITION p2 VALUES LESS THAN (16), +PARTITION p3 VALUES LESS THAN MAXVALUE +); +CREATE TABLE employees_2 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE (job_code) ( +PARTITION p0 VALUES LESS THAN (100), +PARTITION p1 VALUES LESS THAN (1000), +PARTITION p2 VALUES LESS THAN (10000) +); +CREATE TABLE employees_3 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY RANGE (YEAR(separated)) ( +PARTITION p0 VALUES LESS THAN (1991), +PARTITION p1 VALUES LESS THAN (1996), +PARTITION p2 VALUES LESS THAN (2001), +PARTITION p3 VALUES LESS THAN MAXVALUE +); +CREATE TABLE quarterly_report_status ( +report_id INT NOT NULL, +report_status VARCHAR(20) NOT NULL, +report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=ROCKSDB +PARTITION BY RANGE 
(UNIX_TIMESTAMP(report_updated)) ( +PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ), +PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ), +PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ), +PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ), +PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ), +PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ), +PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ), +PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ), +PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ), +PARTITION p9 VALUES LESS THAN (MAXVALUE) +); +CREATE TABLE employees_4 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY LIST(store_id) ( +PARTITION pNorth VALUES IN (3,5,6,9,17), +PARTITION pEast VALUES IN (1,2,10,11,19,20), +PARTITION pWest VALUES IN (4,12,13,14,18), +PARTITION pCentral VALUES IN (7,8,15,16) +); +CREATE TABLE h2 ( +c1 INT, +c2 INT +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION p0 VALUES IN (1, 4, 7), +PARTITION p1 VALUES IN (2, 5, 8) +); +INSERT INTO h2 VALUES (3, 5); +ERROR HY000: Table has no partition for value 3 +CREATE TABLE rcx ( +a INT, +b INT, +c CHAR(3), +d INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,d,c) ( +PARTITION p0 VALUES LESS THAN (5,10,'ggg'), +PARTITION p1 VALUES LESS THAN (10,20,'mmm'), +PARTITION p2 VALUES LESS THAN (15,30,'sss'), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +CREATE TABLE r1 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (MAXVALUE) +); +INSERT INTO r1 VALUES (5,10), (5,11), (5,12); +CREATE TABLE rc1 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY 
RANGE COLUMNS(a, b) ( +PARTITION p0 VALUES LESS THAN (5, 12), +PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE) +); +INSERT INTO rc1 VALUES (5,10), (5,11), (5,12); +SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12); +(5,10) < (5,12) (5,11) < (5,12) (5,12) < (5,12) +1 1 0 +CREATE TABLE rx ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS (a) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (MAXVALUE) +); +INSERT INTO rx VALUES (5,10), (5,11), (5,12); +CREATE TABLE rc2 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b) ( +PARTITION p0 VALUES LESS THAN (0,10), +PARTITION p1 VALUES LESS THAN (10,20), +PARTITION p2 VALUES LESS THAN (10,30), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); +CREATE TABLE rc3 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b) ( +PARTITION p0 VALUES LESS THAN (0,10), +PARTITION p1 VALUES LESS THAN (10,20), +PARTITION p2 VALUES LESS THAN (10,30), +PARTITION p3 VALUES LESS THAN (10,35), +PARTITION p4 VALUES LESS THAN (20,40), +PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); +CREATE TABLE rc4 ( +a INT, +b INT, +c INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b,c) ( +PARTITION p0 VALUES LESS THAN (0,25,50), +PARTITION p1 VALUES LESS THAN (10,20,100), +PARTITION p2 VALUES LESS THAN (10,30,50), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50); +(0,25,50) < (10,20,100) (10,20,100) < (10,30,50) +1 1 +CREATE TABLE rcf ( +a INT, +b INT, +c INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b,c) ( +PARTITION p0 VALUES LESS THAN (0,25,50), +PARTITION p1 VALUES LESS THAN (20,20,100), +PARTITION p2 VALUES LESS THAN (10,30,50), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition +CREATE TABLE employees_by_lname ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL 
DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS (lname) ( +PARTITION p0 VALUES LESS THAN ('g'), +PARTITION p1 VALUES LESS THAN ('m'), +PARTITION p2 VALUES LESS THAN ('t'), +PARTITION p3 VALUES LESS THAN (MAXVALUE) +); +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) ( +PARTITION p0 VALUES LESS THAN ('g'), +PARTITION p1 VALUES LESS THAN ('m'), +PARTITION p2 VALUES LESS THAN ('t'), +PARTITION p3 VALUES LESS THAN (MAXVALUE) +); +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) ( +PARTITION p0 VALUES LESS THAN ('1970-01-01'), +PARTITION p1 VALUES LESS THAN ('1980-01-01'), +PARTITION p2 VALUES LESS THAN ('1990-01-01'), +PARTITION p3 VALUES LESS THAN ('2000-01-01'), +PARTITION p4 VALUES LESS THAN ('2010-01-01'), +PARTITION p5 VALUES LESS THAN (MAXVALUE) +); +CREATE TABLE customers_1 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(city) ( +PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'MönsterÃ¥s'), +PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'), +PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'), +PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo') +); +CREATE TABLE customers_2 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(renewal) ( +PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03', +'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'), +PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10', +'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'), +PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17', +'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'), 
+PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24', +'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28') +); +CREATE TABLE customers_3 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(renewal) ( +PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'), +PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'), +PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'), +PARTITION pWeek_4 VALUES LESS THAN('2010-03-01') +); +CREATE TABLE employees_hash ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY HASH(store_id) +PARTITIONS 4; +CREATE TABLE employees_hash_1 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY HASH( YEAR(hired) ) +PARTITIONS 4; +CREATE TABLE t1_hash ( +col1 INT, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY HASH( YEAR(col3) ) +PARTITIONS 4; +CREATE TABLE employees_linear_hash ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY LINEAR HASH( YEAR(hired) ) +PARTITIONS 4; +CREATE TABLE t1_linear_hash ( +col1 INT, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR HASH( YEAR(col3) ) +PARTITIONS 6; +CREATE TABLE k1 ( +id INT NOT NULL PRIMARY KEY, +name VARCHAR(20) +) ENGINE=ROCKSDB +PARTITION BY KEY() +PARTITIONS 2; +CREATE TABLE k2 ( +id INT NOT NULL, +name VARCHAR(20), +UNIQUE KEY (id) +) ENGINE=ROCKSDB +PARTITION BY KEY() +PARTITIONS 2; +CREATE TABLE tm1 ( +s1 CHAR(32) PRIMARY KEY +) ENGINE=ROCKSDB +PARTITION BY KEY(s1) +PARTITIONS 
10; +CREATE TABLE tk ( +col1 INT NOT NULL, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR KEY (col1) +PARTITIONS 3; +CREATE TABLE ts ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) +SUBPARTITIONS 2 ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (2000), +PARTITION p2 VALUES LESS THAN MAXVALUE +); +CREATE TABLE ts_1 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_2 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2, +SUBPARTITION s3 +) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ' +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2, +SUBPARTITION s3 +) +)' at line 11 +CREATE TABLE ts_3 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_4 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN 
(2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_5 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE(YEAR(purchased)) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0a, +SUBPARTITION s0b +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s1a, +SUBPARTITION s1b +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2a, +SUBPARTITION s2b +) +); +CREATE TABLE trb3 ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (1995), +PARTITION p2 VALUES LESS THAN (2000), +PARTITION p3 VALUES LESS THAN (2005) +); +ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2; +CREATE TABLE tr ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (1995), +PARTITION p2 VALUES LESS THAN (2000), +PARTITION p3 VALUES LESS THAN (2005) +); +INSERT INTO tr VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'aquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; +id name purchased +3 TV set 1996-03-10 +10 lava lamp 1998-12-25 +ALTER TABLE tr DROP PARTITION p2; +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; +id name purchased +CREATE TABLE members_3 ( +id INT, +fname VARCHAR(25), +lname VARCHAR(25), +dob DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(dob) ) ( +PARTITION p0 VALUES LESS THAN (1970), +PARTITION p1 VALUES LESS THAN (1980), +PARTITION p2 
VALUES LESS THAN (1990) +); +ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000)); +ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960)); +ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition +CREATE TABLE clients ( +id INT, +fname VARCHAR(30), +lname VARCHAR(30), +signed DATE +) ENGINE=ROCKSDB +PARTITION BY HASH( MONTH(signed) ) +PARTITIONS 12; +ALTER TABLE clients COALESCE PARTITION 4; +CREATE TABLE clients_lk ( +id INT, +fname VARCHAR(30), +lname VARCHAR(30), +signed DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR KEY(signed) +PARTITIONS 12; +ALTER TABLE clients COALESCE PARTITION 18; +ERROR HY000: Cannot remove all partitions, use DROP TABLE instead +ALTER TABLE clients ADD PARTITION PARTITIONS 6; +CREATE TABLE trb1 ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE(id) ( +PARTITION p0 VALUES LESS THAN (3), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (9), +PARTITION p3 VALUES LESS THAN (11) +); +INSERT INTO trb1 VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'aquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); +ALTER TABLE trb1 ADD PRIMARY KEY (id); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP 
TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 9fb28791834..8a02bb8258b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -864,6 +864,7 @@ rocksdb_allow_mmap_reads OFF rocksdb_allow_mmap_writes OFF rocksdb_background_sync OFF rocksdb_base_background_compactions 1 +rocksdb_blind_delete_primary_key OFF rocksdb_block_cache_size 536870912 rocksdb_block_restart_interval 16 rocksdb_block_size 4096 @@ -889,14 +890,16 @@ rocksdb_db_write_buffer_size 0 rocksdb_deadlock_detect OFF rocksdb_debug_optimizer_no_zero_cardinality ON rocksdb_default_cf_options +rocksdb_delayed_write_rate 16777216 rocksdb_delete_obsolete_files_period_micros 21600000000 -rocksdb_disabledatasync OFF rocksdb_enable_2pc ON rocksdb_enable_bulk_load_api ON rocksdb_enable_thread_tracking OFF rocksdb_enable_write_thread_adaptive_yield OFF rocksdb_error_if_exists OFF +rocksdb_flush_log_at_trx_commit 1 rocksdb_flush_memtable_on_analyze ON +rocksdb_force_compute_memtable_stats ON rocksdb_force_flush_memtable_now OFF rocksdb_force_index_records_in_range 0 
rocksdb_hash_index_allow_collision ON @@ -908,6 +911,7 @@ rocksdb_lock_scanned_rows OFF rocksdb_lock_wait_timeout 1 rocksdb_log_file_time_to_roll 0 rocksdb_manifest_preallocation_size 4194304 +rocksdb_master_skip_tx_api OFF rocksdb_max_background_compactions 1 rocksdb_max_background_flushes 1 rocksdb_max_log_file_size 0 @@ -925,7 +929,7 @@ rocksdb_paranoid_checks ON rocksdb_pause_background_work ON rocksdb_perf_context_level 0 rocksdb_persistent_cache_path -rocksdb_persistent_cache_size 0 +rocksdb_persistent_cache_size_mb 0 rocksdb_pin_l0_filter_and_index_blocks_in_cache ON rocksdb_print_snapshot_conflict_queries OFF rocksdb_rate_limiter_bytes_per_sec 0 @@ -953,25 +957,37 @@ rocksdb_validate_tables 1 rocksdb_verify_row_debug_checksums OFF rocksdb_wal_bytes_per_sync 0 rocksdb_wal_dir -rocksdb_wal_recovery_mode 2 +rocksdb_wal_recovery_mode 1 rocksdb_wal_size_limit_mb 0 rocksdb_wal_ttl_seconds 0 rocksdb_whole_key_filtering ON rocksdb_write_disable_wal OFF rocksdb_write_ignore_missing_column_families OFF -rocksdb_write_sync OFF create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; insert into t47 values (1, 'row1'); insert into t47 values (2, 'row2'); set rocksdb_bulk_load=1; insert into t47 values (3, 'row3'),(4, 'row4'); set rocksdb_bulk_load=0; +connect con1,localhost,root,,; +set rocksdb_bulk_load=1; +insert into t47 values (10, 'row10'),(11, 'row11'); +connection default; +set rocksdb_bulk_load=1; +insert into t47 values (100, 'row100'),(101, 'row101'); +disconnect con1; +connection default; +set rocksdb_bulk_load=0; select * from t47; pk col1 1 row1 2 row2 3 row3 4 row4 +10 row10 +11 row11 +100 row100 +101 row101 drop table t47; # # Fix TRUNCATE over empty table (transaction is committed when it wasn't @@ -1410,6 +1426,7 @@ rocksdb_rows_deleted # rocksdb_rows_inserted # rocksdb_rows_read # rocksdb_rows_updated # +rocksdb_rows_deleted_blind # rocksdb_system_rows_deleted # rocksdb_system_rows_inserted # rocksdb_system_rows_read # @@ -1482,6 
+1499,7 @@ ROCKSDB_ROWS_DELETED ROCKSDB_ROWS_INSERTED ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED +ROCKSDB_ROWS_DELETED_BLIND ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ @@ -1556,6 +1574,7 @@ ROCKSDB_ROWS_DELETED ROCKSDB_ROWS_INSERTED ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED +ROCKSDB_ROWS_DELETED_BLIND ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ @@ -1737,7 +1756,7 @@ key1 int, PRIMARY KEY (id), index (key1) comment 'test.t1.key1' ) engine=rocksdb; -ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag. +ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag. create table t1_err ( id int not null, key1 int, @@ -1763,7 +1782,7 @@ key1 int, PRIMARY KEY (id), index (key1) comment '$per_idnex_cf' )engine=rocksdb; -ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf' +ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf.' # # Issue #22: SELECT ... 
FOR UPDATE takes a long time # diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result new file mode 100644 index 00000000000..05ac3f4f62d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result @@ -0,0 +1,409 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment'; +cf_name +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment'; +cf_name +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set @@global.rocksdb_compact_cf = 'baz'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo'; +cf_name +foo +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf'; +cf_name +my_custom_cf +SELECT DISTINCT(cf_name) FROM 
information_schema.rocksdb_cfstats WHERE cf_name='baz'; +cf_name +baz +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set @@global.rocksdb_compact_cf = 't1-p0'; +set @@global.rocksdb_compact_cf = 'rev:bar'; +set @@global.rocksdb_compact_cf = 't1-p2'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0'; +cf_name +t1-p0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar'; +cf_name +rev:bar +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2'; +cf_name +t1-p2 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9), +PARTITION custom_p3 VALUES IN (10, 20, 30) +); +set @@global.rocksdb_compact_cf = 'cf-zero'; +set @@global.rocksdb_compact_cf = 'cf-one'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero'; +cf_name +cf-zero +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one'; +cf_name +cf-one +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT 
INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +5 5 five NULL +3 3 three NULL +9 9 nine NULL +ALTER TABLE t1 DROP PRIMARY KEY; +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +5 5 five NULL +3 3 three NULL +9 9 nine NULL +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'bar'; +set @@global.rocksdb_compact_cf = 'baz'; +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf'; +set @@global.rocksdb_compact_cf = 'p0_cf'; +set @@global.rocksdb_compact_cf = 'p1_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf'; +cf_name +p0_cf +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf'; +cf_name +p1_cf +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT INTO 
t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +ALTER TABLE t1 PARTITION BY LIST(c1) ( +PARTITION custom_p3 VALUES IN (1, 4, 7), +PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9) +); +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf'; +set @@global.rocksdb_compact_cf = 'p3_cf'; +set @@global.rocksdb_compact_cf = 'p4_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf'; +cf_name +p3_cf +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf'; +cf_name +p4_cf +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +DROP TABLE t1; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 
'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +set @@global.rocksdb_compact_cf = 'my_cf0'; +set @@global.rocksdb_compact_cf = 'my_cf1'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0'; +cf_name +my_cf0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1'; +cf_name +my_cf1 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; +col1 HEX(col2) HEX(col3) col4 HEX(col5) +100 012345 01 1 02 +200 012345 01 1 02 +300 012345 01 1 02 +100 023456 02 1 03 +100 034567 04 1 05 +400 089ABC 04 1 05 +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 3 Using where; Using index +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index +ALTER TABLE t2 DROP PRIMARY KEY; +ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1'; +set @@global.rocksdb_compact_cf = 'new_cf0'; +set @@global.rocksdb_compact_cf = 'new_cf1'; 
+SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0'; +cf_name +new_cf0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1'; +cf_name +new_cf1 +INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2); +INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3); +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 4 Using where; Using index +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; +col1 HEX(col2) HEX(col3) col4 HEX(col5) +100 012345 01 1 02 +200 012345 01 1 02 +300 012345 01 1 02 +500 012345 05 1 02 +100 023456 02 1 03 +700 023456 07 1 03 +100 034567 04 1 05 +400 089ABC 04 1 05 +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', +KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0'; +cf_name +test_cf0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1'; 
+cf_name +test_cf1 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5'; +cf_name +test_cf5 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p5 ref col2 col2 74 const,const 1 Using where +ALTER TABLE t2 DROP KEY `col2`; +ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; +cf_name +another_cf_for_p5 +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p2 ref col3 col3 258 const 1 Using where +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', +UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +SELECT DISTINCT(cf_name) FROM 
information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5'; +cf_name +unique_test_cf5 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2' +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2' +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); +DROP TABLE t2; +CREATE TABLE t1 ( +`a` int, +PRIMARY KEY (a) COMMENT "sharedcf" +) ENGINE=ROCKSDB; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf'; +cf_name +sharedcf +CREATE TABLE t2 ( +`a` INT, +`b` DATE, +`c` VARCHAR(42), +PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf" +) ENGINE=ROCKSDB +PARTITION BY LIST(`a`) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf'; +cf_name +notsharedcf +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result index 7f1584938d6..19d794da848 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -12,7 +12,6 @@ Type Name Status DBSTATS rocksdb # CF_COMPACTION __system__ # CF_COMPACTION cf_t1 # -CF_COMPACTION cf_t4 # CF_COMPACTION default # CF_COMPACTION rev:cf_t2 # Memory_Stats rocksdb # @@ -48,15 +47,6 @@ cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE # cf_t1 NUM_ENTRIES_IMM_MEM_TABLES # cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE # cf_t1 NUM_LIVE_VERSIONS # -cf_t4 NUM_IMMUTABLE_MEM_TABLE # -cf_t4 MEM_TABLE_FLUSH_PENDING # -cf_t4 COMPACTION_PENDING 
# -cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE # -cf_t4 CUR_SIZE_ALL_MEM_TABLES # -cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE # -cf_t4 NUM_ENTRIES_IMM_MEM_TABLES # -cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE # -cf_t4 NUM_LIVE_VERSIONS # default NUM_IMMUTABLE_MEM_TABLE # default MEM_TABLE_FLUSH_PENDING # default COMPACTION_PENDING # @@ -117,7 +107,6 @@ __system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS # __system__ ARENA_BLOCK_SIZE # __system__ DISABLE_AUTO_COMPACTIONS # __system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH # -__system__ VERIFY_CHECKSUM_IN_COMPACTION # __system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # __system__ MEMTABLE_FACTORY # __system__ INPLACE_UPDATE_SUPPORT # @@ -126,7 +115,6 @@ __system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO # __system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # __system__ BLOOM_LOCALITY # __system__ MAX_SUCCESSIVE_MERGES # -__system__ MIN_PARTIAL_MERGE_OPERANDS # __system__ OPTIMIZE_FILTERS_FOR_HITS # __system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # __system__ COMPRESSION_TYPE # @@ -173,7 +161,6 @@ cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS # cf_t1 ARENA_BLOCK_SIZE # cf_t1 DISABLE_AUTO_COMPACTIONS # cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH # -cf_t1 VERIFY_CHECKSUM_IN_COMPACTION # cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # cf_t1 MEMTABLE_FACTORY # cf_t1 INPLACE_UPDATE_SUPPORT # @@ -182,7 +169,6 @@ cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # cf_t1 BLOOM_LOCALITY # cf_t1 MAX_SUCCESSIVE_MERGES # -cf_t1 MIN_PARTIAL_MERGE_OPERANDS # cf_t1 OPTIMIZE_FILTERS_FOR_HITS # cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # cf_t1 COMPRESSION_TYPE # @@ -206,62 +192,6 @@ cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # -cf_t4 COMPARATOR # -cf_t4 MERGE_OPERATOR # -cf_t4 COMPACTION_FILTER # -cf_t4 COMPACTION_FILTER_FACTORY # -cf_t4 WRITE_BUFFER_SIZE # -cf_t4 MAX_WRITE_BUFFER_NUMBER # 
-cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # -cf_t4 NUM_LEVELS # -cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # -cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER # -cf_t4 LEVEL0_STOP_WRITES_TRIGGER # -cf_t4 MAX_MEM_COMPACTION_LEVEL # -cf_t4 TARGET_FILE_SIZE_BASE # -cf_t4 TARGET_FILE_SIZE_MULTIPLIER # -cf_t4 MAX_BYTES_FOR_LEVEL_BASE # -cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # -cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER # -cf_t4 SOFT_RATE_LIMIT # -cf_t4 HARD_RATE_LIMIT # -cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS # -cf_t4 ARENA_BLOCK_SIZE # -cf_t4 DISABLE_AUTO_COMPACTIONS # -cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH # -cf_t4 VERIFY_CHECKSUM_IN_COMPACTION # -cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # -cf_t4 MEMTABLE_FACTORY # -cf_t4 INPLACE_UPDATE_SUPPORT # -cf_t4 INPLACE_UPDATE_NUM_LOCKS # -cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # -cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # -cf_t4 BLOOM_LOCALITY # -cf_t4 MAX_SUCCESSIVE_MERGES # -cf_t4 MIN_PARTIAL_MERGE_OPERANDS # -cf_t4 OPTIMIZE_FILTERS_FOR_HITS # -cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # -cf_t4 COMPRESSION_TYPE # -cf_t4 COMPRESSION_PER_LEVEL # -cf_t4 COMPRESSION_OPTS # -cf_t4 BOTTOMMOST_COMPRESSION # -cf_t4 PREFIX_EXTRACTOR # -cf_t4 COMPACTION_STYLE # -cf_t4 COMPACTION_OPTIONS_UNIVERSAL # -cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # -cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # -cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # -cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # -cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # -cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # -cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # -cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # -cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # -cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # -cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # -cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # -cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # -cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # 
default COMPARATOR # default MERGE_OPERATOR # default COMPACTION_FILTER # @@ -285,7 +215,6 @@ default RATE_LIMIT_DELAY_MAX_MILLISECONDS # default ARENA_BLOCK_SIZE # default DISABLE_AUTO_COMPACTIONS # default PURGE_REDUNDANT_KVS_WHILE_FLUSH # -default VERIFY_CHECKSUM_IN_COMPACTION # default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # default MEMTABLE_FACTORY # default INPLACE_UPDATE_SUPPORT # @@ -294,7 +223,6 @@ default MEMTABLE_PREFIX_BLOOM_BITS_RATIO # default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # default BLOOM_LOCALITY # default MAX_SUCCESSIVE_MERGES # -default MIN_PARTIAL_MERGE_OPERANDS # default OPTIMIZE_FILTERS_FOR_HITS # default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # default COMPRESSION_TYPE # @@ -341,7 +269,6 @@ rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS # rev:cf_t2 ARENA_BLOCK_SIZE # rev:cf_t2 DISABLE_AUTO_COMPACTIONS # rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH # -rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION # rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # rev:cf_t2 MEMTABLE_FACTORY # rev:cf_t2 INPLACE_UPDATE_SUPPORT # @@ -350,7 +277,6 @@ rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # rev:cf_t2 BLOOM_LOCALITY # rev:cf_t2 MAX_SUCCESSIVE_MERGES # -rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS # rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS # rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # rev:cf_t2 COMPRESSION_TYPE # diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result index 5821369ae57..bbdd604097f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -1,20 +1,23 @@ DROP TABLE IF EXISTS t1; -CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '' INDEX DIRECTORY = ''; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL, - `b` char(8) DEFAULT NULL, - PRIMARY KEY (`a`) 
-) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -ALTER TABLE t1 INDEX DIRECTORY = ''; -Warnings: -Warning 1618 option ignored -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL, - `b` char(8) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 -DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; +ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; +ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) +( +PARTITION P0 VALUES LESS THAN (1000) +DATA DIRECTORY = '/foo/bar/data/', +PARTITION P1 VALUES LESS THAN (2000) +DATA DIRECTORY = '/foo/bar/data/', +PARTITION P2 VALUES LESS THAN (MAXVALUE) +); +ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) +( +PARTITION P0 VALUES LESS THAN (1000) +INDEX DIRECTORY = '/foo/bar/data/', +PARTITION P1 VALUES LESS THAN (2000) +INDEX DIRECTORY = '/foo/bar/data/', +PARTITION P2 VALUES LESS THAN (MAXVALUE) +); +ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' 
from ROCKSDB diff --git a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result index c24ef9fc1c8..fe13c1633a8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result @@ -26,23 +26,23 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 1 1 1 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 1 2 2 2 @@ -54,7 +54,7 @@ id value value2 9 9 9 10 10 10 11 11 11 -select value from t1; +select value from t1 order by id; value 1 2 @@ -93,23 +93,23 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 1 1 1 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 1 2 2 2 @@ -121,7 +121,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 1 2 @@ -159,22 +159,22 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 100 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id 
value value2 1 1 100 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 100 2 2 2 @@ -185,7 +185,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 1 2 @@ -221,21 +221,21 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 100 1 2 2 2 @@ -246,8 +246,9 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value +100 2 3 4 @@ -256,7 +257,6 @@ value 8 9 10 -100 rollback; begin; update t1 set id=100 where id=1; @@ -283,22 +283,22 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 +2 2 2 100 1 1 -2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value -1 2 -select * from t1 where value2 < 3; +1 +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 100 1 1 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 3 3 @@ -309,9 +309,8 @@ id value value2 9 9 9 10 10 10 100 1 1 -select value from t1; +select value from t1 order by id; value -1 2 3 4 @@ -320,6 +319,7 @@ value 8 
9 10 +1 rollback; begin; update t1 set value2=100 where value=1; @@ -346,22 +346,22 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 100 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 1 1 100 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 100 2 2 2 @@ -372,7 +372,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 1 2 @@ -408,21 +408,21 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 100 1 2 2 2 @@ -433,8 +433,9 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value +100 2 3 4 @@ -443,7 +444,6 @@ value 8 9 10 -100 rollback; begin; update t1 set id=100 where value=1; @@ -470,22 +470,22 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 +2 2 2 100 1 1 -2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value -1 2 -select * 
from t1 where value2 < 3; +1 +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 100 1 1 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 3 3 @@ -496,9 +496,8 @@ id value value2 9 9 9 10 10 10 100 1 1 -select value from t1; +select value from t1 order by id; value -1 2 3 4 @@ -507,6 +506,7 @@ value 8 9 10 +1 rollback; begin; update t1 set value2=100 where value2=1; @@ -533,22 +533,22 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 100 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 1 1 100 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 100 2 2 2 @@ -559,7 +559,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 1 2 @@ -595,21 +595,21 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 100 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 100 1 2 2 2 @@ -620,8 +620,9 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value +100 2 3 4 @@ -630,7 +631,6 @@ value 8 9 10 -100 rollback; begin; update t1 set id=100 where value2=1; @@ -657,22 +657,22 @@ value select * from t1 where value2=5; id 
value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 +2 2 2 100 1 1 -2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value -1 2 -select * from t1 where value2 < 3; +1 +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 100 1 1 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 3 3 @@ -683,9 +683,8 @@ id value value2 9 9 9 10 10 10 100 1 1 -select value from t1; +select value from t1 order by id; value -1 2 3 4 @@ -694,6 +693,7 @@ value 8 9 10 +1 rollback; begin; delete from t1 where id=1; @@ -717,19 +717,19 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 3 3 @@ -739,7 +739,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 2 3 @@ -772,19 +772,19 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 
3 3 @@ -794,7 +794,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 2 3 @@ -827,19 +827,19 @@ value select * from t1 where value2=5; id value value2 5 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 2 2 2 3 3 3 @@ -849,7 +849,7 @@ id value value2 8 8 8 9 9 9 10 10 10 -select value from t1; +select value from t1 order by id; value 2 3 @@ -892,23 +892,23 @@ value select * from t1 where value2=5; id value value2 100 5 5 -select * from t1 where id < 3; +select * from t1 where id < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1 where value < 3; +select * from t1 where value < 3 order by id; id value value2 1 1 1 2 2 2 -select value from t1 where value < 3; +select value from t1 where value < 3 order by id; value 1 2 -select * from t1 where value2 < 3; +select * from t1 where value2 < 3 order by id; id value value2 1 1 1 2 2 2 -select * from t1; +select * from t1 order by id; id value value2 1 1 1 2 2 2 @@ -920,17 +920,17 @@ id value value2 13 13 13 100 5 5 115 3 3 -select value from t1; +select value from t1 order by id; value 1 2 -3 -5 +103 6 10 11 12 13 -103 +5 +3 rollback; drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result index 0f3e7200d8a..47c58725ff9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result @@ -70,3 +70,15 @@ id id2 value 1 1 1 set debug_sync='RESET'; drop table t1, t2; +drop table if exists 
t1,t2,t3; +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +create table t3 (id int, value int) engine=rocksdb; +SET @old_val = @@session.unique_checks; +set @@session.unique_checks = FALSE; +insert into t1 values (1, 1), (1, 2); +insert into t2 values (1, 1, 1), (1, 2, 1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +insert into t3 values (1, 1), (1, 1); +set @@session.unique_checks = @old_val; +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result index 433c00c9fc8..6ba50a3796f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -1,7 +1,7 @@ SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; -SET LOCAL rocksdb_write_sync=off; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; @@ -15,7 +15,7 @@ insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a 0 -SET LOCAL rocksdb_write_sync=1; +SET LOCAL rocksdb_flush_log_at_trx_commit=1; insert aaa(id, i) values(4,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a @@ -29,11 +29,11 @@ select variable_value-@a from information_schema.global_status where variable_na variable_value-@a 3 SET GLOBAL rocksdb_background_sync=on; -SET LOCAL rocksdb_write_sync=off; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; 
insert aaa(id, i) values(7,1); truncate table aaa; drop table aaa; -SET GLOBAL rocksdb_write_sync=off; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test index 90af6617794..009fd7beaf2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test @@ -14,7 +14,7 @@ CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE= --echo # 2PC enabled, MyRocks durability enabled SET GLOBAL rocksdb_enable_2pc=0; -SET GLOBAL rocksdb_write_sync=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; --echo ## 2PC + durability + single thread select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; @@ -29,7 +29,7 @@ select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' --echo # 2PC enabled, MyRocks durability disabled SET GLOBAL rocksdb_enable_2pc=0; -SET GLOBAL rocksdb_write_sync=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; --exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" @@ -42,7 +42,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa --echo # 2PC disabled, MyRocks durability enabled SET GLOBAL rocksdb_enable_2pc=1; -SET GLOBAL rocksdb_write_sync=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; --exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, 
value) VALUES(NULL, 1)" @@ -59,6 +59,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa SET GLOBAL rocksdb_enable_2pc=1; -SET GLOBAL rocksdb_write_sync=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; DROP TABLE t1; DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test index 7d3f4091bb4..e5abc7e5b34 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -341,5 +341,3 @@ while ($i <= $max) { #SHOW TABLE STATUS WHERE name LIKE 't1'; DROP TABLE t1; - - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt new file mode 100644 index 00000000000..436edf2b40c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test new file mode 100644 index 00000000000..148edf7a3d2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Test that fast secondary index creation updates cardinality properly +CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); + +SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed'; +send ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +connect (con1,localhost,root,,); + +# Flush memtable out to SST +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SET 
debug_sync= 'now SIGNAL flushed'; + +connection default; +reap; + +# Return the data for the primary key of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); + +# Return the data for the secondary index of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj"); + +disconnect con1; +SET debug_sync='RESET'; + +# cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf new file mode 100644 index 00000000000..a76f1244bab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf @@ -0,0 +1,11 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +sync_binlog=0 +binlog_format=row +slave-exec-mode=strict + +[mysqld.2] +sync_binlog=0 +binlog_format=row +slave-exec-mode=strict diff --git a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test new file mode 100644 index 00000000000..0481634f346 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test @@ -0,0 +1,129 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + +connection master; + +set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key; +set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api; + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 
varchar(200), index(value)) engine=rocksdb; + +--disable_query_log +let $t = 1; +while ($t <= 2) { + let $i = 1; + while ($i <= 10000) { + let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log + +SET session rocksdb_blind_delete_primary_key=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +# Deleting 1000 rows from t1 +--disable_query_log +let $i = 1; +while ($i <= 1000) { + let $insert = DELETE FROM t1 WHERE id=$i; + inc $i; + eval $insert; +} +--enable_query_log +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +connection master; + +# Deleting 1000 rows from t2 (blind delete disabled because of secondary key) +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +--disable_query_log +let $i = 1; +while ($i <= 1000) { + let $insert = DELETE FROM t2 WHERE id=$i; + inc $i; + eval $insert; +} +--enable_query_log +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t2; + +SET session rocksdb_master_skip_tx_api=1; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +--disable_query_log +let $t = 1; +while ($t <= 2) { + let $i = 1001; + while ($i <= 2000) { + let $insert = DELETE FROM t$t WHERE id=$i; + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +SELECT 
count(*) FROM t2; +connection master; + + +# Range Deletes (blind delete disabled) +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000; +DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +connection master; + + +# Deleting same keys (slaves stop) +DELETE FROM t1 WHERE id = 10; +SELECT count(*) FROM t1; +connection slave; +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 't1'.*"); +# wait until we have the expected error +--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND) +--source include/wait_for_slave_sql_error.inc + +connection slave; +set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables; +set global rocksdb_read_free_rpl_tables="t.*"; +START SLAVE; +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +connection master; + + +# cleanup +connection slave; +set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables; +connection master; +SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key; +SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api; + +DROP TABLE t1, t2; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index c80b789ae37..de332baa463 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -1,7 +1,7 @@ --source include/have_rocksdb.inc 
--disable_warnings -DROP TABLE IF EXISTS t1, t2; +DROP TABLE IF EXISTS t1, t2, t3; --enable_warnings # Create a table with a primary key and one secondary key as well as one @@ -25,7 +25,7 @@ CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE perl; my $fn = $ENV{'ROCKSDB_INFILE'}; open(my $fh, '>>', $fn) || die "perl open($fn): $!"; -my $max = 10000000; +my $max = 5000000; my @chars = ("A".."Z", "a".."z", "0".."9"); my @lowerchars = ("a".."z"); my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index 030c3e21454..22bd5db7050 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -177,5 +177,8 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE abc; +# test bad regex (null caused a crash) - Issue 493 +SET GLOBAL rocksdb_strict_collation_exceptions=null; + # cleanup SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test index 91a492ef26e..7b1652c759b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test @@ -37,7 +37,7 @@ SELECT * FROM t1; SHOW SESSION STATUS LIKE 'Handler_read%'; FLUSH STATUS; -SELECT * FROM t1 WHERE b <=5 ORDER BY b; +SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b; SHOW SESSION STATUS LIKE 'Handler_read%'; FLUSH STATUS; @@ -50,4 +50,3 @@ SHOW SESSION STATUS LIKE 'Handler_read%'; # Cleanup DROP TABLE t1; - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue290.test b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test index 99c0479ec21..5ea8799c627 
100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue290.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test @@ -35,6 +35,6 @@ insert into linktable (id1, link_type, id2) values (2, 1, 9); insert into linktable (id1, link_type, id2) values (2, 1, 10); --replace_column 9 # -explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); +explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test new file mode 100644 index 00000000000..bb215ebcd99 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test @@ -0,0 +1,29 @@ +drop table if exists t; +create table t ( + a int, + b int, + c varchar(12249) collate latin1_bin, + d datetime, + e int, + f int, + g blob, + h int, + i int, + key (b,e), + key (h,b) +) engine=rocksdb +partition by linear hash (i) partitions 8 ; + +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +select i from t group by h; +select i from t group by h; + +drop table t; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test index 107790f0c9a..bff44744b1a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -51,6 +51,9 @@ SET GLOBAL default_storage_engine=rocksdb; --exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 
--print-ordering-key test --exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l +# Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect) +--echo ==== mysqldump with --innodb-stats-on-metadata ==== +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test # wiping general log so that this test case doesn't fail with --repeat --exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test index 9f3ccb9dd6c..d104878b17a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -8,6 +8,46 @@ DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS VAR_POP; DROP TABLE IF EXISTS TEMP0; DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF 
EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; --enable_warnings @@ -22,6 +62,12 @@ while ($i <= $max) { eval $insert; } +ALTER TABLE t1 REBUILD PARTITION p0, p1; +ALTER TABLE t1 OPTIMIZE PARTITION p0, p1; +ALTER TABLE t1 ANALYZE PARTITION p3; +ALTER TABLE t1 REPAIR PARTITION p0,p1; +ALTER TABLE t1 CHECK PARTITION p1; + # Parition string is "#P#". To verify that parsing is done correctly then we'll # verify if tables containing "P" somwhere can be created correctly. CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB; @@ -35,8 +81,677 @@ SHOW TABLES; SELECT * FROM t1 ORDER BY i LIMIT 10; SELECT COUNT(*) FROM t1; -DROP TABLE t1; -DROP TABLE VAR_POP; -DROP TABLE TEMP0; -DROP TABLE VAR_SAMP; +# +# Test-cases above are copied from +# https://dev.mysql.com/doc/refman/5.6/en/partitioning.html to validate that the +# partitioning related examples work with MyRocks. +# +# Create a table that is partitioned by hash into 6 partitions. +CREATE TABLE ti( + id INT, + amount DECIMAL(7,2), + tr_date DATE +) ENGINE=ROCKSDB + PARTITION BY HASH(MONTH(tr_date)) + PARTITIONS 6; + +CREATE TABLE members ( + firstname VARCHAR(25) NOT NULL, + lastname VARCHAR(25) NOT NULL, + username VARCHAR(16) NOT NULL, + email VARCHAR(35), + joined DATE NOT NULL +) ENGINE=ROCKSDB + PARTITION BY KEY(joined) + PARTITIONS 6; + +CREATE TABLE members_2 ( + firstname VARCHAR(25) NOT NULL, + lastname VARCHAR(25) NOT NULL, + username VARCHAR(16) NOT NULL, + email VARCHAR(35), + joined DATE NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE(YEAR(joined)) ( + PARTITION p0 VALUES LESS THAN (1960), + PARTITION p1 VALUES LESS THAN (1970), + PARTITION p2 VALUES LESS THAN (1980), + PARTITION p3 VALUES LESS THAN (1990), + PARTITION p4 VALUES LESS THAN MAXVALUE + ); + +# Partition names are not case-sensitive. 
+--error 1517 +CREATE TABLE t2 (val INT) + ENGINE=ROCKSDB + PARTITION BY LIST(val)( + PARTITION mypart VALUES IN (1,3,5), + PARTITION MyPart VALUES IN (2,4,6) + ); + +CREATE TABLE employees ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE (store_id) ( + PARTITION p0 VALUES LESS THAN (6), + PARTITION p1 VALUES LESS THAN (11), + PARTITION p2 VALUES LESS THAN (16), + PARTITION p3 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE employees_2 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE (job_code) ( + PARTITION p0 VALUES LESS THAN (100), + PARTITION p1 VALUES LESS THAN (1000), + PARTITION p2 VALUES LESS THAN (10000) + ); + +CREATE TABLE employees_3 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY RANGE (YEAR(separated)) ( + PARTITION p0 VALUES LESS THAN (1991), + PARTITION p1 VALUES LESS THAN (1996), + PARTITION p2 VALUES LESS THAN (2001), + PARTITION p3 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE quarterly_report_status ( + report_id INT NOT NULL, + report_status VARCHAR(20) NOT NULL, + report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=ROCKSDB + PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) ( + PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ), + PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ), + PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ), + PARTITION p3 VALUES LESS THAN ( 
UNIX_TIMESTAMP('2008-10-01 00:00:00') ), + PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ), + PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ), + PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ), + PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ), + PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ), + PARTITION p9 VALUES LESS THAN (MAXVALUE) + ); + +CREATE TABLE employees_4 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY LIST(store_id) ( + PARTITION pNorth VALUES IN (3,5,6,9,17), + PARTITION pEast VALUES IN (1,2,10,11,19,20), + PARTITION pWest VALUES IN (4,12,13,14,18), + PARTITION pCentral VALUES IN (7,8,15,16) + ); + +CREATE TABLE h2 ( + c1 INT, + c2 INT +) ENGINE=ROCKSDB + PARTITION BY LIST(c1) ( + PARTITION p0 VALUES IN (1, 4, 7), + PARTITION p1 VALUES IN (2, 5, 8) + ); + +# ERROR 1526 (HY000): Table has no partition for value 3 +--error 1526 +INSERT INTO h2 VALUES (3, 5); + +CREATE TABLE rcx ( + a INT, + b INT, + c CHAR(3), + d INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,d,c) ( + PARTITION p0 VALUES LESS THAN (5,10,'ggg'), + PARTITION p1 VALUES LESS THAN (10,20,'mmm'), + PARTITION p2 VALUES LESS THAN (15,30,'sss'), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +CREATE TABLE r1 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (MAXVALUE) + ); + +INSERT INTO r1 VALUES (5,10), (5,11), (5,12); + +CREATE TABLE rc1 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a, b) ( + PARTITION p0 VALUES LESS THAN (5, 12), + PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE) + ); + +INSERT INTO rc1 VALUES (5,10), (5,11), (5,12); +SELECT (5,10) < (5,12), (5,11) < 
(5,12), (5,12) < (5,12); + +CREATE TABLE rx ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS (a) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (MAXVALUE) + ); + +INSERT INTO rx VALUES (5,10), (5,11), (5,12); + +CREATE TABLE rc2 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b) ( + PARTITION p0 VALUES LESS THAN (0,10), + PARTITION p1 VALUES LESS THAN (10,20), + PARTITION p2 VALUES LESS THAN (10,30), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE) + ); + +CREATE TABLE rc3 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b) ( + PARTITION p0 VALUES LESS THAN (0,10), + PARTITION p1 VALUES LESS THAN (10,20), + PARTITION p2 VALUES LESS THAN (10,30), + PARTITION p3 VALUES LESS THAN (10,35), + PARTITION p4 VALUES LESS THAN (20,40), + PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); + +CREATE TABLE rc4 ( + a INT, + b INT, + c INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b,c) ( + PARTITION p0 VALUES LESS THAN (0,25,50), + PARTITION p1 VALUES LESS THAN (10,20,100), + PARTITION p2 VALUES LESS THAN (10,30,50), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50); + +-- ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition + +--error 1493 +CREATE TABLE rcf ( + a INT, + b INT, + c INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b,c) ( + PARTITION p0 VALUES LESS THAN (0,25,50), + PARTITION p1 VALUES LESS THAN (20,20,100), + PARTITION p2 VALUES LESS THAN (10,30,50), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +CREATE TABLE employees_by_lname ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS (lname) ( + PARTITION p0 VALUES LESS THAN ('g'), + PARTITION p1 
VALUES LESS THAN ('m'), + PARTITION p2 VALUES LESS THAN ('t'), + PARTITION p3 VALUES LESS THAN (MAXVALUE) + ); + +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) ( + PARTITION p0 VALUES LESS THAN ('g'), + PARTITION p1 VALUES LESS THAN ('m'), + PARTITION p2 VALUES LESS THAN ('t'), + PARTITION p3 VALUES LESS THAN (MAXVALUE) +); + +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) ( + PARTITION p0 VALUES LESS THAN ('1970-01-01'), + PARTITION p1 VALUES LESS THAN ('1980-01-01'), + PARTITION p2 VALUES LESS THAN ('1990-01-01'), + PARTITION p3 VALUES LESS THAN ('2000-01-01'), + PARTITION p4 VALUES LESS THAN ('2010-01-01'), + PARTITION p5 VALUES LESS THAN (MAXVALUE) +); + +CREATE TABLE customers_1 ( + first_name VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY LIST COLUMNS(city) ( + PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'MönsterÃ¥s'), + PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'), + PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'), + PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo') + ); + +CREATE TABLE customers_2 ( + first_name VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY LIST COLUMNS(renewal) ( + PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03', + '2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'), + PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10', + '2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'), + PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17', + '2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'), + PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24', + '2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28') + ); + +CREATE TABLE customers_3 ( + first_name 
VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(renewal) ( + PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'), + PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'), + PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'), + PARTITION pWeek_4 VALUES LESS THAN('2010-03-01') + ); + +CREATE TABLE employees_hash ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY HASH(store_id) + PARTITIONS 4; + +CREATE TABLE employees_hash_1 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY HASH( YEAR(hired) ) + PARTITIONS 4; + +CREATE TABLE t1_hash ( + col1 INT, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + PARTITION BY HASH( YEAR(col3) ) + PARTITIONS 4; + +CREATE TABLE employees_linear_hash ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY LINEAR HASH( YEAR(hired) ) + PARTITIONS 4; + +CREATE TABLE t1_linear_hash ( + col1 INT, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + PARTITION BY LINEAR HASH( YEAR(col3) ) + PARTITIONS 6; + +CREATE TABLE k1 ( + id INT NOT NULL PRIMARY KEY, + name VARCHAR(20) +) ENGINE=ROCKSDB + PARTITION BY KEY() + PARTITIONS 2; + +CREATE TABLE k2 ( + id INT NOT NULL, + name VARCHAR(20), + UNIQUE KEY (id) +) ENGINE=ROCKSDB + PARTITION BY KEY() + PARTITIONS 2; + +CREATE TABLE tm1 ( + s1 CHAR(32) PRIMARY KEY +) ENGINE=ROCKSDB + PARTITION BY KEY(s1) + PARTITIONS 10; + +CREATE TABLE tk ( + col1 INT NOT NULL, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + 
PARTITION BY LINEAR KEY (col1) + PARTITIONS 3; + +CREATE TABLE ts ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) + SUBPARTITIONS 2 ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (2000), + PARTITION p2 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE ts_1 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +--error 1064 +CREATE TABLE ts_2 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s2, + SUBPARTITION s3 + ) + ); + +CREATE TABLE ts_3 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +CREATE TABLE ts_4 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +CREATE TABLE ts_5 ( + id INT, + purchased 
DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE(YEAR(purchased)) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0a, + SUBPARTITION s0b + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s1a, + SUBPARTITION s1b + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s2a, + SUBPARTITION s2b + ) + ); + +CREATE TABLE trb3 ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (1995), + PARTITION p2 VALUES LESS THAN (2000), + PARTITION p3 VALUES LESS THAN (2005) + ); + +ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2; + +CREATE TABLE tr ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (1995), + PARTITION p2 VALUES LESS THAN (2000), + PARTITION p3 VALUES LESS THAN (2005) + ); + +INSERT INTO tr VALUES + (1, 'desk organiser', '2003-10-15'), + (2, 'CD player', '1993-11-05'), + (3, 'TV set', '1996-03-10'), + (4, 'bookcase', '1982-01-10'), + (5, 'exercise bike', '2004-05-09'), + (6, 'sofa', '1987-06-05'), + (7, 'popcorn maker', '2001-11-22'), + (8, 'aquarium', '1992-08-04'), + (9, 'study desk', '1984-09-16'), + (10, 'lava lamp', '1998-12-25'); + +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; + +ALTER TABLE tr DROP PARTITION p2; + +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; + +CREATE TABLE members_3 ( + id INT, + fname VARCHAR(25), + lname VARCHAR(25), + dob DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(dob) ) ( + PARTITION p0 VALUES LESS THAN (1970), + PARTITION p1 VALUES LESS THAN (1980), + PARTITION p2 VALUES LESS THAN (1990) + ); + +ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000)); + +# ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each 
partition +--error 1493 +ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960)); + +CREATE TABLE clients ( + id INT, + fname VARCHAR(30), + lname VARCHAR(30), + signed DATE +) ENGINE=ROCKSDB + PARTITION BY HASH( MONTH(signed) ) + PARTITIONS 12; + +ALTER TABLE clients COALESCE PARTITION 4; + +CREATE TABLE clients_lk ( + id INT, + fname VARCHAR(30), + lname VARCHAR(30), + signed DATE +) ENGINE=ROCKSDB + PARTITION BY LINEAR KEY(signed) + PARTITIONS 12; + +# ERROR 1508 (HY000): Cannot remove all partitions, use DROP TABLE instead +--error 1508 +ALTER TABLE clients COALESCE PARTITION 18; + +ALTER TABLE clients ADD PARTITION PARTITIONS 6; + +CREATE TABLE trb1 ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE(id) ( + PARTITION p0 VALUES LESS THAN (3), + PARTITION p1 VALUES LESS THAN (7), + PARTITION p2 VALUES LESS THAN (9), + PARTITION p3 VALUES LESS THAN (11) + ); + +INSERT INTO trb1 VALUES + (1, 'desk organiser', '2003-10-15'), + (2, 'CD player', '1993-11-05'), + (3, 'TV set', '1996-03-10'), + (4, 'bookcase', '1982-01-10'), + (5, 'exercise bike', '2004-05-09'), + (6, 'sofa', '1987-06-05'), + (7, 'popcorn maker', '2001-11-22'), + (8, 'aquarium', '1992-08-04'), + (9, 'study desk', '1984-09-16'), + (10, 'lava lamp', '1998-12-25'); + +ALTER TABLE trb1 ADD PRIMARY KEY (id); + +# Clean up. 
+DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test index ec00ddee5db..03d1d0a60bc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test @@ -11,7 +11,7 @@ DROP TABLE IF EXISTS t1; # restart server with correct parameters shutdown_server 10; ---exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name 
--rocksdb_persistent_cache_size_mb=100" >$_expect_file_name --sleep 5 --enable_reconnect --source include/wait_until_connected_again.inc @@ -29,7 +29,7 @@ select * from t1 where a = 1; # restart server to re-read cache --exec echo "wait" >$_expect_file_name shutdown_server 10; ---exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name --sleep 5 --enable_reconnect --source include/wait_until_connected_again.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index ed26d036e9a..5581ed3f95a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -792,6 +792,20 @@ insert into t47 values (2, 'row2'); set rocksdb_bulk_load=1; insert into t47 values (3, 'row3'),(4, 'row4'); set rocksdb_bulk_load=0; +# Check concurrent bulk loading +--connect (con1,localhost,root,,) +set rocksdb_bulk_load=1; +insert into t47 values (10, 'row10'),(11, 'row11'); +--connection default +set rocksdb_bulk_load=1; +insert into t47 values (100, 'row100'),(101, 'row101'); +--disconnect con1 +--connection default +set rocksdb_bulk_load=0; +--disable_query_log +let $wait_condition = select count(*) = 8 as c from t47; +--source include/wait_condition.inc +--enable_query_log select * from t47; drop table t47; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test new file mode 100644 index 00000000000..7cffa2e62a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test @@ -0,0 +1,494 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +# +# Create a table with 
multiple partitions, but in the comment don't specify +# that per-partition based column families (CF) should be created. Expect that +# default CF will be used and new one won't be created. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +# Expecting no results here. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment'; + +DROP TABLE t1; + +# +# Same test case as above, only with the reverse CF. Should result in the same +# behavior. No new CF-s created, only default one will be used. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +# Expecting no results here. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment'; + +DROP TABLE t1; + +# +# Create a table with multiple partitions and request for separate CF to be +# created per every partition. As a result we expect three different CF-s to be +# created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set @@global.rocksdb_compact_cf = 'baz'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz'; + +DROP TABLE t1; + +# +# Same test case as above, only one of the partitions has "rev:" prefix. The +# intent here is to make sure that qualifier can specify reverse CF as well. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set @@global.rocksdb_compact_cf = 't1-p0'; +set @@global.rocksdb_compact_cf = 'rev:bar'; +set @@global.rocksdb_compact_cf = 't1-p2'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2'; + +DROP TABLE t1; + + +# +# Create a table with multiple partitions and assign two partitions to the same +# CF, third one gets a separate partition, and fourth one will belong to a +# default one. As a result we expect two new CF-s to be created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9), + PARTITION custom_p3 VALUES IN (10, 20, 30) +); + +set @@global.rocksdb_compact_cf = 'cf-zero'; +set @@global.rocksdb_compact_cf = 'cf-one'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one'; + +DROP TABLE t1; + +# +# Create a table with CF-s per partition and verify that ALTER TABLE + DROP +# INDEX work for that scenario and data is persisted. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +SELECT * FROM t1; +ALTER TABLE t1 DROP PRIMARY KEY; +SELECT * FROM t1; + +# +# Verify that we can compact custom CF-s. +# +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'bar'; +set @@global.rocksdb_compact_cf = 'baz'; + +DROP TABLE t1; + +# +# Create a table with CF-s per partition and verify that ALTER TABLE + DROP +# INDEX + ADD INDEX work for that scenario and data is persisted and new cf_name_str +# are created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf'; + +set @@global.rocksdb_compact_cf = 'p0_cf'; +set @@global.rocksdb_compact_cf = 'p1_cf'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf'; + +DROP TABLE t1; + +# +# Create a table CF-s per partition, use ALTER TABLE to change the way it's +# partitioned and verify that new CF-s will be created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +ALTER TABLE t1 PARTITION BY LIST(c1) ( + PARTITION custom_p3 VALUES IN (1, 4, 7), + PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9) +); + +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf'; + +set @@global.rocksdb_compact_cf = 'p3_cf'; +set @@global.rocksdb_compact_cf = 'p4_cf'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf'; + +DROP TABLE t1; + +# +# Create a table CF-s per partition, use empty qualifier name. Verify that no +# new CF-s are created. This will also make sure that nothing gets added for +# `custom_p2`. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +DROP TABLE t1; + +# +# Verify some basic partition related operations when using PARTITION BY LIST +# COLUMNS on a VARBINARY column on a table with more complicated schema. +# + +# +# Verify that creating the table without COMMENT actually works. 
+# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +DROP TABLE t2; + +# +# Create the same table with two custom CF-s per partition as specified in the +# COMMENT. +# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created earlier. +set @@global.rocksdb_compact_cf = 'my_cf0'; +set @@global.rocksdb_compact_cf = 'my_cf1'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1'; + +# Insert some random data. 
+INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); + +# Verify it's there. +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; + +# Verify it's being fetched from the right partition. This tests partitioning +# functionality, but we want to make sure that by adding CF-s per partition we +# don't regress anything. +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; + +# Delete the current PK and create a new one referencing different CF-s. We +# need to verify that new CF-s will be created and no data will be lost in +# process. +ALTER TABLE t2 DROP PRIMARY KEY; +ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1'; + +# Verify that new CF-s are created as well. +set @@global.rocksdb_compact_cf = 'new_cf0'; +set @@global.rocksdb_compact_cf = 'new_cf1'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1'; + +# Insert some more random data. +INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2); +INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3); + +# Verify that partition mappings are still intact. +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; + +# Verify that no data is lost. +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; + +DROP TABLE t2; + +# +# Create the same table with two custom CF-s per partition as specified in the +# COMMENT. Use both the PK and SK when creating the table. 
+# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', + KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created for PK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1'; + +# Verify that CF-s were created for SK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5'; + +# Insert some random data. +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); + +# Basic verification that correct partition and key are used when searching. +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1; + +# Remove the key. +ALTER TABLE t2 DROP KEY `col2`; + +# Add a new key and expect new CF to be created as well. +ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5'; + +# Verify that CF-s were created for SK. 
+SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; + +# Verify that correct partition and key are used when searching. +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; + +DROP TABLE t2; + +# +# Verify the same scenario as before, but with a UNIQUE KEY in addition to PK. +# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', + UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created for SK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5'; + +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); + +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); + +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); + +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); + +DROP TABLE t2; + +# +# Verify that both partitioned and non-partitioned table can share a CF. +# +CREATE TABLE t1 ( + `a` int, + PRIMARY KEY (a) COMMENT "sharedcf" +) ENGINE=ROCKSDB; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf'; + +# +# We expect this to succeed. 
+# +CREATE TABLE t2 ( + `a` INT, + `b` DATE, + `c` VARCHAR(42), + PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf" +) ENGINE=ROCKSDB + PARTITION BY LIST(`a`) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test index 834887ddffb..434cfe91248 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -16,6 +16,10 @@ DROP TABLE IF EXISTS t4; CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +# With partition based column family creation we now expect all the partitions +# to belong to a default column family because mapping wasn't specified in +# this case. 
CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB PARTITION BY KEY(l) PARTITIONS 4; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test index a188d298cb0..ab3f240dd54 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -1,37 +1,41 @@ --source include/have_rocksdb.inc -# -# Check whether DATA DIRECTORY and INDEX DIRECTORY -# are supported in CREATE and ALTER TABLE # -# Note: the test does not check whether the options -# have any real effect on the table, only -# that they are accepted -# (and apparently ignored) -# - ---let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/ ---let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/ ---mkdir $data_dir ---mkdir $index_dir +# Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified +# then MyRocks returns an appropriate error. We don't support this +# functionality and therefore shouldn't just silently accept the values. +# --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings ---replace_result $data_dir $index_dir -eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir'; ---replace_result $data_dir $index_dir -SHOW CREATE TABLE t1; +--error 1296 +eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; ---replace_result $data_dir $index_dir -eval ALTER TABLE t1 INDEX DIRECTORY = '$data_dir'; ---replace_result $data_dir $index_dir -SHOW CREATE TABLE t1; +--error 1296 +eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; -DROP TABLE t1; - ---rmdir $data_dir ---rmdir $index_dir +# +# Verify that we'll get the same error codes when using the partitions. 
+# +--error 1296 +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) +( + PARTITION P0 VALUES LESS THAN (1000) + DATA DIRECTORY = '/foo/bar/data/', + PARTITION P1 VALUES LESS THAN (2000) + DATA DIRECTORY = '/foo/bar/data/', + PARTITION P2 VALUES LESS THAN (MAXVALUE) +); +--error 1296 +CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) +( + PARTITION P0 VALUES LESS THAN (1000) + INDEX DIRECTORY = '/foo/bar/data/', + PARTITION P1 VALUES LESS THAN (2000) + INDEX DIRECTORY = '/foo/bar/data/', + PARTITION P2 VALUES LESS THAN (MAXVALUE) +); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc index 4feb427be49..932a450e7c2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc @@ -6,10 +6,9 @@ select * from t1 where id=5; select * from t1 where value=5; select value from t1 where value=5; select * from t1 where value2=5; -select * from t1 where id < 3; -select * from t1 where value < 3; -select value from t1 where value < 3; -select * from t1 where value2 < 3; -select * from t1; -select value from t1; - +select * from t1 where id < 3 order by id; +select * from t1 where value < 3 order by id; +select value from t1 where value < 3 order by id; +select * from t1 where value2 < 3 order by id; +select * from t1 order by id; +select value from t1 order by id; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test index 73465e7627a..86ae15924cb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test @@ -143,3 +143,29 @@ disconnect con1; disconnect con2; disconnect con3; drop table t1, t2; + +# skip_unique checks should skip checks only for tables that don't have +# secondary indexes +connection default; 
+--disable_warnings +drop table if exists t1,t2,t3; +--enable_warnings + +# table with PK only +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +# table with PK and SK +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +# table with hidden PK +create table t3 (id int, value int) engine=rocksdb; + +SET @old_val = @@session.unique_checks; +set @@session.unique_checks = FALSE; + +insert into t1 values (1, 1), (1, 2); +--error ER_DUP_ENTRY +insert into t2 values (1, 1, 1), (1, 2, 1); +insert into t3 values (1, 1), (1, 1); + +set @@session.unique_checks = @old_val; +# cleanup +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index 84a85fab32d..672687b044e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -5,7 +5,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; -SET LOCAL rocksdb_write_sync=off; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; --exec sleep 30 select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); @@ -15,7 +15,7 @@ select variable_value-@a from information_schema.global_status where variable_na insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; -SET LOCAL rocksdb_write_sync=1; +SET LOCAL rocksdb_flush_log_at_trx_commit=1; insert aaa(id, i) values(4,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(5,1); @@ -24,7 +24,7 @@ insert aaa(id, i) values(6,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; SET GLOBAL rocksdb_background_sync=on; 
-SET LOCAL rocksdb_write_sync=off; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; insert aaa(id, i) values(7,1); let $status_var=rocksdb_wal_synced; @@ -35,7 +35,7 @@ truncate table aaa; # Cleanup drop table aaa; -SET GLOBAL rocksdb_write_sync=off; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf index 71c81a892ed..bbffb0ec116 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf @@ -3,9 +3,11 @@ [mysqld.1] log_slave_updates rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 [mysqld.2] relay_log_recovery=1 relay_log_info_repository=TABLE log_slave_updates rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt index d828b6c01f4..397310d37b4 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt @@ -1 +1 @@ ---gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt index aac6c6caadb..3f959684a75 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt @@ -1,2 +1,2 @@ ---gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--gtid_mode=ON 
--enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF --sync_binlog=1000 --relay_log_recovery=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf index c69c987b0d9..457665f9e76 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf @@ -5,6 +5,7 @@ log_slave_updates gtid_mode=ON enforce_gtid_consistency=ON rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 [mysqld.2] sync_relay_log_info=100 @@ -14,3 +15,4 @@ log_slave_updates gtid_mode=ON enforce_gtid_consistency=ON rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt index a990dc22129..74c2de37100 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt @@ -1 +1 @@ ---gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_flush_log_at_trx_commit=1 --rocksdb_write_disable_wal=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result new file mode 100644 index 00000000000..805ed2335f7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values 
VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; 
+@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'aaa'" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'bbb'" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_global_value; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_session_value; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; 
+@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result index 85517df6ce6..b65ef65c8f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result @@ -1,3 +1,4 @@ +call mtr.add_suppression(" Column family '[a-z]*' not found."); CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; INSERT INTO valid_values VALUES('abc'); INSERT INTO valid_values VALUES('def'); diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result new file mode 100644 index 00000000000..3eefd822e69 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result @@ -0,0 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); +SET @start_global_value = @@global.ROCKSDB_DELAYED_WRITE_RATE; +SELECT @start_global_value; +@start_global_value +16777216 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 100" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 100; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +100 +"Setting the global scope variable back to default" +SET 
@@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 1" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 1; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 0" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 0; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@session.ROCKSDB_DELAYED_WRITE_RATE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_DELAYED_WRITE_RATE = 444; +ERROR HY000: Variable 'rocksdb_delayed_write_rate' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'aaa'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'bbb'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '-1'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '101'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '484436'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = @start_global_value; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result deleted file mode 100644 index 9b3000f8f3c..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET 
@start_global_value = @@global.ROCKSDB_DISABLEDATASYNC; -SELECT @start_global_value; -@start_global_value -0 -"Trying to set variable @@global.ROCKSDB_DISABLEDATASYNC to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_DISABLEDATASYNC = 444; -ERROR HY000: Variable 'rocksdb_disabledatasync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result new file mode 100644 index 00000000000..19be4e3ad5d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +SELECT @start_global_value; +@start_global_value +1 +SET @start_session_value = @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +SELECT @start_session_value; +@start_session_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +2 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Setting the global scope variable back to default" +SET 
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +2 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +'# Testing with invalid values in global scope #' +"Trying to set variable 
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 'aaa'" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_global_value; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_session_value; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result index ae4b0ac05a1..165f3811f84 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result @@ -21,7 +21,7 @@ Table Op Msg_type Msg_text test.t1 analyze status OK SHOW INDEXES FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE set session rocksdb_flush_memtable_on_analyze=on; ANALYZE TABLE t1; Table Op Msg_type Msg_text @@ -48,11 +48,11 @@ a b 3 3 SHOW TABLE STATUS LIKE 't1'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ROCKSDB 10 Fixed 0 0 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +t1 ROCKSDB 10 Fixed # # 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK SHOW TABLE STATUS LIKE 't1'; Name Engine Version Row_format Rows Avg_row_length 
Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ROCKSDB 10 Fixed 3 8 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +t1 ROCKSDB 10 Fixed # # 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result new file mode 100644 index 00000000000..a1c4d3caaa4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo'); +SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats; +set global rocksdb_force_flush_memtable_now = true; +INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d'); +set global rocksdb_force_compute_memtable_stats=0; +SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; +set global rocksdb_force_compute_memtable_stats=1; +SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; +select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end; +case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end +true +DROP TABLE t; +set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result new file mode 100644 index 00000000000..3f50772ded5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result @@ -0,0 
+1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_MASTER_SKIP_TX_API; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_MASTER_SKIP_TX_API; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 1" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 1; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 0" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 0; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to on" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = on; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 1" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 1; +SELECT 
@@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 0" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 0; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to on" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = on; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'aaa'" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'bbb'" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = @start_global_value; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = @start_session_value; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result similarity index 61% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result index 87440ae0bcb..d097192545b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result @@ -3,12 +3,12 @@ INSERT INTO valid_values VALUES(1); INSERT INTO valid_values VALUES(1024); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); -SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE; +SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB; SELECT @start_global_value; @start_global_value 0 -"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE = 444; -ERROR HY000: Variable 'rocksdb_persistent_cache_size' is a read only variable +"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB = 444; +ERROR HY000: Variable 'rocksdb_persistent_cache_size_mb' is a read only variable DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result index cf11f295c29..9fec4a24bd8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result @@ -6,7 +6,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE; SELECT @start_global_value; @start_global_value -2 +1 '# Setting to valid values in global scope#' "Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1" SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1; @@ -17,7 +17,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; @@global.ROCKSDB_WAL_RECOVERY_MODE -2 +1 "Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0" SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 0; SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; @@ -27,7 +27,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; @@global.ROCKSDB_WAL_RECOVERY_MODE -2 +1 "Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session." 
SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444; ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL @@ -37,10 +37,10 @@ SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa'; Got one of the listed errors SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; @@global.ROCKSDB_WAL_RECOVERY_MODE -2 +1 SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value; SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; @@global.ROCKSDB_WAL_RECOVERY_MODE -2 +1 DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result deleted file mode 100644 index 9848e491b80..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result +++ /dev/null @@ -1,114 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -SET @start_global_value = @@global.ROCKSDB_WRITE_SYNC; -SELECT @start_global_value; -@start_global_value -0 -SET @start_session_value = @@session.ROCKSDB_WRITE_SYNC; -SELECT @start_session_value; -@start_session_value -0 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 1" -SET @@global.ROCKSDB_WRITE_SYNC = 1; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 0" -SET @@global.ROCKSDB_WRITE_SYNC = 0; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -"Setting the global scope 
variable back to default" -SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to on" -SET @@global.ROCKSDB_WRITE_SYNC = on; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to off" -SET @@global.ROCKSDB_WRITE_SYNC = off; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -'# Setting to valid values in session scope#' -"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 1" -SET @@session.ROCKSDB_WRITE_SYNC = 1; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 0" -SET @@session.ROCKSDB_WRITE_SYNC = 0; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to on" -SET @@session.ROCKSDB_WRITE_SYNC = on; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to off" -SET @@session.ROCKSDB_WRITE_SYNC = off; -SELECT 
@@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 'aaa'" -SET @@global.ROCKSDB_WRITE_SYNC = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -SET @@global.ROCKSDB_WRITE_SYNC = @start_global_value; -SELECT @@global.ROCKSDB_WRITE_SYNC; -@@global.ROCKSDB_WRITE_SYNC -0 -SET @@session.ROCKSDB_WRITE_SYNC = @start_session_value; -SELECT @@session.ROCKSDB_WRITE_SYNC; -@@session.ROCKSDB_WRITE_SYNC -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test new file mode 100644 index 00000000000..39265af4c9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_BLIND_DELETE_PRIMARY_KEY +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test index c65f722fe6e..bbafd526055 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test @@ -1,3 +1,6 @@ + +call mtr.add_suppression(" Column family '[a-z]*' not found."); + --source include/have_rocksdb.inc CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; @@ -10,6 +13,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=0 --let $session=0 --let $sticky=1 + --source suite/sys_vars/inc/rocksdb_sys_var.inc DROP TABLE valid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test new file mode 100644 index 00000000000..c8824a634cb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + +--let $sys_var=ROCKSDB_DELAYED_WRITE_RATE +--let $read_only=0 +--let $session=0 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test deleted file mode 100644 index b365370f214..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test +++ /dev/null @@ -1,6 +0,0 @@ ---source include/have_rocksdb.inc - 
---let $sys_var=ROCKSDB_DISABLEDATASYNC ---let $read_only=1 ---let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test new file mode 100644 index 00000000000..6bd471d83ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +--let $read_only=0 +--let $session=1 +--source suite/sys_vars/inc/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test index 7fc4c3a77f9..c7e04f89498 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test @@ -37,8 +37,10 @@ INSERT INTO t1 (b) VALUES (3); --sorted_result SELECT * FROM t1; +--replace_column 5 # 6 # SHOW TABLE STATUS LIKE 't1'; ANALYZE TABLE t1; +--replace_column 5 # 6 # SHOW TABLE STATUS LIKE 't1'; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test new file mode 100644 index 00000000000..3a0d7f63938 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc +--disable_warnings +DROP TABLE IF EXISTS t; +--enable_warnings + +CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo'); + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats; +set global rocksdb_force_flush_memtable_now = true; + +INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d'); + +set global rocksdb_force_compute_memtable_stats=0; +SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; + +set global rocksdb_force_compute_memtable_stats=1; +SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; + +select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end; + +DROP TABLE t; +set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test similarity index 83% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test index 3655ddb8c69..ac4a6f7bca0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test @@ -4,12 +4,12 @@ CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; INSERT INTO valid_values VALUES(1); INSERT INTO valid_values VALUES(0); INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO 
invalid_values VALUES('\'bbb\''); ---let $sys_var=ROCKSDB_WRITE_SYNC +--let $sys_var=ROCKSDB_MASTER_SKIP_TX_API --let $read_only=0 --let $session=1 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test similarity index 89% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test index 32fafcaf232..45bfaaf9ef2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test @@ -7,7 +7,7 @@ INSERT INTO valid_values VALUES(1024); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); ---let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE +--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE_MB --let $read_only=1 --let $session=0 --source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc index 26a967bc088..9b7fb488ffc 100644 --- a/storage/rocksdb/properties_collector.cc +++ b/storage/rocksdb/properties_collector.cc @@ -483,8 +483,15 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment, m_entry_single_deletes += s.m_entry_single_deletes; m_entry_merges += s.m_entry_merges; m_entry_others += s.m_entry_others; - for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { - m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i]; + if (s.m_distinct_keys_per_prefix.size() > 0) { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i]; + } + } else { + for (i = 0; i < 
m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] += + s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1); + } } } else { m_rows -= s.m_rows; @@ -495,8 +502,15 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment, m_entry_single_deletes -= s.m_entry_single_deletes; m_entry_merges -= s.m_entry_merges; m_entry_others -= s.m_entry_others; - for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { - m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i]; + if (s.m_distinct_keys_per_prefix.size() > 0) { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i]; + } + } else { + for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] -= + s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1); + } } } } diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index 2bd89fffee5..e81435248ec 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -24,6 +24,7 @@ /* MyRocks header files */ #include "./ha_rocksdb.h" #include "./ha_rocksdb_proto.h" +#include "./rdb_psi.h" namespace myrocks { @@ -31,20 +32,13 @@ namespace myrocks { bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) { /* nullptr means the default CF is used.. (TODO: can the default CF be * reverse?) 
*/ - if (name && !strncmp(name, "rev:", 4)) - return true; - else - return false; + return (name && !strncmp(name, "rev:", 4)); } -#ifdef HAVE_PSI_INTERFACE -static PSI_mutex_key ex_key_cfm; -#endif - void Rdb_cf_manager::init( Rdb_cf_options *const cf_options, std::vector *const handles) { - mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST); + mysql_mutex_init(rdb_cfm_mutex_key, &m_mutex, MY_MUTEX_INIT_FAST); DBUG_ASSERT(cf_options != nullptr); DBUG_ASSERT(handles != nullptr); @@ -96,14 +90,20 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, DBUG_ASSERT(rdb != nullptr); DBUG_ASSERT(is_automatic != nullptr); - rocksdb::ColumnFamilyHandle *cf_handle; + rocksdb::ColumnFamilyHandle *cf_handle = nullptr; + + RDB_MUTEX_LOCK_CHECK(m_mutex); - mysql_mutex_lock(&m_mutex); *is_automatic = false; - if (cf_name == nullptr) + + if (cf_name == nullptr || *cf_name == '\0') { cf_name = DEFAULT_CF_NAME; + } + + DBUG_ASSERT(cf_name != nullptr); std::string per_index_name; + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { get_per_index_cf_name(db_table_name, index_name, &per_index_name); cf_name = per_index_name.c_str(); @@ -111,15 +111,17 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, } const auto it = m_cf_name_map.find(cf_name); - if (it != m_cf_name_map.end()) + + if (it != m_cf_name_map.end()) { cf_handle = it->second; - else { + } else { /* Create a Column Family. 
*/ const std::string cf_name_str(cf_name); rocksdb::ColumnFamilyOptions opts; m_cf_options->get_cf_options(cf_name_str, &opts); - sql_print_information("RocksDB: creating column family %s", + // NO_LINT_DEBUG + sql_print_information("RocksDB: creating a column family %s", cf_name_str.c_str()); sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); sql_print_information(" target_file_size_base=%" PRIu64, @@ -127,6 +129,7 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, const rocksdb::Status s = rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + if (s.ok()) { m_cf_name_map[cf_handle->GetName()] = cf_handle; m_cf_id_map[cf_handle->GetID()] = cf_handle; @@ -134,7 +137,8 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, cf_handle = nullptr; } } - mysql_mutex_unlock(&m_mutex); + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return cf_handle; } @@ -160,13 +164,18 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, rocksdb::ColumnFamilyHandle *cf_handle; *is_automatic = false; - mysql_mutex_lock(&m_mutex); - if (cf_name == nullptr) + + RDB_MUTEX_LOCK_CHECK(m_mutex); + + if (cf_name == nullptr) { cf_name = DEFAULT_CF_NAME; + } std::string per_index_name; + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { get_per_index_cf_name(db_table_name, index_name, &per_index_name); + DBUG_ASSERT(!per_index_name.empty()); cf_name = per_index_name.c_str(); *is_automatic = true; } @@ -174,7 +183,12 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, const auto it = m_cf_name_map.find(cf_name); cf_handle = (it != m_cf_name_map.end()) ? 
it->second : nullptr; - mysql_mutex_unlock(&m_mutex); + if (!cf_handle) { + // NO_LINT_DEBUG + sql_print_warning("Column family '%s' not found.", cf_name); + } + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return cf_handle; } @@ -182,11 +196,11 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const { rocksdb::ColumnFamilyHandle *cf_handle = nullptr; - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); const auto it = m_cf_id_map.find(id); if (it != m_cf_id_map.end()) cf_handle = it->second; - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return cf_handle; } @@ -194,11 +208,12 @@ rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const { std::vector Rdb_cf_manager::get_cf_names(void) const { std::vector names; - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); for (auto it : m_cf_name_map) { names.push_back(it.first); } - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + return names; } @@ -206,11 +221,13 @@ std::vector Rdb_cf_manager::get_all_cf(void) const { std::vector list; - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); + for (auto it : m_cf_id_map) { list.push_back(it.second); } - mysql_mutex_unlock(&m_mutex); + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return list; } diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index bd6bd2a0834..f81fe59fe7c 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -55,12 +55,13 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, rocksdb::ColumnFamilyHandle *cf_handle_arg, uint16_t index_dict_version_arg, uchar index_type_arg, uint16_t kv_format_version_arg, bool is_reverse_cf_arg, - bool is_auto_cf_arg, const char *_name, - Rdb_index_stats _stats) + bool is_auto_cf_arg, bool is_per_partition_cf_arg, + const char *_name, Rdb_index_stats _stats) : m_index_number(indexnr_arg), 
m_cf_handle(cf_handle_arg), m_index_dict_version(index_dict_version_arg), m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg), m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg), + m_is_per_partition_cf(is_per_partition_cf_arg), m_name(_name), m_stats(_stats), m_pk_part_no(nullptr), m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0), m_prefix_extractor(nullptr), m_maxlength(0) // means 'not intialized' @@ -73,6 +74,7 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, Rdb_key_def::Rdb_key_def(const Rdb_key_def &k) : m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle), m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf), + m_is_per_partition_cf(k.m_is_per_partition_cf), m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no), m_pack_info(k.m_pack_info), m_keyno(k.m_keyno), m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor), @@ -116,9 +118,9 @@ void Rdb_key_def::setup(const TABLE *const tbl, const bool hidden_pk_exists = table_has_hidden_pk(tbl); const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY); if (!m_maxlength) { - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); if (m_maxlength != 0) { - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return; } @@ -280,7 +282,7 @@ void Rdb_key_def::setup(const TABLE *const tbl, */ m_maxlength = max_len; - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); } } @@ -515,6 +517,50 @@ int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) { return changed; } +uchar *Rdb_key_def::pack_field( + Field *const field, + Rdb_field_packing *pack_info, + uchar * tuple, + uchar *const packed_tuple, + uchar *const pack_buffer, + Rdb_string_writer *const unpack_info, + uint *const n_null_fields) const +{ + if (field->real_maybe_null()) { + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); + if (field->is_real_null()) { + /* NULL value. 
store '\0' so that it sorts before non-NULL values */ + *tuple++ = 0; + /* That's it, don't store anything else */ + if (n_null_fields) + (*n_null_fields)++; + return tuple; + } else { + /* Not a NULL value. Store '1' */ + *tuple++ = 1; + } + } + + const bool create_unpack_info = + (unpack_info && // we were requested to generate unpack_info + pack_info->uses_unpack_info()); // and this keypart uses it + Rdb_pack_field_context pack_ctx(unpack_info); + + // Set the offset for methods which do not take an offset as an argument + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, + pack_info->m_max_image_len)); + + pack_info->m_pack_func(pack_info, field, pack_buffer, &tuple, &pack_ctx); + + /* Make "unpack info" to be stored in the value */ + if (create_unpack_info) { + pack_info->m_make_unpack_info_func(pack_info->m_charset_codec, field, + &pack_ctx); + } + + return tuple; +} + /** Get index columns from the record and pack them into mem-comparable form. @@ -595,45 +641,21 @@ uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer, Field *const field = m_pack_info[i].get_field_in_table(tbl); DBUG_ASSERT(field != nullptr); - // Old Field methods expected the record pointer to be at tbl->record[0]. - // The quick and easy way to fix this was to pass along the offset - // for the pointer. - const my_ptrdiff_t ptr_diff = record - tbl->record[0]; + uint field_offset = field->ptr - tbl->record[0]; + uint null_offset = field->null_offset(tbl->record[0]); + bool maybe_null = field->real_maybe_null(); + field->move_field(const_cast(record) + field_offset, + maybe_null ? const_cast(record) + null_offset : nullptr, + field->null_bit); + // WARNING! Don't return without restoring field->ptr and field->null_ptr - if (field->real_maybe_null()) { - DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); - if (field->is_real_null(ptr_diff)) { - /* NULL value. 
store '\0' so that it sorts before non-NULL values */ - *tuple++ = 0; - /* That's it, don't store anything else */ - if (n_null_fields) - (*n_null_fields)++; - continue; - } else { - /* Not a NULL value. Store '1' */ - *tuple++ = 1; - } - } + tuple = pack_field(field, &m_pack_info[i], tuple, packed_tuple, pack_buffer, + unpack_info, n_null_fields); - const bool create_unpack_info = - (unpack_info && // we were requested to generate unpack_info - m_pack_info[i].uses_unpack_info()); // and this keypart uses it - Rdb_pack_field_context pack_ctx(unpack_info); - - // Set the offset for methods which do not take an offset as an argument - DBUG_ASSERT(is_storage_available(tuple - packed_tuple, - m_pack_info[i].m_max_image_len)); - field->move_field_offset(ptr_diff); - - m_pack_info[i].m_pack_func(&m_pack_info[i], field, pack_buffer, &tuple, - &pack_ctx); - - /* Make "unpack info" to be stored in the value */ - if (create_unpack_info) { - m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec, - field, &pack_ctx); - } - field->move_field_offset(-ptr_diff); + // Restore field->ptr and field->null_ptr + field->move_field(tbl->record[0] + field_offset, + maybe_null ? 
tbl->record[0] + null_offset : nullptr, + field->null_bit); } if (unpack_info) { @@ -824,6 +846,35 @@ size_t Rdb_key_def::key_length(const TABLE *const table, return key.size() - reader.remaining_bytes(); } +int Rdb_key_def::unpack_field( + Rdb_field_packing *const fpi, + Field *const field, + Rdb_string_reader* reader, + const uchar *const default_value, + Rdb_string_reader* unp_reader) const +{ + if (fpi->m_maybe_null) { + const char *nullp; + if (!(nullp = reader->read(1))) { + return HA_EXIT_FAILURE; + } + + if (*nullp == 0) { + /* Set the NULL-bit of this field */ + field->set_null(); + /* Also set the field to its default value */ + memcpy(field->ptr, default_value, field->pack_length()); + return HA_EXIT_SUCCESS; + } else if (*nullp == 1) { + field->set_notnull(); + } else { + return HA_EXIT_FAILURE; + } + } + + return fpi->m_unpack_func(fpi, field, field->ptr, reader, unp_reader); +} + /* Take mem-comparable form and unpack_info and unpack it to Table->record @@ -850,11 +901,6 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, // ha_rocksdb::convert_record_from_storage_format instead. DBUG_ASSERT_IMP(!secondary_key, !verify_row_debug_checksums); - // Old Field methods expected the record pointer to be at tbl->record[0]. - // The quick and easy way to fix this was to pass along the offset - // for the pointer. - const my_ptrdiff_t ptr_diff = buf - table->record[0]; - // Skip the index number if ((!reader.read(INDEX_NUMBER_SIZE))) { return HA_EXIT_FAILURE; @@ -891,35 +937,31 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, if (fpi->m_unpack_func) { /* It is possible to unpack this column. Do it. 
*/ - if (fpi->m_maybe_null) { - const char *nullp; - if (!(nullp = reader.read(1))) - return HA_EXIT_FAILURE; - if (*nullp == 0) { - /* Set the NULL-bit of this field */ - field->set_null(ptr_diff); - /* Also set the field to its default value */ - uint field_offset = field->ptr - table->record[0]; - memcpy(buf + field_offset, table->s->default_values + field_offset, - field->pack_length()); - continue; - } else if (*nullp == 1) - field->set_notnull(ptr_diff); - else - return HA_EXIT_FAILURE; - } + uint field_offset = field->ptr - table->record[0]; + uint null_offset = field->null_offset(); + bool maybe_null = field->real_maybe_null(); + field->move_field(buf + field_offset, + maybe_null ? buf + null_offset : nullptr, + field->null_bit); + // WARNING! Don't return without restoring field->ptr and field->null_ptr // If we need unpack info, but there is none, tell the unpack function // this by passing unp_reader as nullptr. If we never read unpack_info // during unpacking anyway, then there won't an error. const bool maybe_missing_unpack = !has_unpack_info && fpi->uses_unpack_info(); - const int res = - fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, &reader, + int res = unpack_field(fpi, field, &reader, + table->s->default_values + field_offset, maybe_missing_unpack ? nullptr : &unp_reader); - if (res) + // Restore field->ptr and field->null_ptr + field->move_field(table->record[0] + field_offset, + maybe_null ? table->record[0] + null_offset : nullptr, + field->null_bit); + + if (res) { return res; + } } else { /* It is impossible to unpack the column. Skip it. 
*/ if (fpi->m_maybe_null) { @@ -2141,7 +2183,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs, size_t *const mb_len) { DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE); if (!rdb_mem_comparable_space[cs->number].get()) { - mysql_mutex_lock(&rdb_mem_cmp_space_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_mem_cmp_space_mutex); if (!rdb_mem_comparable_space[cs->number].get()) { // Upper bound of how many bytes can be occupied by multi-byte form of a // character in any charset. @@ -2167,7 +2209,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs, } rdb_mem_comparable_space[cs->number].reset(info); } - mysql_mutex_unlock(&rdb_mem_cmp_space_mutex); + RDB_MUTEX_UNLOCK_CHECK(rdb_mem_cmp_space_mutex); } *xfrm = &rdb_mem_comparable_space[cs->number]->spaces_xfrm; @@ -2191,7 +2233,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { const Rdb_collation_codec *codec = rdb_collation_data[cs->number]; if (codec == nullptr && rdb_is_collation_supported(cs)) { - mysql_mutex_lock(&rdb_collation_data_mutex); + RDB_MUTEX_LOCK_CHECK(rdb_collation_data_mutex); + codec = rdb_collation_data[cs->number]; if (codec == nullptr) { Rdb_collation_codec *cur = nullptr; @@ -2235,7 +2278,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { rdb_collation_data[cs->number] = cur; } } - mysql_mutex_unlock(&rdb_collation_data_mutex); + + RDB_MUTEX_UNLOCK_CHECK(rdb_collation_data_mutex); } return codec; @@ -2597,9 +2641,10 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, for (uint i = 0; i < m_key_count; i++) { const Rdb_key_def &kd = *m_key_descr_arr[i]; - const uchar flags = + uchar flags = (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | - (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0); + (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0) | + (kd.m_is_per_partition_cf ? 
Rdb_key_def::PER_PARTITION_CF_FLAG : 0); const uint cf_id = kd.get_cf()->GetID(); /* @@ -2610,13 +2655,21 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, control, we can switch to use it and removing mutex. */ uint existing_cf_flags; + const std::string cf_name = kd.get_cf()->GetName(); + if (dict->get_cf_flags(cf_id, &existing_cf_flags)) { + // For the purposes of comparison we'll clear the partitioning bit. The + // intent here is to make sure that both partitioned and non-partitioned + // tables can refer to the same CF. + existing_cf_flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE; + flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE; + if (existing_cf_flags != flags) { my_printf_error(ER_UNKNOWN_ERROR, - "Column Family Flag is different from existing flag. " - "Assign a new CF flag, or do not change existing " - "CF flag.", - MYF(0)); + "Column family ('%s') flag (%d) is different from an " + "existing flag (%d). Assign a new CF flag, or do not " + "change existing CF flag.", MYF(0), cf_name.c_str(), + flags, existing_cf_flags); return true; } } else { @@ -2690,6 +2743,24 @@ void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) { m_index_num_to_keydef.erase(gl_index_id); } +void Rdb_ddl_manager::add_uncommitted_keydefs( + const std::unordered_set> &indexes) { + mysql_rwlock_wrlock(&m_rwlock); + for (const auto &index : indexes) { + m_index_num_to_uncommitted_keydef[index->get_gl_index_id()] = index; + } + mysql_rwlock_unlock(&m_rwlock); +} + +void Rdb_ddl_manager::remove_uncommitted_keydefs( + const std::unordered_set> &indexes) { + mysql_rwlock_wrlock(&m_rwlock); + for (const auto &index : indexes) { + m_index_num_to_uncommitted_keydef.erase(index->get_gl_index_id()); + } + mysql_rwlock_unlock(&m_rwlock); +} + namespace // anonymous namespace = not visible outside this source file { struct Rdb_validate_tbls : public Rdb_tables_scanner { @@ -3005,7 +3076,8 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, tdef->m_key_descr_arr[keyno] 
= std::make_shared( gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type, kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG, - flags & Rdb_key_def::AUTO_CF_FLAG, "", + flags & Rdb_key_def::AUTO_CF_FLAG, + flags & Rdb_key_def::PER_PARTITION_CF_FLAG, "", m_dict->get_stats(gl_index_id)); } put(tdef); @@ -3079,6 +3151,14 @@ Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) { ret = kd; } } + } else { + auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id); + if (it != m_index_num_to_uncommitted_keydef.end()) { + const auto &kd = it->second; + if (kd->max_storage_fmt_length() != 0) { + ret = kd; + } + } } mysql_rwlock_unlock(&m_rwlock); @@ -3097,6 +3177,11 @@ Rdb_ddl_manager::find(GL_INDEX_ID gl_index_id) { return table_def->m_key_descr_arr[it->second.second]; } } + } else { + auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id); + if (it != m_index_num_to_uncommitted_keydef.end()) { + return it->second; + } } static std::shared_ptr empty = nullptr; @@ -3126,6 +3211,8 @@ void Rdb_ddl_manager::adjust_stats( for (const auto &src : data) { const auto &keydef = find(src.m_gl_index_id); if (keydef) { + keydef->m_stats.m_distinct_keys_per_prefix.resize( + keydef->get_key_parts()); keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length()); m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; } @@ -3671,6 +3758,7 @@ void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch, void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch, const GL_INDEX_ID &gl_index_id) const { delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id); + delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id); } bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, @@ -4133,7 +4221,7 @@ uint Rdb_seq_generator::get_and_update_next_number( DBUG_ASSERT(dict != nullptr); uint res; - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); res = m_next_number++; @@ -4144,7 +4232,7 @@ uint 
Rdb_seq_generator::get_and_update_next_number( dict->update_max_index_id(batch, res); dict->commit(batch); - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); return res; } diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index b084752f601..7c1b2bf6dec 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -167,6 +167,13 @@ public: uchar *const packed_tuple, const uchar *const key_tuple, const key_part_map &keypart_map) const; + uchar *pack_field(Field *const field, + Rdb_field_packing *pack_info, + uchar * tuple, + uchar *const packed_tuple, + uchar *const pack_buffer, + Rdb_string_writer *const unpack_info, + uint *const n_null_fields) const; /* Convert a key from Table->record format to mem-comparable form */ uint pack_record(const TABLE *const tbl, uchar *const pack_buffer, const uchar *const record, uchar *const packed_tuple, @@ -177,6 +184,11 @@ public: /* Pack the hidden primary key into mem-comparable form. */ uint pack_hidden_pk(const longlong &hidden_pk_id, uchar *const packed_tuple) const; + int unpack_field(Rdb_field_packing *const fpi, + Field *const field, + Rdb_string_reader* reader, + const uchar *const default_value, + Rdb_string_reader* unp_reader) const; int unpack_record(TABLE *const table, uchar *const buf, const rocksdb::Slice *const packed_key, const rocksdb::Slice *const unpack_info, @@ -287,7 +299,7 @@ public: rocksdb::ColumnFamilyHandle *cf_handle_arg, uint16_t index_dict_version_arg, uchar index_type_arg, uint16_t kv_format_version_arg, bool is_reverse_cf_arg, - bool is_auto_cf_arg, const char *name, + bool is_auto_cf_arg, bool is_per_partition_cf, const char *name, Rdb_index_stats stats = Rdb_index_stats()); ~Rdb_key_def(); @@ -303,8 +315,13 @@ public: enum { REVERSE_CF_FLAG = 1, AUTO_CF_FLAG = 2, + PER_PARTITION_CF_FLAG = 4, }; + // Set of flags to ignore when comparing two CF-s and determining if + // they're same. 
+ static const uint CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG; + // Data dictionary types enum DATA_DICT_TYPE { DDL_ENTRY_INDEX_START_NUMBER = 1, @@ -414,6 +431,10 @@ public: bool m_is_reverse_cf; bool m_is_auto_cf; + + /* If true, then column family is created per partition. */ + bool m_is_per_partition_cf; + std::string m_name; mutable Rdb_index_stats m_stats; @@ -740,8 +761,13 @@ interface Rdb_tables_scanner { class Rdb_ddl_manager { Rdb_dict_manager *m_dict = nullptr; my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements - // maps index id to + // Maps index id to std::map> m_index_num_to_keydef; + + // Maps index id to key definitons not yet committed to data dictionary. + // This is mainly used to store key definitions during ALTER TABLE. + std::map> + m_index_num_to_uncommitted_keydef; mysql_rwlock_t m_rwlock; Rdb_seq_generator m_sequence; @@ -787,6 +813,10 @@ public: int scan_for_tables(Rdb_tables_scanner *tables_scanner); void erase_index_num(const GL_INDEX_ID &gl_index_id); + void add_uncommitted_keydefs( + const std::unordered_set> &indexes); + void remove_uncommitted_keydefs( + const std::unordered_set> &indexes); private: /* Put the data into in-memory table (only) */ @@ -867,7 +897,7 @@ private: 3. CF id => CF flags key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id - value: version, {is_reverse_cf, is_auto_cf} + value: version, {is_reverse_cf, is_auto_cf, is_per_partition_cf} cf_flags is 4 bytes in total. 4. 
Binlog entry (updated at commit) @@ -930,9 +960,9 @@ public: inline void cleanup() { mysql_mutex_destroy(&m_mutex); } - inline void lock() { mysql_mutex_lock(&m_mutex); } + inline void lock() { RDB_MUTEX_LOCK_CHECK(m_mutex); } - inline void unlock() { mysql_mutex_unlock(&m_mutex); } + inline void unlock() { RDB_MUTEX_UNLOCK_CHECK(m_mutex); } /* Raw RocksDB operations */ std::unique_ptr begin() const; diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index 35b0cf4899e..158a7070ea9 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -256,7 +256,7 @@ static int rdb_i_s_perf_context_fill_table( Rdb_perf_counters counters; if (rdb_normalize_tablename(it, &str)) { - return HA_ERR_INTERNAL_ERROR; + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) { @@ -447,8 +447,6 @@ static int rdb_i_s_cfoptions_fill_table( opts.disable_auto_compactions ? "ON" : "OFF"}, {"PURGE_REDUNDANT_KVS_WHILE_FLUSH", opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"}, - {"VERIFY_CHECKSUM_IN_COMPACTION", - opts.verify_checksums_in_compaction ? "ON" : "OFF"}, {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS", std::to_string(opts.max_sequential_skip_in_iterations)}, {"MEMTABLE_FACTORY", opts.memtable_factory == nullptr @@ -463,8 +461,6 @@ static int rdb_i_s_cfoptions_fill_table( std::to_string(opts.memtable_huge_page_size)}, {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)}, {"MAX_SUCCESSIVE_MERGES", std::to_string(opts.max_successive_merges)}, - {"MIN_PARTIAL_MERGE_OPERANDS", - std::to_string(opts.min_partial_merge_operands)}, {"OPTIMIZE_FILTERS_FOR_HITS", (opts.optimize_filters_for_hits ? 
"ON" : "OFF")}, }; @@ -811,7 +807,7 @@ static int rdb_i_s_compact_stats_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); - DBUG_ENTER("rdb_i_s_global_compact_stats_table"); + DBUG_ENTER_FUNC(); int ret = 0; @@ -1025,7 +1021,7 @@ static int rdb_i_s_global_info_init(void *const p) { static int rdb_i_s_compact_stats_init(void *p) { my_core::ST_SCHEMA_TABLE *schema; - DBUG_ENTER("rdb_i_s_compact_stats_init"); + DBUG_ENTER_FUNC(); DBUG_ASSERT(p != nullptr); schema = reinterpret_cast(p); diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc index 4a50af0c13d..c9186a61727 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.cc +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -159,7 +159,7 @@ Rdb_mutex::Rdb_mutex() { Rdb_mutex::~Rdb_mutex() { mysql_mutex_destroy(&m_mutex); } Status Rdb_mutex::Lock() { - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0); return Status::OK(); } @@ -174,7 +174,7 @@ Status Rdb_mutex::TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) { Note: PThreads API has pthread_mutex_timedlock(), but mysql's mysql_mutex_* wrappers do not wrap that function. */ - mysql_mutex_lock(&m_mutex); + RDB_MUTEX_LOCK_CHECK(m_mutex); return Status::OK(); } @@ -202,7 +202,7 @@ void Rdb_mutex::UnLock() { return; } #endif - mysql_mutex_unlock(&m_mutex); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); } } // namespace myrocks diff --git a/storage/rocksdb/rdb_psi.cc b/storage/rocksdb/rdb_psi.cc new file mode 100644 index 00000000000..b6bc89a02f9 --- /dev/null +++ b/storage/rocksdb/rdb_psi.cc @@ -0,0 +1,113 @@ +/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#define MYSQL_SERVER 1 + +/* The C++ file's header */ +#include "./rdb_psi.h" + +/* MySQL header files */ +#include + +namespace myrocks { + +/* + The following is needed as an argument for mysql_stage_register, + irrespectively of whether we're compiling with P_S or not. +*/ +my_core::PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock", + 0}; + +#ifdef HAVE_PSI_INTERFACE +my_core::PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock}; + +my_core::PSI_thread_key rdb_background_psi_thread_key, + rdb_drop_idx_psi_thread_key; + +my_core::PSI_thread_info all_rocksdb_threads[] = { + {&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, + {&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, +}; + +my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key, + rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key, + rdb_mem_cmp_space_mutex_key, key_mutex_tx_list, rdb_sysvars_psi_mutex_key, + rdb_cfm_mutex_key; + +my_core::PSI_mutex_info all_rocksdb_mutexes[] = { + {&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, + {&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, + {&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, + {&rdb_mem_cmp_space_mutex_key, "collation space char data init", + PSI_FLAG_GLOBAL}, + 
{&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, + {&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, + {&rdb_cfm_mutex_key, "column family manager", PSI_FLAG_GLOBAL}, +}; + +my_core::PSI_rwlock_key key_rwlock_collation_exception_list, + key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables; + +my_core::PSI_rwlock_info all_rocksdb_rwlocks[] = { + {&key_rwlock_collation_exception_list, "collation_exception_list", + PSI_FLAG_GLOBAL}, + {&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, + {&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", + PSI_FLAG_GLOBAL}, +}; + +my_core::PSI_cond_key rdb_signal_bg_psi_cond_key, + rdb_signal_drop_idx_psi_cond_key; + +my_core::PSI_cond_info all_rocksdb_conds[] = { + {&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", + PSI_FLAG_GLOBAL}, +}; + +void init_rocksdb_psi_keys() { + const char *const category = "rocksdb"; + int count; + + if (PSI_server == nullptr) + return; + + count = array_elements(all_rocksdb_mutexes); + PSI_server->register_mutex(category, all_rocksdb_mutexes, count); + + count = array_elements(all_rocksdb_rwlocks); + PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count); + + count = array_elements(all_rocksdb_conds); + //TODO Disabling PFS for conditions due to the bug + // https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 + // PSI_server->register_cond(category, all_rocksdb_conds, count); + + count = array_elements(all_rocksdb_stages); + mysql_stage_register(category, all_rocksdb_stages, count); + + count = array_elements(all_rocksdb_threads); + mysql_thread_register(category, all_rocksdb_threads, count); +} +#else // HAVE_PSI_INTERFACE +void init_rocksdb_psi_keys() {} +#endif // HAVE_PSI_INTERFACE + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_psi.h b/storage/rocksdb/rdb_psi.h new file mode 100644 index 00000000000..2df3b96a64d --- 
/dev/null +++ b/storage/rocksdb/rdb_psi.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#pragma once + +#ifndef _rdb_psi_h_ +#define _rdb_psi_h_ + +/* MySQL header files */ +#include +#include + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +/* + The following is needed as an argument for mysql_stage_register, + irrespectively of whether we're compiling with P_S or not. 
+*/ +extern my_core::PSI_stage_info stage_waiting_on_row_lock; + +#ifdef HAVE_PSI_INTERFACE +extern my_core::PSI_thread_key rdb_background_psi_thread_key, + rdb_drop_idx_psi_thread_key; + +extern my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, + rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, + rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, + key_mutex_tx_list, rdb_sysvars_psi_mutex_key, rdb_cfm_mutex_key; + +extern my_core::PSI_rwlock_key key_rwlock_collation_exception_list, + key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables; + +extern my_core::PSI_cond_key rdb_signal_bg_psi_cond_key, + rdb_signal_drop_idx_psi_cond_key; +#endif // HAVE_PSI_INTERFACE + +void init_rocksdb_psi_keys(); + +} // namespace myrocks + +#endif // _rdb_psi_h_ diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index cfbefb2ce6d..b6fd14f3ccb 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -191,6 +191,10 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, m_prefix += normalized_table + "_" + indexname + "_"; } + // Unique filename generated to prevent collisions when the same table + // is loaded in parallel + m_prefix += std::to_string(m_prefix_counter.fetch_add(1)) + "_"; + rocksdb::ColumnFamilyDescriptor cf_descr; const rocksdb::Status s = m_cf->GetDescriptor(&cf_descr); if (!s.ok()) { @@ -221,7 +225,7 @@ int Rdb_sst_info::open_new_sst_file() { // Open the sst file const rocksdb::Status s = m_sst_file->open(); if (!s.ok()) { - set_error_msg(s.ToString()); + set_error_msg(m_sst_file->get_name(), s.ToString()); delete m_sst_file; m_sst_file = nullptr; return HA_EXIT_FAILURE; @@ -255,7 +259,7 @@ void Rdb_sst_info::close_curr_sst_file() { #else const rocksdb::Status s = m_sst_file->commit(); if (!s.ok()) { - set_error_msg(s.ToString()); + set_error_msg(m_sst_file->get_name(), s.ToString()); } delete m_sst_file; @@ -293,7 +297,7 @@ int 
Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { // Add the key/value to the current sst file const rocksdb::Status s = m_sst_file->put(key, value); if (!s.ok()) { - set_error_msg(s.ToString()); + set_error_msg(m_sst_file->get_name(), s.ToString()); return HA_EXIT_FAILURE; } @@ -329,16 +333,18 @@ int Rdb_sst_info::commit() { return HA_EXIT_SUCCESS; } -void Rdb_sst_info::set_error_msg(const std::string &msg) { +void Rdb_sst_info::set_error_msg(const std::string &sst_file_name, + const std::string &msg) { #if defined(RDB_SST_INFO_USE_THREAD) // Both the foreground and background threads can set the error message // so lock the mutex to protect it. We only want the first error that // we encounter. const std::lock_guard guard(m_mutex); #endif - my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str()); + my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0), + sst_file_name.c_str(), msg.c_str()); if (m_error_msg.empty()) { - m_error_msg = msg; + m_error_msg = "[" + sst_file_name + "] " + msg; } } @@ -366,7 +372,7 @@ void Rdb_sst_info::run_thread() { // Close out the sst file and add it to the database const rocksdb::Status s = sst_file->commit(); if (!s.ok()) { - set_error_msg(s.ToString()); + set_error_msg(sst_file->get_name(), s.ToString()); } delete sst_file; @@ -412,5 +418,6 @@ void Rdb_sst_info::init(const rocksdb::DB *const db) { my_dirend(dir_info); } +std::atomic Rdb_sst_info::m_prefix_counter(0); std::string Rdb_sst_info::m_suffix = ".bulk_load.tmp"; } // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 45d44fc848b..09c0edce097 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h @@ -17,6 +17,7 @@ #pragma once /* C++ standard header files */ +#include #include #include #include @@ -55,6 +56,7 @@ public: rocksdb::Status open(); rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value); rocksdb::Status 
commit(); + const std::string get_name() const { return m_name; } }; class Rdb_sst_info { @@ -70,6 +72,7 @@ private: uint m_sst_count; std::string m_error_msg; std::string m_prefix; + static std::atomic m_prefix_counter; static std::string m_suffix; #if defined(RDB_SST_INFO_USE_THREAD) std::queue m_queue; @@ -83,7 +86,7 @@ private: int open_new_sst_file(); void close_curr_sst_file(); - void set_error_msg(const std::string &msg); + void set_error_msg(const std::string &sst_file_name, const std::string &msg); #if defined(RDB_SST_INFO_USE_THREAD) void run_thread(); diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc index 0bc590e4cf8..e8e5cf55e4b 100644 --- a/storage/rocksdb/rdb_threads.cc +++ b/storage/rocksdb/rdb_threads.cc @@ -28,6 +28,7 @@ void *Rdb_thread::thread_func(void *const thread_ptr) { DBUG_ASSERT(thread_ptr != nullptr); Rdb_thread *const thread = static_cast(thread_ptr); if (!thread->m_run_once.exchange(true)) { + thread->setname(); thread->run(); thread->uninit(); } @@ -56,32 +57,24 @@ int Rdb_thread::create_thread(const std::string &thread_name PSI_thread_key background_psi_thread_key #endif ) { - DBUG_ASSERT(!thread_name.empty()); + // Make a copy of the name so we can return without worrying that the + // caller will free the memory + m_name = thread_name; - int err = mysql_thread_create(background_psi_thread_key, &m_handle, nullptr, - thread_func, this); - - if (!err) { - /* - mysql_thread_create() ends up doing some work underneath and setting the - thread name as "my-func". This isn't what we want. Our intent is to name - the threads according to their purpose so that when displayed under the - debugger then they'll be more easily identifiable. Therefore we'll reset - the name if thread was successfully created. 
- */ - err = pthread_setname_np(m_handle, thread_name.c_str()); - } - - return err; + return mysql_thread_create(background_psi_thread_key, &m_handle, nullptr, + thread_func, this); } void Rdb_thread::signal(const bool &stop_thread) { - mysql_mutex_lock(&m_signal_mutex); + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + if (stop_thread) { m_stop = true; } + mysql_cond_signal(&m_signal_cond); - mysql_mutex_unlock(&m_signal_mutex); + + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); } } // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index b7890b03576..e5cbb52c54e 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -40,6 +40,8 @@ private: pthread_t m_handle; + std::string m_name; + protected: mysql_mutex_t m_signal_mutex; mysql_cond_t m_signal_cond; @@ -64,6 +66,31 @@ public: int join() { return pthread_join(m_handle, nullptr); } + void setname() { + /* + mysql_thread_create() ends up doing some work underneath and setting the + thread name as "my-func". This isn't what we want. Our intent is to name + the threads according to their purpose so that when displayed under the + debugger then they'll be more easily identifiable. Therefore we'll reset + the name if thread was successfully created. + */ + + /* + We originally had the creator also set the thread name, but that seems to + not work correctly in all situations. Having the created thread do the + pthread_setname_np resolves the issue. 
+ */ + DBUG_ASSERT(!m_name.empty()); + int err = pthread_setname_np(m_handle, m_name.c_str()); + if (err) + { + // NO_LINT_DEBUG + sql_print_warning( + "MyRocks: Failed to set name (%s) for current thread, errno=%d", + m_name.c_str(), errno); + } + } + void uninit(); virtual ~Rdb_thread() {} @@ -92,9 +119,11 @@ public: virtual void run() override; void request_save_stats() { - mysql_mutex_lock(&m_signal_mutex); + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + m_save_stats = true; - mysql_mutex_unlock(&m_signal_mutex); + + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); } }; diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index 900d0f9be19..daa766ad871 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -20,6 +20,7 @@ /* C++ standard header files */ #include #include +#include /* C standard header files */ #include @@ -212,6 +213,22 @@ const char *rdb_skip_id(const struct charset_info_st *const cs, return rdb_parse_id(cs, str, nullptr); } +/* + Parses a given string into tokens (if any) separated by a specific delimiter. 
+*/ +const std::vector parse_into_tokens( + const std::string& s, const char delim) { + std::vector tokens; + std::string t; + std::stringstream ss(s); + + while (getline(ss, t, delim)) { + tokens.push_back(t); + } + + return tokens; +} + static const std::size_t rdb_hex_bytes_per_char = 2; static const std::array rdb_hexdigit = {{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index b337ed108d3..58db29b3079 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -18,8 +18,11 @@ /* C++ standard header files */ #include #include +#include /* MySQL header files */ +#include "../sql/log.h" +#include "./my_stacktrace.h" #include "./sql_string.h" /* RocksDB header files */ @@ -129,6 +132,16 @@ namespace myrocks { #define HA_EXIT_SUCCESS FALSE #define HA_EXIT_FAILURE TRUE +/* + Macros to better convey the intent behind checking the results from locking + and unlocking mutexes. +*/ +#define RDB_MUTEX_LOCK_CHECK(m) \ + rdb_check_mutex_call_result(__PRETTY_FUNCTION__, true, mysql_mutex_lock(&m)) +#define RDB_MUTEX_UNLOCK_CHECK(m) \ + rdb_check_mutex_call_result(__PRETTY_FUNCTION__, false, \ + mysql_mutex_unlock(&m)) + /* Generic constant. */ @@ -203,6 +216,28 @@ inline int purge_all_jemalloc_arenas() { #endif } +/* + Helper function to check the result of locking or unlocking a mutex. We'll + intentionally abort in case of a failure because it's better to terminate + the process instead of continuing in an undefined state and corrupting data + as a result. +*/ +inline void rdb_check_mutex_call_result(const char *function_name, + const bool attempt_lock, + const int result) { + if (unlikely(result)) { + /* NO_LINT_DEBUG */ + sql_print_error("%s a mutex inside %s failed with an " + "error code %d.", + attempt_lock ? 
"Locking" : "Unlocking", function_name, + result); + + // This will hopefully result in a meaningful stack trace which we can use + // to efficiently debug the root cause. + abort_with_stack_traces(); + } +} + /* Helper functions to parse strings. */ @@ -230,6 +265,9 @@ const char *rdb_parse_id(const struct charset_info_st *const cs, const char *rdb_skip_id(const struct charset_info_st *const cs, const char *str) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); +const std::vector parse_into_tokens(const std::string& s, + const char delim); + /* Helper functions to populate strings. */ diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc index 52d23f20a32..08efcf5ddf5 100644 --- a/storage/rocksdb/tools/mysql_ldb.cc +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -3,13 +3,13 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. // -#include "rocksdb/ldb_tool.h" #include "../rdb_comparator.h" +#include "rocksdb/ldb_tool.h" -int main(int argc, char** argv) { +int main(int argc, char **argv) { rocksdb::Options db_options; const myrocks::Rdb_pk_comparator pk_comparator; - db_options.comparator= &pk_comparator; + db_options.comparator = &pk_comparator; rocksdb::LDBTool tool; tool.Run(argc, argv, db_options); diff --git a/storage/rocksdb/unittest/test_properties_collector.cc b/storage/rocksdb/unittest/test_properties_collector.cc index f798a43d045..46a3badc6ee 100644 --- a/storage/rocksdb/unittest/test_properties_collector.cc +++ b/storage/rocksdb/unittest/test_properties_collector.cc @@ -18,43 +18,37 @@ #include "../ha_rocksdb.h" #include "../rdb_datadic.h" -void putKeys(myrocks::Rdb_tbl_prop_coll* coll, - int num, - bool is_delete, - uint64_t expected_deleted) -{ +void putKeys(myrocks::Rdb_tbl_prop_coll *coll, int num, bool is_delete, + uint64_t expected_deleted) { std::string str("aaaaaaaaaaaaaa"); rocksdb::Slice sl(str.data(), 
str.size()); - for (int i=0; i < num; i++) { + for (int i = 0; i < num; i++) { coll->AddUserKey( - sl, sl, - is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut, - 0, 100); + sl, sl, is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut, 0, 100); } - DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted); + DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted); } -int main(int argc, char** argv) -{ +int main(int argc, char **argv) { // test the circular buffer for delete flags myrocks::Rdb_compact_params params; - params.m_file_size= 333; - params.m_deletes= 333; // irrelevant - params.m_window= 10; + params.m_file_size = 333; + params.m_deletes = 333; // irrelevant + params.m_window = 10; myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0, - RDB_DEFAULT_TBL_STATS_SAMPLE_PCT); + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT); - putKeys(&coll, 2, true, 2); // [xx] - putKeys(&coll, 3, false, 2); // [xxo] - putKeys(&coll, 1, true, 3); // [xxox] - putKeys(&coll, 6, false, 3); // [xxoxoooooo] - putKeys(&coll, 3, true, 4); // xxo[xooooooxxx] - putKeys(&coll, 1, false, 4); // xxox[ooooooxxxo] - putKeys(&coll, 100, false, 4); // ....[oooooooooo] - putKeys(&coll, 100, true, 10); // ....[xxxxxxxxxx] - putKeys(&coll, 100, true, 10); // ....[oooooooooo] + putKeys(&coll, 2, true, 2); // [xx] + putKeys(&coll, 3, false, 2); // [xxo] + putKeys(&coll, 1, true, 3); // [xxox] + putKeys(&coll, 6, false, 3); // [xxoxoooooo] + putKeys(&coll, 3, true, 4); // xxo[xooooooxxx] + putKeys(&coll, 1, false, 4); // xxox[ooooooxxxo] + putKeys(&coll, 100, false, 4); // ....[oooooooooo] + putKeys(&coll, 100, true, 10); // ....[xxxxxxxxxx] + putKeys(&coll, 100, true, 10); // ....[oooooooooo] return 0; } From d49bbf12a232074d41b9b4dd1472eced665cb14a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 Mar 2017 12:14:33 +0300 Subject: [PATCH 153/233] MariaRocks: post-merge fixes: trivial updates to a few test results --- .../mysql-test/rocksdb/r/add_index_inplace_cardinality.result | 3 +++ 
storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result | 3 +++ storage/rocksdb/mysql-test/rocksdb/r/unique_check.result | 1 + .../rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test | 2 ++ 4 files changed, 9 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result index f1ccff01e16..61105fa1ba2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result @@ -3,8 +3,10 @@ CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB; INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed'; ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +connect con1,localhost,root,,; SET GLOBAL rocksdb_force_flush_memtable_now = 1; SET debug_sync= 'now SIGNAL flushed'; +connection default; SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP WHERE INDEX_NUMBER = (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL @@ -17,5 +19,6 @@ WHERE INDEX_NUMBER = WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj"); COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX # # SSTNAME 5 # # # # # 5,5 +disconnect con1; SET debug_sync='RESET'; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result index d859c8551b2..d75355f599f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -3,6 +3,7 @@ CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' 
PARTITION BY KEY() PARTITIONS 4; +connect other,localhost,root,,; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; STAT_TYPE VALUE @@ -11,6 +12,7 @@ start transaction with consistent snapshot; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; STAT_TYPE VALUE DB_NUM_SNAPSHOTS 1 +connection default; set rocksdb_bulk_load=1; set rocksdb_bulk_load_size=100000; LOAD DATA INFILE INTO TABLE t1; @@ -61,4 +63,5 @@ count(b) 5000000 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp test.bulk_load.tmp +disconnect other; DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result index 41689e7bd06..8de94e0297e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result @@ -102,6 +102,7 @@ disconnect con1; disconnect con2; disconnect con3; drop table t1, t2; +connection default; drop table if exists t1,t2,t3; create table t1 (id int, value int, primary key (id)) engine=rocksdb; create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test index 7cffa2e62a6..93febbc1319 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test @@ -1,5 +1,7 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc + --disable_warnings DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; From ec01aa5d6bc37b6b81e04bc1987e1a47eff1e4b2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 Mar 2017 15:59:46 +0300 Subject: [PATCH 154/233] MariaRocks: fix compilation in Windows: don't use __PRETTY_FUNCTION__ 
where it is not available --- storage/rocksdb/rdb_utils.h | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index e3c2fb36f15..71ec8ef54ab 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -106,6 +106,18 @@ namespace myrocks { DBUG_ASSERT(static_cast(a) == static_cast(b)) #endif + +/* + Portability: use __PRETTY_FUNCTION__ when available, otherwise use __func__ + which is in the standard. +*/ + +#ifdef __GNUC__ +# define __MYROCKS_PORTABLE_PRETTY_FUNCTION__ __PRETTY_FUNCTION__ +#else +# define __MYROCKS_PORTABLE_PRETTY_FUNCTION__ __func__ +#endif + /* Intent behind this macro is to avoid manually typing the function name every time we want to add the debugging statement and use the compiler for this @@ -116,11 +128,7 @@ namespace myrocks { contains the signature of the function as well as its bare name and provides therefore more context when interpreting the logs. */ -#ifdef __GNUC__ -# define DBUG_ENTER_FUNC() DBUG_ENTER(__PRETTY_FUNCTION__) -#else -# define DBUG_ENTER_FUNC() DBUG_ENTER(__func__) -#endif +#define DBUG_ENTER_FUNC() DBUG_ENTER(__MYROCKS_PORTABLE_PRETTY_FUNCTION__) /* Error handling pattern used across MySQL abides by the following rules: "All @@ -143,9 +151,10 @@ namespace myrocks { and unlocking mutexes. 
*/ #define RDB_MUTEX_LOCK_CHECK(m) \ - rdb_check_mutex_call_result(__PRETTY_FUNCTION__, true, mysql_mutex_lock(&m)) + rdb_check_mutex_call_result(__MYROCKS_PORTABLE_PRETTY_FUNCTION__, true, \ + mysql_mutex_lock(&m)) #define RDB_MUTEX_UNLOCK_CHECK(m) \ - rdb_check_mutex_call_result(__PRETTY_FUNCTION__, false, \ + rdb_check_mutex_call_result(__MYROCKS_PORTABLE_PRETTY_FUNCTION__, false, \ mysql_mutex_unlock(&m)) /* From a72abc8c30c62e01fc2eae934478176ed02597e9 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 Mar 2017 16:08:26 +0300 Subject: [PATCH 155/233] Fix compile on windows --- storage/rocksdb/build_rocksdb.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index d7f655e6a2e..f29b4e5fabe 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -337,6 +337,7 @@ if(WIN32) port/win/env_default.cc port/win/port_win.cc port/win/win_logger.cc + port/win/win_thread.cc port/win/xpress_win.cc) else() list(APPEND ROCKSDB_SOURCES From 11789a4fbe754c0aeedbc4618ef226f259c2366d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 Mar 2017 17:39:45 +0300 Subject: [PATCH 156/233] MariaRocks: Only call pthread_setname_np on platforms that support it This is a second such fix, the first was wiped out in a merge. --- storage/rocksdb/rdb_threads.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index b0bf1bad8da..a93e4fc93f2 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -112,6 +112,7 @@ public: pthread_setname_np resolves the issue. 
*/ DBUG_ASSERT(!m_name.empty()); +#ifdef __linux__ int err = pthread_setname_np(m_handle, m_name.c_str()); if (err) { @@ -120,6 +121,7 @@ public: "MyRocks: Failed to set name (%s) for current thread, errno=%d", m_name.c_str(), errno); } +#endif } void uninit(); From 17aa495b641238619497205f4f4d47070362cf63 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 12 Mar 2017 22:52:52 +0300 Subject: [PATCH 157/233] MariaRocks: attempt to get to compile on Windows \include\mysql/psi/psi.h(1267): error C2061: syntax error: identifier 'pthread_t' (compiling source file D:\win32-debug\build\src \storage\rocksdb\rdb_psi.cc)\include\mysql/psi/psi.h(1267): error C2061: syntax error: identifier 'pthread_t' (compiling source file D:\win32-debug\build\src\storage\rocksdb\rdb_psi.cc) --- storage/rocksdb/rdb_psi.h | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/rdb_psi.h b/storage/rocksdb/rdb_psi.h index 2df3b96a64d..0a62f411ade 100644 --- a/storage/rocksdb/rdb_psi.h +++ b/storage/rocksdb/rdb_psi.h @@ -19,6 +19,7 @@ /* MySQL header files */ #include +#include #include /* MyRocks header files */ From b4ea125252b295e5fb319cb4a18a6a164c138862 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 13 Mar 2017 10:34:19 +0300 Subject: [PATCH 158/233] MariaRocks: disable tests that are known to fail --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 0efe609ae63..56c78f02db0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -24,3 +24,15 @@ gap_lock_raise_error: MDEV-11735: MyRocks: Gap Lock detector support rqg_examples : Test that use RQG are disabled rqg_runtime : Test that use RQG are disabled rqg_transactions : Test that use RQG are disabled + +# +# Temporarily disabled tests +# +information_schema : MariaRocks: 
requires GTIDs +mysqlbinlog_gtid_skip_empty_trans_rocksdb : MariaRocks: requires GTIDs +read_only_tx : MariaRocks: requires GTIDs + +2pc_group_commit : MariaRocks: Group Commit is not functional yet + +mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key + From 69387c68b594c01a9e47424779a4de257fe32233 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 13 Mar 2017 10:34:38 +0300 Subject: [PATCH 159/233] MariaRocks: update results for innodb_i_s_tables_disabled --- .../rocksdb/r/innodb_i_s_tables_disabled.result | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result index c75fe5893b0..c49dbba751a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -1,5 +1,5 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; -trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_adaptive_hash_timeout trx_is_read_only trx_autocommit_non_locking +trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_is_read_only trx_autocommit_non_locking SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; lock_id lock_trx_id lock_mode lock_type lock_table 
lock_index lock_space lock_page lock_rec lock_data SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; @@ -198,16 +198,8 @@ compress_pages_decompressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL N compression_pad_increments compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is incremented to avoid compression failures compression_pad_decrements compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is decremented due to good compressibility compress_saved compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of bytes saved by page compression -compress_trim_sect512 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-512 TRIMed by page compression -compress_trim_sect1024 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-1024 TRIMed by page compression -compress_trim_sect2048 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-2048 TRIMed by page compression -compress_trim_sect4096 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-4K TRIMed by page compression -compress_trim_sect8192 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-8K TRIMed by page compression -compress_trim_sect16384 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-16K TRIMed by page compression -compress_trim_sect32768 compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sect-32K TRIMed by page compression compress_pages_page_compressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages compressed by page compression compress_page_compressed_trim_op compression 0 NULL NULL NULL 0 
NULL NULL NULL NULL NULL NULL NULL disabled counter Number of TRIM operation performed by page compression -compress_page_compressed_trim_op_saved compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of TRIM operation saved by page compression compress_pages_page_decompressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decompressed by page compression compress_pages_page_compression_error compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of page compression errors compress_pages_encrypted compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages encrypted From 57672a85e3fdd60cd6608ad204d0250e700efbdd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 13 Mar 2017 22:02:39 +0000 Subject: [PATCH 160/233] MariaRocks: make partition.test work on any platform --- storage/rocksdb/mysql-test/rocksdb/r/partition.result | 5 +++++ storage/rocksdb/mysql-test/rocksdb/t/partition.test | 7 +++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result index 4c7651327cb..1ba966e9e07 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/partition.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result @@ -51,6 +51,11 @@ Table Op Msg_type Msg_text test.t1 repair status OK Table Op Msg_type Msg_text test.t1 check status OK +select lower(table_name) as tname +from information_schema.tables +where table_schema=database() +order by tname; +tname t1 temp0 var_pop diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test index 95efe67c9da..5954c0d95db 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -77,8 +77,11 @@ CREATE TABLE TEMP0 (a int) ENGINE = 
ROCKSDB PARTITION BY HASH (a) PARTITIONS 3; CREATE TABLE VAR_SAMP (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 10; --enable_query_log ---lowercase_result -SHOW TABLES; + +select lower(table_name) as tname +from information_schema.tables +where table_schema=database() +order by tname; SELECT * FROM t1 ORDER BY i LIMIT 10; SELECT COUNT(*) FROM t1; From 3eb8bc740817aa7010a070be0699bff6266f829c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 14 Mar 2017 01:01:11 +0200 Subject: [PATCH 161/233] Make rocksdb not be compiled on x86 architectures --- storage/rocksdb/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index f066bd48f9f..ed1667dd6e1 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -15,6 +15,11 @@ MACRO(SKIP_ROCKSDB_PLUGIN msg) RETURN() ENDMACRO() +# We've had our builders hang during the build process. This prevents MariaRocks +# to be built on 32 bit intel OS kernels. 
+IF(CMAKE_SYSTEM_PROCESSOR MATCHES "i[36]86") + SKIP_ROCKSDB_PLUGIN("Intel 32 bit not supported.") +ENDIF() # This plugin needs recent C++ compilers (it is using C++11 features) # Skip build for the old compilers From 1a3065b51a3d66cb5a393e8b51ff0e05051f74b8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 14 Mar 2017 12:23:08 +0300 Subject: [PATCH 162/233] MariaRocks: make rocksdb.issue495 declare it uses partitioning --- storage/rocksdb/mysql-test/rocksdb/r/issue495.result | 2 -- storage/rocksdb/mysql-test/rocksdb/t/issue495.test | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result index 2560ec577ed..c7ac34c6294 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result @@ -1,6 +1,4 @@ drop table if exists t; -Warnings: -Note 1051 Unknown table 'test.t' create table t ( a int, b int, diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test index bb215ebcd99..ee4d10bd33d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test @@ -1,4 +1,8 @@ +--source include/have_partition.inc +--disable_warnings drop table if exists t; +--enable_warnings + create table t ( a int, b int, From 20c085a4b7a9e5c207db4b622a202d5e4500759f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 14 Mar 2017 12:23:36 +0300 Subject: [PATCH 163/233] MariaRocks: disable rocksdb.mysqldump2 also (needs --print-ordering-key) --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 56c78f02db0..0058993a103 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -35,4 +35,5 @@ read_only_tx : MariaRocks: requires GTIDs 2pc_group_commit : MariaRocks: Group Commit is not functional yet mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key +mysqldump2 : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key From bf578ff9204a13128bcfc77bb45d73debce7ee45 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 15 Mar 2017 16:02:37 +0000 Subject: [PATCH 164/233] Add missing source include/have_rocksdb.inc --- storage/rocksdb/mysql-test/rocksdb/t/issue495.test | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test index ee4d10bd33d..5dcc7c19ba9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc --source include/have_partition.inc --disable_warnings drop table if exists t; From c010f06380f004adbc6f7062f656ab9dfa73e15f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 15 Mar 2017 23:19:33 +0300 Subject: [PATCH 165/233] MariaRocks: Run rocksdb testsuite with @@rocksdb_flush_log_at_trx_commit=0 The default value of 1 causes many tests to time out (primary reason is that many tests populate tables with one-row INSERT statements that run with autocommit=1). 
--- storage/rocksdb/mysql-test/rocksdb/my.cnf | 2 ++ storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result | 2 +- storage/rocksdb/mysql-test/rocksdb/r/write_sync.result | 3 ++- storage/rocksdb/mysql-test/rocksdb/t/write_sync.test | 4 ++-- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index c6817df9d18..64420f704d8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -9,3 +9,5 @@ sql-mode=NO_ENGINE_SUBSTITUTION explicit-defaults-for-timestamp=1 loose-rocksdb_lock_wait_timeout=1 loose-rocksdb_strict_collation_check=0 + +loose-rocksdb-flush-log-at-trx-commit=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index aa0bd231d34..bb02660d67e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -910,7 +910,7 @@ rocksdb_enable_bulk_load_api ON rocksdb_enable_thread_tracking OFF rocksdb_enable_write_thread_adaptive_yield OFF rocksdb_error_if_exists OFF -rocksdb_flush_log_at_trx_commit 1 +rocksdb_flush_log_at_trx_commit 0 rocksdb_flush_memtable_on_analyze ON rocksdb_force_compute_memtable_stats ON rocksdb_force_flush_memtable_now OFF diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result index 6ba50a3796f..8f8495302e7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -1,6 +1,7 @@ SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; +set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; SET LOCAL rocksdb_flush_log_at_trx_commit=0; select variable_value into @a from 
information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); @@ -33,7 +34,7 @@ SET LOCAL rocksdb_flush_log_at_trx_commit=0; insert aaa(id, i) values(7,1); truncate table aaa; drop table aaa; -SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index 672687b044e..cd8237de2fd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -4,7 +4,7 @@ SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; - +set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; SET LOCAL rocksdb_flush_log_at_trx_commit=0; --exec sleep 30 select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; @@ -35,7 +35,7 @@ truncate table aaa; # Cleanup drop table aaa; -SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; SET GLOBAL rocksdb_background_sync=off; From 6dc2d581d478b5563250bde6db8d8226372f96f1 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 15 Mar 2017 23:36:20 +0300 Subject: [PATCH 166/233] Make rocksdb.rocksdb_range pass: MariaDB doesnt support ICP over reverse index scans atm --- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result | 5 ++++- storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test | 3 +++ 2 files changed, 7 insertions(+), 1 
deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result index 8cf38fd207d..918859ea036 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result @@ -198,10 +198,13 @@ max(pk) # #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF # # Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +# Note: the next explain has "Using index condition" in fb/mysql-5.6 +# but "Using where" in MariaDB because the latter does not +# support ICP over reverse scans. explain select * from t2 where a between 99 and 2000 order by a desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range a a 4 NULL # Using index condition +1 SIMPLE t2 range a a 4 NULL # Using where select * from t2 where a between 99 and 2000 order by a desc; pk a b 999 99 999 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test index c6f1ecc8424..f4b6096c696 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test @@ -128,6 +128,9 @@ select max(pk) from t3 where a=3 and pk < 33; --echo # --echo # Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +--echo # Note: the next explain has "Using index condition" in fb/mysql-5.6 +--echo # but "Using where" in MariaDB because the latter does not +--echo # support ICP over reverse scans. --replace_column 9 # explain select * from t2 where a between 99 and 2000 order by a desc; From adb7470742933ca4cdef556e1d14175843b7165f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 15 Mar 2017 23:44:16 +0300 Subject: [PATCH 167/233] Disable rocksdb.rpl_row_triggers, rocksdb.trx_info_rpl These are not expected to work yet. 
--- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 0058993a103..f431050d1f2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -31,7 +31,9 @@ rqg_transactions : Test that use RQG are disabled information_schema : MariaRocks: requires GTIDs mysqlbinlog_gtid_skip_empty_trans_rocksdb : MariaRocks: requires GTIDs read_only_tx : MariaRocks: requires GTIDs +rpl_row_triggers : MariaRocks: requires GTIDs +trx_info_rpl : MariaRocks: @@rpl_skip_tx_api doesn't work, yet. 2pc_group_commit : MariaRocks: Group Commit is not functional yet mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key From 38919f68a1eb1dca14a41885828499a9c0cb8157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Wed, 15 Mar 2017 23:19:24 +0200 Subject: [PATCH 168/233] Make rocksdb build as a deb package too --- debian/control | 9 +++++++++ storage/rocksdb/CMakeLists.txt | 4 +++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/debian/control b/debian/control index d0ecf4bddc3..c501078f678 100644 --- a/debian/control +++ b/debian/control @@ -22,6 +22,7 @@ Build-Depends: bison, libpcre3-dev (>= 2:8.35-3.2~), libreadline-gplv2-dev, libssl-dev, + libsnappy-dev, libsystemd-dev, libxml2-dev, lsb-release, @@ -452,6 +453,14 @@ Description: Connect storage engine for MariaDB other interesting features. This package contains the Connect plugin for MariaDB. +Package: mariadb-plugin-rocksdb +Architecture: any +Depends: mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} +Description: RocksDB storage engine for MariaDB + The RocksDB storage engine is a high performance storage engine, aimed + at maximising storage efficiency while maintaining InnoDB-like performance. + This package contains the RocksDB plugin for MariaDB. 
+ Package: mariadb-plugin-oqgraph Architecture: any Depends: libjudydebian1, mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index ed1667dd6e1..2d8e67bd87d 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -74,7 +74,9 @@ SET(ROCKSDB_SE_SOURCES rdb_psi.cc ) -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb) +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE + MODULE_OUTPUT_NAME ha_rocksdb + COMPONENT rocksdb-engine) IF(NOT TARGET rocksdb_se) # Bail out if compilation with rocksdb engine is not requested From c5a20553c03bd44e112ef8e67a3f822b4ae2c532 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 16 Mar 2017 01:12:01 +0300 Subject: [PATCH 169/233] More testsuite fixes - Disable rocksdb.show_enge - Disable rocksdb.rpl_row_not_found - Run rocksdb.blind_delete_without_tx_api only with binlog_format=row (like its .cnf file specifies) --- .../mysql-test/rocksdb/t/blind_delete_without_tx_api.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 3 +++ 2 files changed, 4 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test index 0481634f346..e5f70be4c3b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc source include/master-slave.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index f431050d1f2..1ec51849e7e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -39,3 +39,6 @@ trx_info_rpl : MariaRocks: @@rpl_skip_tx_api doesn't 
work, yet. mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key mysqldump2 : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key +show_engine : MariaRocks: MariaDB doesnt support SHOW ENGINE rocksdb TRANSACTION STATUS + +rpl_row_not_found : MariaDB doesnt support slave_exec_mode='SEMI_STRICT' From 17d7cc731ebfeb110757fe182dc7329b7a145c68 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 16 Mar 2017 09:46:01 +0300 Subject: [PATCH 170/233] MDEV-12277: rocksdb.rocksdb fails with Sort Aborted error in server stderr Add a suppression --- storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result | 1 + storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test | 1 + 2 files changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index bb02660d67e..e945e362f99 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -1172,6 +1172,7 @@ DROP TABLE t1; # # MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort # +call mtr.add_suppression("Sort aborted"); CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; INSERT INTO t1 VALUES (1,1),(2,2); BEGIN; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index c572796c7ca..87fc2e6f0fb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -986,6 +986,7 @@ DROP TABLE t1; --echo # --echo # MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort --echo # +call mtr.add_suppression("Sort aborted"); CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; INSERT INTO t1 VALUES (1,1),(2,2); BEGIN; From 49de95679da3040f6e653640476208d6fbf2c24e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 16 Mar 
2017 09:49:34 +0200 Subject: [PATCH 171/233] Revert "Make rocksdb build as a deb package too" This reverts commit 38919f68a1eb1dca14a41885828499a9c0cb8157. Temporary revert to be able to see other failures until builders are updated. --- debian/control | 9 --------- storage/rocksdb/CMakeLists.txt | 4 +--- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/debian/control b/debian/control index c501078f678..d0ecf4bddc3 100644 --- a/debian/control +++ b/debian/control @@ -22,7 +22,6 @@ Build-Depends: bison, libpcre3-dev (>= 2:8.35-3.2~), libreadline-gplv2-dev, libssl-dev, - libsnappy-dev, libsystemd-dev, libxml2-dev, lsb-release, @@ -453,14 +452,6 @@ Description: Connect storage engine for MariaDB other interesting features. This package contains the Connect plugin for MariaDB. -Package: mariadb-plugin-rocksdb -Architecture: any -Depends: mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} -Description: RocksDB storage engine for MariaDB - The RocksDB storage engine is a high performance storage engine, aimed - at maximising storage efficiency while maintaining InnoDB-like performance. - This package contains the RocksDB plugin for MariaDB. 
- Package: mariadb-plugin-oqgraph Architecture: any Depends: libjudydebian1, mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 2d8e67bd87d..ed1667dd6e1 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -74,9 +74,7 @@ SET(ROCKSDB_SE_SOURCES rdb_psi.cc ) -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE - MODULE_OUTPUT_NAME ha_rocksdb - COMPONENT rocksdb-engine) +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb) IF(NOT TARGET rocksdb_se) # Bail out if compilation with rocksdb engine is not requested From 23f9bb966b96b8555328b9e913724f435cb08182 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 16 Mar 2017 21:28:42 +0300 Subject: [PATCH 172/233] MDEV-12285: MariaRocks: "[ERROR] mysqld: Deadlock ..." messages in server stderr The mssages are caused by log_warnings=2. Set log_warnings=1 for the rocksdb test suite. --- storage/rocksdb/mysql-test/rocksdb/my.cnf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf index 64420f704d8..2beaf514cee 100644 --- a/storage/rocksdb/mysql-test/rocksdb/my.cnf +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -11,3 +11,7 @@ loose-rocksdb_lock_wait_timeout=1 loose-rocksdb_strict_collation_check=0 loose-rocksdb-flush-log-at-trx-commit=0 + +# The following is to get rid of the harmless +# "Deadlock found when trying to get lock" errors, see MDEV-12285. +log-warnings=1 From 46a78868fead5dadf68758d21e20e9c89c23d89d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 17 Mar 2017 00:05:48 +0300 Subject: [PATCH 173/233] MariaRocks: make rocksdb.rocksdb_datadir test pass The test runs $MYSQLD_BOOTSTRAP_CMD but that command does not include arguments for loading ha_rocksdb.so plugin. Add them. 
--- storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test index ba10dcbe3b6..18dff316161 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test @@ -19,8 +19,9 @@ EOF # Must ensure this directory exists before launching mysqld mkdir $ddir; +let $plugin_dir=`select @@plugin_dir`; # Launch mysqld with non-standard rocksdb_datadir -exec $MYSQLD_BOOTSTRAP_CMD --datadir=$ddir --rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < $sql_file; +exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$plugin_dir --plugin-load=$HA_ROCKSDB_SO --datadir=$ddir --rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < $sql_file; --echo Check for the number of MANIFEST files exec ls $rdb_ddir/MANIFEST-0000* | wc -l; From c707997e159f100e566ff14b21f10863c6358592 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 17 Mar 2017 01:21:11 +0300 Subject: [PATCH 174/233] MariaRocks: run rocksdb testsuite with --default-storage-engine=rocksdb Most tests use CREATE TABLE ... ENGINE=ROCKSB, but there are some exceptions: rpl_savepoint, rpl_row_stats. In order to avoid any "oh we are using the wrong storage engine" surprises, set the default for the whole testsuite. 
--- storage/rocksdb/mysql-test/rocksdb/suite.opt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt index 431fc331458..f5dc0ce891c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.opt +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -1,2 +1,2 @@ ---ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO +--ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO --default-storage-engine=rocksdb From dd743dae32f5e7c26fa453dd63bfcd064baa20c2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 17 Mar 2017 04:05:03 +0300 Subject: [PATCH 175/233] Update test results for rocksdb.misc This .result file is not a statement of which storage engine should be used for any particular table in mysql database. This is just a check that a query against I_S doesn't crash. --- .../rocksdb/mysql-test/rocksdb/r/misc.result | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result index a7ee3f9a199..4a39f1cbff4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/misc.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result @@ -28,6 +28,12 @@ DROP EVENT ev1; SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; TABLE_NAME COLUMN_NAME REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +Warning 1286 Unknown storage engine 'InnoDB' +Warning 1286 Unknown storage engine 'InnoDB' +Warnings: +column_stats column_name NULL NULL +column_stats db_name NULL NULL +column_stats table_name NULL NULL columns_priv Column_name NULL NULL columns_priv Db NULL NULL columns_priv Host NULL NULL @@ -39,6 +45,8 @@ db User NULL NULL event db NULL NULL event name NULL NULL func name NULL NULL +gtid_slave_pos domain_id NULL NULL +gtid_slave_pos sub_id NULL NULL help_category help_category_id 
NULL NULL help_category name NULL NULL help_keyword help_keyword_id NULL NULL @@ -47,9 +55,12 @@ help_relation help_keyword_id NULL NULL help_relation help_topic_id NULL NULL help_topic help_topic_id NULL NULL help_topic name NULL NULL -ndb_binlog_index epoch NULL NULL -ndb_binlog_index orig_epoch NULL NULL -ndb_binlog_index orig_server_id NULL NULL +host Db NULL NULL +host Host NULL NULL +index_stats db_name NULL NULL +index_stats index_name NULL NULL +index_stats prefix_arity NULL NULL +index_stats table_name NULL NULL plugin name NULL NULL proc db NULL NULL proc name NULL NULL @@ -63,12 +74,12 @@ proxies_priv Host NULL NULL proxies_priv Proxied_host NULL NULL proxies_priv Proxied_user NULL NULL proxies_priv User NULL NULL +roles_mapping Host NULL NULL +roles_mapping Role NULL NULL +roles_mapping User NULL NULL servers Server_name NULL NULL -slave_gtid_info Id NULL NULL -slave_master_info Host NULL NULL -slave_master_info Port NULL NULL -slave_relay_log_info Id NULL NULL -slave_worker_info Id NULL NULL +table_stats db_name NULL NULL +table_stats table_name NULL NULL tables_priv Db NULL NULL tables_priv Host NULL NULL tables_priv Table_name NULL NULL From 131d858206525cd6fa3296fd1b025ccee396beac Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 17 Mar 2017 04:46:01 +0300 Subject: [PATCH 176/233] Temporarily disable rocksdb.blind_delete_without_tx_api test --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 1ec51849e7e..7c8841b518e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -42,3 +42,5 @@ mysqldump2 : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-ke show_engine : MariaRocks: MariaDB doesnt support SHOW ENGINE rocksdb TRANSACTION STATUS rpl_row_not_found : MariaDB doesnt support 
slave_exec_mode='SEMI_STRICT' + +blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails From 619623b86234e768421476cdfa53a53796deb18d Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 17 Mar 2017 14:44:05 +0300 Subject: [PATCH 177/233] MariaRocks: SET GLOBAL rocksdb_strict_collation_exceptions=null crashes A trivial fix --- storage/rocksdb/ha_rocksdb.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 5cb20974840..35e91a94999 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -10517,7 +10517,7 @@ void rocksdb_set_collation_exception_list(THD *const thd, rdb_set_collation_exception_list(val == nullptr ? "" : val); //psergey-todo: what is the purpose of the below?? - const char *val_copy= my_strdup(val, MYF(0)); + const char *val_copy= val? my_strdup(val, MYF(0)): nullptr; my_free(*static_cast(var_ptr)); *static_cast(var_ptr) = val_copy; } From 21bbe10bb3458c0f8336f8fee8b2246f005e76e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Fri, 17 Mar 2017 17:08:34 +0200 Subject: [PATCH 178/233] Revert "Revert "Make rocksdb build as a deb package too"" This reverts commit 49de95679da3040f6e653640476208d6fbf2c24e. --- debian/control | 9 +++++++++ storage/rocksdb/CMakeLists.txt | 4 +++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/debian/control b/debian/control index d0ecf4bddc3..c501078f678 100644 --- a/debian/control +++ b/debian/control @@ -22,6 +22,7 @@ Build-Depends: bison, libpcre3-dev (>= 2:8.35-3.2~), libreadline-gplv2-dev, libssl-dev, + libsnappy-dev, libsystemd-dev, libxml2-dev, lsb-release, @@ -452,6 +453,14 @@ Description: Connect storage engine for MariaDB other interesting features. This package contains the Connect plugin for MariaDB. 
+Package: mariadb-plugin-rocksdb +Architecture: any +Depends: mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} +Description: RocksDB storage engine for MariaDB + The RocksDB storage engine is a high performance storage engine, aimed + at maximising storage efficiency while maintaining InnoDB-like performance. + This package contains the RocksDB plugin for MariaDB. + Package: mariadb-plugin-oqgraph Architecture: any Depends: libjudydebian1, mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index ed1667dd6e1..2d8e67bd87d 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -74,7 +74,9 @@ SET(ROCKSDB_SE_SOURCES rdb_psi.cc ) -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb) +MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE + MODULE_OUTPUT_NAME ha_rocksdb + COMPONENT rocksdb-engine) IF(NOT TARGET rocksdb_se) # Bail out if compilation with rocksdb engine is not requested From 69ba6b36e624fbe16aead82d3e013aa49d969b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 21 Mar 2017 16:25:38 +0200 Subject: [PATCH 179/233] Add rocksdb as a plugin into debian packaging --- debian/mariadb-plugin-rocksdb.install | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 debian/mariadb-plugin-rocksdb.install diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install new file mode 100644 index 00000000000..e92306c97b7 --- /dev/null +++ b/debian/mariadb-plugin-rocksdb.install @@ -0,0 +1,3 @@ +etc/mysql/conf.d/rocksdb_se.cnf etc/mysql/mariadb.conf.d +usr/lib/mysql/plugin/ha_rocksdb.so + From 4967c78aa07c9f657ae4825ff16c176a0f75193a Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 21 Mar 2017 19:41:28 +0000 Subject: [PATCH 180/233] make sure rocksdb-engine compoment is in MSI --- win/packaging/CPackWixConfig.cmake | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/win/packaging/CPackWixConfig.cmake b/win/packaging/CPackWixConfig.cmake index be07ff7d561..a8208de659f 100644 --- a/win/packaging/CPackWixConfig.cmake +++ b/win/packaging/CPackWixConfig.cmake @@ -9,7 +9,7 @@ IF(ESSENTIALS) ENDIF() ELSE() SET(CPACK_COMPONENTS_USED - "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management") + "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") ENDIF() SET( WIX_FEATURE_MySQLServer_EXTRA_FEATURES "DBInstance;SharedClientServerComponents") @@ -57,7 +57,7 @@ SET(CPACK_COMPONENT_GROUP_MYSQLSERVER_DESCRIPTION "Install server") #Miscellaneous (hidden) components, part of server / or client programs - FOREACH(comp connect-engine ClientPlugins gssapi-server gssapi-client aws-key-management) + FOREACH(comp connect-engine ClientPlugins gssapi-server gssapi-client aws-key-management rocksdb-engine) STRING(TOUPPER "${comp}" comp) SET(CPACK_COMPONENT_${comp}_GROUP "MySQLServer") SET(CPACK_COMPONENT_${comp}_HIDDEN 1) From 4653b6e2a3a459b5ef9bbc0907c141426830f23c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 23 Mar 2017 11:06:26 +0300 Subject: [PATCH 181/233] MDEV-12279: rocksdb.tbl_opt_data_index_dir fails, wrong error code Change the returned error code to be ER_CANT_CREATE_TABLE. Emit the warning text ourselves. (When a query produces both an error and a warning, command-line client with default settings will not provide any indication that the warning is present, unfortunately. 
Need \W) --- storage/rocksdb/ha_rocksdb.cc | 8 ++++++-- .../rocksdb/r/tbl_opt_data_index_dir.result | 18 ++++++++++++++---- .../rocksdb/t/tbl_opt_data_index_dir.test | 15 +++++++++------ 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 35e91a94999..06b82dd2dc6 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -5417,12 +5417,16 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, // outside the MySQL data directory. We don't support this for MyRocks. // The `rocksdb_datadir` setting should be used to configure RocksDB data // directory. - DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED); + print_error(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED, + MYF(ME_JUST_WARNING)); + DBUG_RETURN(HA_WRONG_CREATE_OPTION); } if (create_info->index_file_name) { // Similar check for INDEX DIRECTORY as well. - DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED); + print_error(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED, + MYF(ME_JUST_WARNING)); + DBUG_RETURN(HA_WRONG_CREATE_OPTION); } int res; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result index bbdd604097f..d1e445f734c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -1,8 +1,18 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; -ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +show warnings; +Level Code Message +Warning 1296 Got error 198 'Specifying DATA DIRECTORY for an individual table is not supported.' 
from ROCKSDB +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; -ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +show warnings; +Level Code Message +Warning 1296 Got error 199 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -11,7 +21,7 @@ PARTITION P1 VALUES LESS THAN (2000) DATA DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -20,4 +30,4 @@ PARTITION P1 VALUES LESS THAN (2000) INDEX DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' 
from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test index ab3f240dd54..1b3a1b144d0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc # # Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified # then MyRocks returns an appropriate error. We don't support this @@ -10,17 +11,19 @@ DROP TABLE IF EXISTS t1; --enable_warnings ---error 1296 -eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; +show warnings; ---error 1296 -eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; +show warnings; # # Verify that we'll get the same error codes when using the partitions. 
# ---error 1296 +--error ER_CANT_CREATE_TABLE CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -30,7 +33,7 @@ CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE PARTITION P2 VALUES LESS THAN (MAXVALUE) ); ---error 1296 +--error ER_CANT_CREATE_TABLE CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) From 86680e8b4fbe396e5e2bcdbfe1c6512086f392fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sat, 25 Mar 2017 20:18:06 +0200 Subject: [PATCH 182/233] Skip rocksdb plugin if sources can not be fetched Either we are building from a source package, in which case all sources should be present, or we are building from a repository. The repository needs to fetch the rocksdb submodule before building rocksdb. --- storage/rocksdb/CMakeLists.txt | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 2d8e67bd87d..a680885a5a4 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -1,4 +1,10 @@ # TODO: Copyrights + +MACRO(SKIP_ROCKSDB_PLUGIN msg) + MESSAGE_ONCE(SKIP_ROCKSDB_PLUGIN "Can't build rocksdb engine - ${msg}") + RETURN() +ENDMACRO() + IF(NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile AND GIT_EXECUTABLE) EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule init WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") @@ -7,14 +13,9 @@ IF(NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile AND GIT_EXECUTABLE) ENDIF() IF (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile") - MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".") + SKIP_ROCKSDB_PLUGIN("Missing Makefile in rocksdb directory. 
Try \"git submodule update\".") ENDIF() -MACRO(SKIP_ROCKSDB_PLUGIN msg) - MESSAGE_ONCE(SKIP_ROCKSDB_PLUGIN "Can't build rocksdb engine - ${msg}") - RETURN() -ENDMACRO() - # We've had our builders hang during the build process. This prevents MariaRocks # to be built on 32 bit intel OS kernels. IF(CMAKE_SYSTEM_PROCESSOR MATCHES "i[36]86") From 3ade211a7299a78075fbd43516d8789b612284f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sat, 25 Mar 2017 23:28:05 +0200 Subject: [PATCH 183/233] Do not build ldb binary as mysql_ldb does the same thing --- storage/rocksdb/CMakeLists.txt | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index a680885a5a4..4a0e65ca17b 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -151,14 +151,11 @@ ADD_LIBRARY(rocksdb_tools STATIC MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc) TARGET_LINK_LIBRARIES(sst_dump rocksdblib) -MYSQL_ADD_EXECUTABLE(ldb rocksdb/tools/ldb.cc) -TARGET_LINK_LIBRARIES(ldb rocksdb_tools rocksdblib) - MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc) TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - SET_TARGET_PROPERTIES(rocksdb_tools sst_dump ldb mysql_ldb PROPERTIES COMPILE_FLAGS -frtti) + SET_TARGET_PROPERTIES(rocksdb_tools sst_dump mysql_ldb PROPERTIES COMPILE_FLAGS -frtti) ENDIF() IF(MSVC) # RocksDB, the storage engine, overdoes "const" by adding From 97e5ed163763591c7af1831a13233e3e9228e207 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sat, 25 Mar 2017 23:44:21 +0200 Subject: [PATCH 184/233] Add mysql_ldb to debian rocksdb package --- debian/mariadb-plugin-rocksdb.install | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install index e92306c97b7..dfd34d03aa9 
100644 --- a/debian/mariadb-plugin-rocksdb.install +++ b/debian/mariadb-plugin-rocksdb.install @@ -1,3 +1,3 @@ etc/mysql/conf.d/rocksdb_se.cnf etc/mysql/mariadb.conf.d usr/lib/mysql/plugin/ha_rocksdb.so - +usr/bin/mysql_ldb From a421f0b6b8feb41a42fc4cedfcd549d8e681e47b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sun, 26 Mar 2017 00:05:24 +0200 Subject: [PATCH 185/233] Add sst_dump as binary in rocksdb package --- debian/mariadb-plugin-rocksdb.install | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install index dfd34d03aa9..5e3b5772fc7 100644 --- a/debian/mariadb-plugin-rocksdb.install +++ b/debian/mariadb-plugin-rocksdb.install @@ -1,3 +1,4 @@ etc/mysql/conf.d/rocksdb_se.cnf etc/mysql/mariadb.conf.d usr/lib/mysql/plugin/ha_rocksdb.so usr/bin/mysql_ldb +usr/bin/sst_dump From 7ebb81be1d59eaea1697daca7842fc1cfb5b9609 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sun, 26 Mar 2017 17:30:26 +0300 Subject: [PATCH 186/233] Make mysql_ldb and sst_dump part of the rocksdb-engine component --- storage/rocksdb/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 4a0e65ca17b..b9d78239340 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -148,10 +148,10 @@ ADD_LIBRARY(rocksdb_tools STATIC rocksdb/tools/sst_dump_tool.cc ) -MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc) +MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc COMPONENT rocksdb-engine) TARGET_LINK_LIBRARIES(sst_dump rocksdblib) -MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc) +MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc COMPONENT rocksdb-engine) TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") From c29336f2b0c99d39979609374b8673573475696f 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sun, 26 Mar 2017 18:13:29 +0300 Subject: [PATCH 187/233] Skip rocksdb on debian i386 and when gcc version is < 4.8 --- debian/autobake-deb.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index 198cba8a467..bdb5535da99 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -72,6 +72,16 @@ then sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.2.install fi +# Convert gcc version to numberical value. Format is Mmmpp where M is Major +# version, mm is minor version and p is patch. +GCCVERSION=$(gcc -dumpversion | sed -e 's/\.\([0-9][0-9]\)/\1/g' -e 's/\.\([0-9]\)/0\1/g' -e 's/^[0-9]\{3,4\}$/&00/') +# Don't build rocksdb package if gcc version is less than 4.8 or we are running on +# x86 32 bit. +if [ $GCCVERSION -lt 40800 ] || [ $(uname -i) -eq "i386" ] || [$(uname -i) -eq "i486"] +then + sed '/Package: mariadb-plugin-rocksdb/,+7d' -i debian/control +fi + # Adjust changelog, add new version echo "Incrementing changelog and starting build scripts" From 54a892e133026aa93fed4fe2d5e8f379cfb92c5e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 31 Mar 2017 01:32:59 +0300 Subject: [PATCH 188/233] Post-merge fixes --- storage/rocksdb/rdb_datadic.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 829b82585fa..c2862724773 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -2831,8 +2831,10 @@ bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, did in the future we would need to make a version that does it without the connection handle as we don't have one here. 
*/ - enum legacy_db_type eng_type; - frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type); + char eng_type_buf[NAME_CHAR_LEN+1]; + LEX_STRING eng_type_str = {eng_type_buf, 0}; + //enum legacy_db_type eng_type; + frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type_str); if (type == FRMTYPE_ERROR) { sql_print_warning("RocksDB: Failed to open/read .from file: %s", fullfilename.ptr()); @@ -2841,7 +2843,7 @@ bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, if (type == FRMTYPE_TABLE) { /* For a RocksDB table do we have a reference in the data dictionary? */ - if (eng_type == DB_TYPE_ROCKSDB) { + if (!strncmp(eng_type_str.str, "ROCKSDB", eng_type_str.length)) { /* Attempt to remove the table entry from the list of tables. If this fails then we know we had a .frm file that wasn't registered in RocksDB. @@ -2854,7 +2856,7 @@ bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, dbname.c_str(), tablename.c_str()); *has_errors = true; } - } else if (eng_type == DB_TYPE_PARTITION_DB) { + } else if (!strncmp(eng_type_str.str, "partition", eng_type_str.length)) { /* For partition tables, see if it is in the m_list as a partition, but don't generate an error if it isn't there - we don't know that the From dd3b7ad10f8c994f91ef5f49728432e7b95507cf Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 31 Mar 2017 14:06:28 +0000 Subject: [PATCH 189/233] MariaRocks : Run rocksdb suite on Windows --- mysql-test/collections/buildbot_suite.bat | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 mysql-test/collections/buildbot_suite.bat diff --git a/mysql-test/collections/buildbot_suite.bat b/mysql-test/collections/buildbot_suite.bat new file mode 100644 index 00000000000..f91692d2918 --- /dev/null +++ b/mysql-test/collections/buildbot_suite.bat @@ -0,0 +1,5 @@ +perl mysql-test-run.pl --verbose-restart --force --testcase-timeout=45 --suite-timeout=600 --max-test-fail=500 --retry=3 --parallel=4 
--suite=^ +main,^ +innodb,^ +plugins,^ +rocksdb From 9de7386f6f029985e65e02a1d98240f6808020aa Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 31 Mar 2017 16:01:37 +0000 Subject: [PATCH 190/233] AWS KMS plugin : Fix building in case AWS C++ SDK was preinstalled into non-standard compiler/linker path (e.g vcpkg on Windows). Also fix linking with static preinstalled aws c++ sdk libraries --- plugin/aws_key_management/CMakeLists.txt | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index 62d359569c6..31ddcd7e038 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -61,10 +61,11 @@ ENDIF() FIND_LIBRARY(AWS_CPP_SDK_CORE NAMES aws-cpp-sdk-core PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}") FIND_LIBRARY(AWS_CPP_SDK_KMS NAMES aws-cpp-sdk-kms PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}") SET(CMAKE_REQUIRED_FLAGS ${CXX11_FLAGS}) -CHECK_INCLUDE_FILE_CXX(aws/kms/KMSClient.h HAVE_AWS_HEADERS) +FIND_PATH(AWS_CPP_SDK_INCLUDE_DIR NAMES aws/kms/KMSClient.h) -IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND HAVE_AWS_HEADERS) - # AWS C++ SDK installed +IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND AWS_CPP_SDK_INCLUDE_DIR) + # AWS C++ SDK installed + INCLUDE_DIRECTORIES(${AWS_CPP_SDK_INCLUDE_DIR}) SET(AWS_SDK_LIBS ${AWS_CPP_SDK_CORE} ${AWS_CPP_SDK_KMS}) ELSE() # Build from source, using ExternalProject_Add @@ -95,7 +96,7 @@ ELSE() ENDIF() ENDIF() IF(MSVC) - SET(EXTRA_SDK_CMAKE_FLAGS -DCMAKE_CXX_FLAGS_DEBUGOPT="" -DCMAKE_EXE_LINKER_FLAGS_DEBUGOPT="" "-DCMAKE_CXX_FLAGS=/wd4530 /WX-") + SET(EXTRA_SDK_CMAKE_FLAGS -DCMAKE_CXX_FLAGS_DEBUGOPT="" -DCMAKE_EXE_LINKER_FLAGS_DEBUGOPT="" "-DCMAKE_CXX_FLAGS=/wd4530 /wd4577 /WX-") ENDIF() IF(CMAKE_CXX_COMPILER) SET(EXTRA_SDK_CMAKE_FLAGS ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}) @@ -128,11 +129,6 @@ ELSE() ADD_DEPENDENCIES(${lib} aws_sdk_cpp) 
SET(loc "${CMAKE_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) - IF(WIN32) - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "bcrypt;winhttp;wininet;userenv") - ELSE() - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${SSL_LIBRARIES};${CURL_LIBRARIES};${UUID_LIBRARIES}") - ENDIF() ENDFOREACH() IF(CMAKE_SYSTEM_NAME MATCHES "Linux") @@ -145,4 +141,12 @@ ENDIF() ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}") -TARGET_LINK_LIBRARIES(aws_key_management ${AWS_SDK_LIBS}) + +IF(WIN32) + SET(AWS_CPP_SDK_DEPENDENCIES bcrypt winhttp wininet userenv version) +ELSE() + SET(AWS_CPP_SDK_DEPENDENCIES ${SSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES}) +ENDIF() + +TARGET_LINK_LIBRARIES(aws_key_management ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}) + From 31896aa6e21fd112b929350156b2995b4033506d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 31 Mar 2017 15:25:35 +0200 Subject: [PATCH 191/233] put all aws_key_management plugin files into plugin/aws_key_management that is, download AWS SDK there, not into the builddir root. and .gitignore them all. 
--- .gitignore | 3 +++ plugin/aws_key_management/CMakeLists.txt | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 5d882f268ff..44832c99c6b 100644 --- a/.gitignore +++ b/.gitignore @@ -91,6 +91,9 @@ pcre/pcre_chartables.c pcre/pcregrep pcre/pcretest pcre/test*grep +plugin/aws_key_management/aws-sdk-cpp +plugin/aws_key_management/aws_sdk_cpp +plugin/aws_key_management/aws_sdk_cpp-prefix scripts/comp_sql scripts/make_binary_distribution scripts/msql2mysql diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index 31ddcd7e038..38e1b35d949 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -108,7 +108,7 @@ ELSE() GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git" GIT_TAG "1.0.8" UPDATE_COMMAND "" - SOURCE_DIR "${CMAKE_BINARY_DIR}/aws-sdk-cpp" + SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp" CMAKE_ARGS -DBUILD_ONLY=kms -DBUILD_SHARED_LIBS=OFF @@ -118,7 +118,7 @@ ELSE() "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}" "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}" ${EXTRA_SDK_CMAKE_FLAGS} - -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}/aws_sdk_cpp + -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp TEST_COMMAND "" ) SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) @@ -127,7 +127,7 @@ ELSE() FOREACH(lib ${AWS_SDK_LIBS}) ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) ADD_DEPENDENCIES(${lib} aws_sdk_cpp) - SET(loc "${CMAKE_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") + SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) ENDFOREACH() @@ -136,7 +136,7 @@ ELSE() SET(AWS_SDK_LIBS -Wl,--whole-archive ${AWS_SDK_LIBS} -Wl,--no-whole-archive) ENDIF() 
SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) - INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/aws_sdk_cpp/include) + INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/include) ENDIF() ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized From ac8218a0be280ac834904e5579554d9ea3f92aeb Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 31 Mar 2017 17:40:42 +0200 Subject: [PATCH 192/233] fix Ninja builds for AWS SDK specify BUILD_BYPRODUCTS, ninja needs it --- plugin/aws_key_management/CMakeLists.txt | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index 38e1b35d949..1ad96dd9f19 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -102,6 +102,17 @@ ELSE() SET(EXTRA_SDK_CMAKE_FLAGS ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}) ENDIF() + SET(byproducts ) + # We do not need to build the whole SDK , just 2 of its libs + set(AWS_SDK_LIBS aws-cpp-sdk-core aws-cpp-sdk-kms) + FOREACH(lib ${AWS_SDK_LIBS}) + ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) + ADD_DEPENDENCIES(${lib} aws_sdk_cpp) + SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") + SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) + SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) + ENDFOREACH() + SET(AWS_SDK_PATCH_COMMAND ) ExternalProject_Add( aws_sdk_cpp @@ -109,7 +120,8 @@ ELSE() GIT_TAG "1.0.8" UPDATE_COMMAND "" SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp" - CMAKE_ARGS + ${byproducts} + CMAKE_ARGS -DBUILD_ONLY=kms -DBUILD_SHARED_LIBS=OFF -DFORCE_SHARED_CRT=OFF @@ -122,14 +134,6 @@ ELSE() TEST_COMMAND "" ) SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) - # We do not need to build the whole SDK , just 2 of its libs - set(AWS_SDK_LIBS 
aws-cpp-sdk-core aws-cpp-sdk-kms) - FOREACH(lib ${AWS_SDK_LIBS}) - ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) - ADD_DEPENDENCIES(${lib} aws_sdk_cpp) - SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) - ENDFOREACH() IF(CMAKE_SYSTEM_NAME MATCHES "Linux") # Need whole-archive , otherwise static libraries are not linked @@ -149,4 +153,3 @@ ELSE() ENDIF() TARGET_LINK_LIBRARIES(aws_key_management ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}) - From 63798a6ea8bb9074919ba0025074200bce6f940f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 13 Mar 2017 10:21:00 +0100 Subject: [PATCH 193/233] remove DB_TYPE_ROCKSDB --- sql/handler.h | 3 +-- storage/rocksdb/ha_rocksdb.cc | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/sql/handler.h b/sql/handler.h index c418429d10c..028f40e5dab 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -434,8 +434,7 @@ enum legacy_db_type DB_TYPE_PERFORMANCE_SCHEMA=28, DB_TYPE_ARIA=42, DB_TYPE_TOKUDB=43, - DB_TYPE_ROCKSDB=44, - DB_TYPE_FIRST_DYNAMIC=45, + DB_TYPE_FIRST_DYNAMIC=44, DB_TYPE_DEFAULT=127 // Must be last }; /* diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 06b82dd2dc6..25f42db82f3 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3304,7 +3304,6 @@ static int rocksdb_init_func(void *const p) { rocksdb_hton->recover = rocksdb_recover; rocksdb_hton->commit = rocksdb_commit; rocksdb_hton->rollback = rocksdb_rollback; - rocksdb_hton->db_type = DB_TYPE_ROCKSDB; rocksdb_hton->show_status = rocksdb_show_status; rocksdb_hton->start_consistent_snapshot = rocksdb_start_tx_and_assign_read_view; From 76a262cdf879c9f496f3026e15d251ad33df5516 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 10 Mar 2017 19:41:35 +0100 Subject: [PATCH 194/233] remove my_hash_const_element(), use Hash_set in C++ code --- 
include/hash.h | 1 - mysys/hash.c | 8 ----- sql/sql_hset.h | 14 ++++++-- storage/rocksdb/ha_rocksdb.cc | 65 +++++++++++++---------------------- 4 files changed, 36 insertions(+), 52 deletions(-) diff --git a/include/hash.h b/include/hash.h index 6b379cdab59..892922d81a3 100644 --- a/include/hash.h +++ b/include/hash.h @@ -74,7 +74,6 @@ my_bool my_hash_init2(HASH *hash, uint growth_size, CHARSET_INFO *charset, void my_hash_free(HASH *tree); void my_hash_reset(HASH *hash); uchar *my_hash_element(HASH *hash, ulong idx); -const uchar *my_hash_const_element(const HASH *hash, ulong idx); uchar *my_hash_search(const HASH *info, const uchar *key, size_t length); uchar *my_hash_search_using_hash_value(const HASH *info, my_hash_value_type hash_value, diff --git a/mysys/hash.c b/mysys/hash.c index 07f9f7b030f..ad01afba29e 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -756,14 +756,6 @@ uchar *my_hash_element(HASH *hash, ulong idx) } -const uchar *my_hash_const_element(const HASH *hash, ulong idx) -{ - if (idx < hash->records) - return dynamic_element(&hash->array,idx,const HASH_LINK*)->data; - return 0; -} - - /* Replace old row with new row. This should only be used when key isn't changed diff --git a/sql/sql_hset.h b/sql/sql_hset.h index dc3bd487ce5..4dfddf898f0 100644 --- a/sql/sql_hset.h +++ b/sql/sql_hset.h @@ -32,10 +32,12 @@ public: Constructs an empty hash. Does not allocate memory, it is done upon the first insert. Thus does not cause or return errors. */ - Hash_set(uchar *(*K)(const T *, size_t *, my_bool)) + Hash_set(uchar *(*K)(const T *, size_t *, my_bool), + CHARSET_INFO *cs= &my_charset_bin) { my_hash_clear(&m_hash); m_hash.get_key= (my_hash_get_key)K; + m_hash.charset= cs; } /** Destroy the hash by freeing the buckets table. 
Does @@ -56,7 +58,7 @@ public: */ bool insert(T *value) { - my_hash_init_opt(&m_hash, &my_charset_bin, START_SIZE, 0, 0, + my_hash_init_opt(&m_hash, m_hash.charset, START_SIZE, 0, 0, m_hash.get_key, 0, MYF(0)); size_t key_len; uchar *v= reinterpret_cast(value); @@ -65,6 +67,10 @@ public: return my_hash_insert(&m_hash, v); return FALSE; } + bool remove(T *value) + { + return my_hash_delete(&m_hash, reinterpret_cast(value)); + } T *find(const void *key, size_t klen) const { return (T*)my_hash_search(&m_hash, reinterpret_cast(key), klen); @@ -73,6 +79,10 @@ public: bool is_empty() const { return m_hash.records == 0; } /** Returns the number of unique elements. */ size_t size() const { return static_cast(m_hash.records); } + const T* at(size_t i) const + { + return reinterpret_cast(my_hash_element(const_cast(&m_hash), i)); + } /** An iterator over hash elements. Is not insert-stable. */ class Iterator { diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 25f42db82f3..8028df53a45 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -43,6 +43,7 @@ #include "./my_stacktrace.h" #include "./sql_audit.h" #include "./sql_table.h" +#include "./sql_hset.h" #include #ifdef MARIAROCKS_NOT_YET #include @@ -204,28 +205,23 @@ namespace // anonymous namespace = not visible outside this source file { const ulong TABLE_HASH_SIZE = 32; +typedef Hash_set Rdb_table_set; struct Rdb_open_tables_map { /* Hash table used to track the handlers of open tables */ - my_core::HASH m_hash; + Rdb_table_set m_hash; /* The mutex used to protect the hash table */ mutable mysql_mutex_t m_mutex; - void init_hash(void) { - (void)my_hash_init(&m_hash, my_core::system_charset_info, TABLE_HASH_SIZE, - 0, 0, (my_hash_get_key)Rdb_open_tables_map::get_hash_key, - 0, 0); - } - - void free_hash(void) { my_hash_free(&m_hash); } - - static uchar *get_hash_key(Rdb_table_handler *const table_handler, + static uchar *get_hash_key(const Rdb_table_handler *const 
table_handler, size_t *const length, my_bool not_used MY_ATTRIBUTE((__unused__))); Rdb_table_handler *get_table_handler(const char *const table_name); void release_table_handler(Rdb_table_handler *const table_handler); + Rdb_open_tables_map() : m_hash(get_hash_key, system_charset_info) { } + std::vector get_table_names(void) const; }; @@ -1336,7 +1332,7 @@ rdb_get_rocksdb_write_options(my_core::THD *const thd) { */ uchar * -Rdb_open_tables_map::get_hash_key(Rdb_table_handler *const table_handler, +Rdb_open_tables_map::get_hash_key(const Rdb_table_handler *const table_handler, size_t *const length, my_bool not_used MY_ATTRIBUTE((__unused__))) { *length = table_handler->m_table_name_length; @@ -2454,12 +2450,11 @@ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) replication progress. */ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) +{ #ifdef MARIAROCKS_NOT_YET // This is "ASYNC_COMMIT" feature which is only in webscalesql -// for now, define async=false below: -#endif -{ bool async=false; +#endif Rdb_transaction *&tx = get_tx_from_thd(thd); if (!tx->can_prepare()) { @@ -2805,10 +2800,9 @@ public: int64_t curr_time; rdb->GetEnv()->GetCurrentTime(&curr_time); - THD *thd = tx->get_thd(); char buffer[1024]; #ifdef MARIAROCKS_NOT_YET - thd_security_context(thd, buffer, sizeof buffer, 0); + thd_security_context(tx->get_thd(), buffer, sizeof buffer, 0); #endif m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" "%s\n" @@ -2912,6 +2906,7 @@ std::vector rdb_get_all_trx_info() { return trx_info; } +#ifdef MARIAROCKS_NOT_YET /* Generate the snapshot status table */ static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, stat_print_fn *const stat_print) { @@ -2923,6 +2918,7 @@ static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(), stat_print); } +#endif /* This is called for SHOW ENGINE ROCKSDB 
STATUS|LOGS|etc. @@ -3292,7 +3288,6 @@ static int rocksdb_init_func(void *const p) { mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex, MY_MUTEX_INIT_FAST); - rdb_open_tables.init_hash(); Rdb_transaction::init_mutex(); rocksdb_hton->state = SHOW_OPTION_YES; @@ -3362,7 +3357,6 @@ static int rocksdb_init_func(void *const p) { // mmap_reads and direct_reads are both on. (NO_LINT_DEBUG) sql_print_error("RocksDB: Can't enable both use_direct_reads " "and allow_mmap_reads\n"); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3371,7 +3365,6 @@ static int rocksdb_init_func(void *const p) { // See above comment for allow_mmap_reads. (NO_LINT_DEBUG) sql_print_error("RocksDB: Can't enable both use_direct_writes " "and allow_mmap_writes\n"); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3397,7 +3390,6 @@ static int rocksdb_init_func(void *const p) { std::string err_text = status.ToString(); sql_print_error("RocksDB: Error listing column families: %s", err_text.c_str()); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } } else @@ -3453,7 +3445,6 @@ static int rocksdb_init_func(void *const p) { rocksdb_default_cf_options, rocksdb_override_cf_options)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize CF options map."); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3509,7 +3500,6 @@ static int rocksdb_init_func(void *const p) { sql_print_error("RocksDB: compatibility check against existing database " "options failed. 
%s", status.ToString().c_str()); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3519,7 +3509,6 @@ static int rocksdb_init_func(void *const p) { if (!status.ok()) { std::string err_text = status.ToString(); sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str()); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } cf_manager.init(&rocksdb_cf_options_map, &cf_handles); @@ -3527,21 +3516,18 @@ static int rocksdb_init_func(void *const p) { if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize data dictionary."); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } if (binlog_manager.init(&dict_manager)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize binlog manager."); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize DDL manager."); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3563,7 +3549,6 @@ static int rocksdb_init_func(void *const p) { const std::string err_text = status.ToString(); // NO_LINT_DEBUG sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3576,7 +3561,6 @@ static int rocksdb_init_func(void *const p) { if (err != 0) { sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)", err); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3589,7 +3573,6 @@ static int rocksdb_init_func(void *const p) { if (err != 0) { sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)", err); - rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3657,13 +3640,17 @@ static int rocksdb_done_func(void *const p) { sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", err); } - if 
(rdb_open_tables.m_hash.records) { + if (rdb_open_tables.m_hash.size()) { // Looks like we are getting unloaded and yet we have some open tables // left behind. error = 1; } - rdb_open_tables.free_hash(); + /* + destructors for static objects can be called at _exit(), + but we want to free the memory at dlclose() + */ + rdb_open_tables.m_hash.~Rdb_table_set(); mysql_mutex_destroy(&rdb_open_tables.m_mutex); mysql_mutex_destroy(&rdb_sysvars_mutex); @@ -3729,8 +3716,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { // First, look up the table in the hash map. RDB_MUTEX_LOCK_CHECK(m_mutex); - if (!(table_handler = reinterpret_cast(my_hash_search( - &m_hash, reinterpret_cast(table_name), length)))) { + if (!m_hash.size() || !(table_handler = m_hash.find(table_name, length))) { // Since we did not find it in the hash map, attempt to create and add it // to the hash map. if (!(table_handler = reinterpret_cast(my_multi_malloc( @@ -3746,7 +3732,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) { table_handler->m_table_name = tmp_name; strmov(table_handler->m_table_name, table_name); - if (my_hash_insert(&m_hash, reinterpret_cast(table_handler))) { + if (m_hash.insert(table_handler)) { // Inserting into the hash map failed. 
RDB_MUTEX_UNLOCK_CHECK(m_mutex); my_free(table_handler); @@ -3776,13 +3762,11 @@ std::vector Rdb_open_tables_map::get_table_names(void) const { std::vector names; RDB_MUTEX_LOCK_CHECK(m_mutex); - for (i = 0; (table_handler = reinterpret_cast( - my_hash_const_element(&m_hash, i))); - i++) { + for (i = 0; (table_handler = m_hash.at(i)); i++) { DBUG_ASSERT(table_handler != nullptr); names.push_back(table_handler->m_table_name); } - DBUG_ASSERT(i == m_hash.records); + DBUG_ASSERT(i == m_hash.size()); RDB_MUTEX_UNLOCK_CHECK(m_mutex); return names; @@ -3937,9 +3921,8 @@ void Rdb_open_tables_map::release_table_handler( DBUG_ASSERT(table_handler != nullptr); DBUG_ASSERT(table_handler->m_ref_count > 0); if (!--table_handler->m_ref_count) { - // Last rereference was released. Tear down the hash entry. - const auto ret MY_ATTRIBUTE((__unused__)) = - my_hash_delete(&m_hash, reinterpret_cast(table_handler)); + // Last reference was released. Tear down the hash entry. + const auto ret MY_ATTRIBUTE((__unused__)) = m_hash.remove(table_handler); DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted my_core::thr_lock_delete(&table_handler->m_thr_lock); my_free(table_handler); From 9ce639af5217f877fd9e4b93d1b0ecd0ef0dfd80 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 10 Mar 2017 19:41:48 +0100 Subject: [PATCH 195/233] don't export all charsets to plugins don't use internal server collation symbol names, use collation properties and collation IDs, they are much more stable. 
--- include/m_ctype.h | 156 +++++++++--------- storage/rocksdb/ha_rocksdb.cc | 8 +- storage/rocksdb/ha_rocksdb.h | 11 ++ .../rocksdb/r/add_index_inplace.result | 2 +- .../mysql-test/rocksdb/r/collation.result | 40 ++--- storage/rocksdb/rdb_datadic.cc | 21 +-- 6 files changed, 125 insertions(+), 113 deletions(-) diff --git a/include/m_ctype.h b/include/m_ctype.h index 1639332f5f7..04a82953f0a 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -360,7 +360,7 @@ struct my_collation_handler_st }; extern MY_COLLATION_HANDLER my_collation_8bit_bin_handler; -extern MYSQL_PLUGIN_IMPORT MY_COLLATION_HANDLER my_collation_8bit_simple_ci_handler; +extern MY_COLLATION_HANDLER my_collation_8bit_simple_ci_handler; extern MY_COLLATION_HANDLER my_collation_8bit_nopad_bin_handler; extern MY_COLLATION_HANDLER my_collation_8bit_simple_nopad_ci_handler; extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler; @@ -586,83 +586,83 @@ extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_nopad; extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_filename; extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_chinese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_big5_chinese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp1250_czech_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_japanese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_cp932_japanese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st 
my_charset_eucjpms_japanese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_eucjpms_japanese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_korean_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_euckr_korean_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_chinese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gb2312_chinese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_chinese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_gbk_chinese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin1_german2_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_latin2_czech_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_japanese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_sjis_japanese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_thai_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st 
my_charset_tis620_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_tis620_thai_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_mysql500_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_unicode_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_unicode_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ucs2_general_mysql500_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_japanese_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_ujis_japanese_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_unicode_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_unicode_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf16le_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_bin; -extern MYSQL_PLUGIN_IMPORT 
struct charset_info_st my_charset_utf32_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_unicode_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_unicode_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf32_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_general_mysql500_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_unicode_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8_unicode_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_general_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_nopad_bin; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_general_nopad_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_unicode_ci; -extern MYSQL_PLUGIN_IMPORT struct charset_info_st my_charset_utf8mb4_unicode_nopad_ci; +extern struct charset_info_st my_charset_big5_bin; +extern struct charset_info_st my_charset_big5_chinese_ci; +extern struct charset_info_st my_charset_big5_nopad_bin; +extern struct charset_info_st my_charset_big5_chinese_nopad_ci; +extern struct charset_info_st my_charset_cp1250_czech_ci; +extern struct charset_info_st my_charset_cp932_bin; +extern struct charset_info_st my_charset_cp932_japanese_ci; +extern struct charset_info_st my_charset_cp932_nopad_bin; +extern struct charset_info_st my_charset_cp932_japanese_nopad_ci; +extern struct charset_info_st my_charset_eucjpms_bin; +extern struct charset_info_st 
my_charset_eucjpms_japanese_ci; +extern struct charset_info_st my_charset_eucjpms_nopad_bin; +extern struct charset_info_st my_charset_eucjpms_japanese_nopad_ci; +extern struct charset_info_st my_charset_euckr_bin; +extern struct charset_info_st my_charset_euckr_korean_ci; +extern struct charset_info_st my_charset_euckr_nopad_bin; +extern struct charset_info_st my_charset_euckr_korean_nopad_ci; +extern struct charset_info_st my_charset_gb2312_bin; +extern struct charset_info_st my_charset_gb2312_chinese_ci; +extern struct charset_info_st my_charset_gb2312_nopad_bin; +extern struct charset_info_st my_charset_gb2312_chinese_nopad_ci; +extern struct charset_info_st my_charset_gbk_bin; +extern struct charset_info_st my_charset_gbk_chinese_ci; +extern struct charset_info_st my_charset_gbk_nopad_bin; +extern struct charset_info_st my_charset_gbk_chinese_nopad_ci; +extern struct charset_info_st my_charset_latin1_bin; +extern struct charset_info_st my_charset_latin1_nopad_bin; +extern struct charset_info_st my_charset_latin1_german2_ci; +extern struct charset_info_st my_charset_latin2_czech_ci; +extern struct charset_info_st my_charset_sjis_bin; +extern struct charset_info_st my_charset_sjis_japanese_ci; +extern struct charset_info_st my_charset_sjis_nopad_bin; +extern struct charset_info_st my_charset_sjis_japanese_nopad_ci; +extern struct charset_info_st my_charset_tis620_bin; +extern struct charset_info_st my_charset_tis620_thai_ci; +extern struct charset_info_st my_charset_tis620_nopad_bin; +extern struct charset_info_st my_charset_tis620_thai_nopad_ci; +extern struct charset_info_st my_charset_ucs2_bin; +extern struct charset_info_st my_charset_ucs2_general_ci; +extern struct charset_info_st my_charset_ucs2_nopad_bin; +extern struct charset_info_st my_charset_ucs2_general_nopad_ci; +extern struct charset_info_st my_charset_ucs2_general_mysql500_ci; +extern struct charset_info_st my_charset_ucs2_unicode_ci; +extern struct charset_info_st 
my_charset_ucs2_unicode_nopad_ci; +extern struct charset_info_st my_charset_ucs2_general_mysql500_ci; +extern struct charset_info_st my_charset_ujis_bin; +extern struct charset_info_st my_charset_ujis_japanese_ci; +extern struct charset_info_st my_charset_ujis_nopad_bin; +extern struct charset_info_st my_charset_ujis_japanese_nopad_ci; +extern struct charset_info_st my_charset_utf16_bin; +extern struct charset_info_st my_charset_utf16_general_ci; +extern struct charset_info_st my_charset_utf16_unicode_ci; +extern struct charset_info_st my_charset_utf16_unicode_nopad_ci; +extern struct charset_info_st my_charset_utf16le_bin; +extern struct charset_info_st my_charset_utf16le_general_ci; +extern struct charset_info_st my_charset_utf16_general_nopad_ci; +extern struct charset_info_st my_charset_utf16_nopad_bin; +extern struct charset_info_st my_charset_utf16le_nopad_bin; +extern struct charset_info_st my_charset_utf16le_general_nopad_ci; +extern struct charset_info_st my_charset_utf32_bin; +extern struct charset_info_st my_charset_utf32_general_ci; +extern struct charset_info_st my_charset_utf32_unicode_ci; +extern struct charset_info_st my_charset_utf32_unicode_nopad_ci; +extern struct charset_info_st my_charset_utf32_nopad_bin; +extern struct charset_info_st my_charset_utf32_general_nopad_ci; +extern struct charset_info_st my_charset_utf8_bin; +extern struct charset_info_st my_charset_utf8_nopad_bin; +extern struct charset_info_st my_charset_utf8_general_nopad_ci; +extern struct charset_info_st my_charset_utf8_general_mysql500_ci; +extern struct charset_info_st my_charset_utf8_unicode_ci; +extern struct charset_info_st my_charset_utf8_unicode_nopad_ci; +extern struct charset_info_st my_charset_utf8mb4_bin; +extern struct charset_info_st my_charset_utf8mb4_general_ci; +extern struct charset_info_st my_charset_utf8mb4_nopad_bin; +extern struct charset_info_st my_charset_utf8mb4_general_nopad_ci; +extern struct charset_info_st my_charset_utf8mb4_unicode_ci; +extern 
struct charset_info_st my_charset_utf8mb4_unicode_nopad_ci; #define MY_UTF8MB3 "utf8" #define MY_UTF8MB4 "utf8mb4" diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 8028df53a45..c27835ac1be 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4827,8 +4827,8 @@ bool ha_rocksdb::get_error_message(const int error, String *const buf) { } /* MyRocks supports only the following collations for indexed columns */ -static const std::set RDB_INDEX_COLLATIONS = { - &my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin}; +static const std::set RDB_INDEX_COLLATIONS = { + COLLATION_BINARY, COLLATION_UTF8_BIN, COLLATION_LATIN1_BIN}; static bool rdb_is_index_collation_supported(const my_core::Field *const field) { @@ -4836,7 +4836,7 @@ rdb_is_index_collation_supported(const my_core::Field *const field) { /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING || type == MYSQL_TYPE_BLOB) { - return RDB_INDEX_COLLATIONS.find(field->charset()) != + return RDB_INDEX_COLLATIONS.find(field->charset()->number) != RDB_INDEX_COLLATIONS.end(); } return true; @@ -4971,7 +4971,7 @@ int ha_rocksdb::create_cfs( if (collation_err != "") { collation_err += ", "; } - collation_err += coll->name; + collation_err += get_charset_name(coll); } my_printf_error( ER_UNKNOWN_ERROR, "Unsupported collation on string indexed " diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 093dd007580..6e9f6b41afe 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -201,6 +201,17 @@ const char *const RDB_CF_NAME_QUALIFIER = "cfname"; #define RDB_BQUAL_SZ 1 #define RDB_XIDHDR_LEN (RDB_FORMATID_SZ + RDB_GTRID_SZ + RDB_BQUAL_SZ) +/* collations, used in MariaRocks */ +enum collations_used { + COLLATION_UTF8MB4_BIN = 46, + COLLATION_LATIN1_BIN = 47, + COLLATION_UTF16LE_BIN = 55, + COLLATION_UTF32_BIN = 61, + COLLATION_UTF16_BIN = 62, + COLLATION_BINARY = 
63, + COLLATION_UTF8_BIN = 83 +}; + /* To fix an unhandled exception we specify the upper bound as LONGLONGMAX instead of ULONGLONGMAX because the latter is -1 and causes an exception when diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result index 099356a6969..2aeeda4cfe6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -280,7 +280,7 @@ set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; set global rocksdb_strict_collation_check=1; CREATE TABLE t1 (a INT, b TEXT); ALTER TABLE t1 ADD KEY kb(b(10)); -ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (latin1_bin, binary, utf8_bin). ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result index 71fc2b98a16..8b1246b49d7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -3,9 +3,9 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (latin1_bin, binary, utf8_bin). 
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.t1.value3 Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t1.value3 Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_check=0; CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; DROP TABLE t1; @@ -22,109 +22,109 @@ SET GLOBAL rocksdb_strict_collation_exceptions=t1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t123; CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.s123.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.s123.value Use binary collation (latin1_bin, binary, utf8_bin). 
SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE xt123; CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.t123.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t123.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). 
SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). 
SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). 
SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,"; CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions='t1'; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; ALTER TABLE t1 AUTO_INCREMENT=1; DROP TABLE t1; CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; -ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; ALTER TABLE t2 ADD INDEX(value); -ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). DROP TABLE t2; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; Invalid pattern in strict_collation_exceptions: [a-b CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (binary, latin1_bin, utf8_bin). 
+ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (latin1_bin, binary, utf8_bin). DROP TABLE a, b; call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; Invalid pattern in strict_collation_exceptions: abc\ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="abc"; CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; -ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin). +ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (latin1_bin, binary, utf8_bin). 
DROP TABLE abc; SET GLOBAL rocksdb_strict_collation_exceptions=null; SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index c2862724773..255a54cbdce 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1690,7 +1690,7 @@ static int rdb_unpack_binary_or_utf8_varchar( /* Now, we need to decode used_bytes of data and append them to the value. */ - if (fpi->m_varchar_charset == &my_charset_utf8_bin) { + if (fpi->m_varchar_charset->number == COLLATION_UTF8_BIN) { if (used_bytes & 1) { /* UTF-8 characters are encoded into two-byte entities. There is no way @@ -1793,7 +1793,7 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( } // Now, need to decode used_bytes of data and append them to the value. - if (fpi->m_varchar_charset == &my_charset_utf8_bin) { + if (fpi->m_varchar_charset->number == COLLATION_UTF8_BIN) { if (used_bytes & 1) { /* UTF-8 characters are encoded into two-byte entities. There is no way @@ -2239,7 +2239,8 @@ std::array mysql_mutex_t rdb_collation_data_mutex; static bool rdb_is_collation_supported(const my_core::CHARSET_INFO *const cs) { - return (cs->coll == &my_collation_8bit_simple_ci_handler); + return cs->strxfrm_multiply==1 && cs->mbmaxlen == 1 && + !(cs->state & (MY_CS_BINSORT | MY_CS_NOPAD)); } static const Rdb_collation_codec * @@ -2255,7 +2256,7 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { Rdb_collation_codec *cur = nullptr; // Compute reverse mapping for simple collations. 
- if (cs->coll == &my_collation_8bit_simple_ci_handler) { + if (rdb_is_collation_supported(cs)) { cur = new Rdb_collation_codec; std::map> rev_map; size_t max_conflict_size = 0; @@ -2302,8 +2303,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { static int get_segment_size_from_collation(const CHARSET_INFO *const cs) { int ret; - if (cs == &my_charset_utf8mb4_bin || cs == &my_charset_utf16_bin || - cs == &my_charset_utf16le_bin || cs == &my_charset_utf32_bin) { + if (cs->number == COLLATION_UTF8MB4_BIN || cs->number == COLLATION_UTF16_BIN || + cs->number == COLLATION_UTF16LE_BIN || cs->number == COLLATION_UTF32_BIN) { /* In these collations, a character produces one weight, which is 3 bytes. Segment has 3 characters, add one byte for VARCHAR_CMP_* marker, and we @@ -2424,7 +2425,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // // See Field_blob::make_sort_key for details. m_max_image_len = - key_length + (field->charset() == &my_charset_bin + key_length + (field->charset()->number == COLLATION_BINARY ? reinterpret_cast(field) ->pack_length_no_ptr() : 0); @@ -2475,7 +2476,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, DBUG_EXECUTE_IF("myrocks_enable_unknown_collation_index_only_scans", use_unknown_collation = true;); - if (cs == &my_charset_bin) { + if (cs->number == COLLATION_BINARY) { // - SQL layer pads BINARY(N) so that it always is N bytes long. // - For VARBINARY(N), values may have different lengths, so we're using // variable-length encoding. This is also the only charset where the @@ -2483,7 +2484,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, m_unpack_func = is_varchar ? 
rdb_unpack_binary_or_utf8_varchar : rdb_unpack_binary_str; res = true; - } else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin) { + } else if (cs->number == COLLATION_LATIN1_BIN || cs->number == COLLATION_UTF8_BIN) { // For _bin collations, mem-comparable form of the string is the string // itself. @@ -2504,7 +2505,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, } else { // SQL layer pads CHAR(N) values to their maximum length. // We just store that and restore it back. - m_unpack_func = (cs == &my_charset_latin1_bin) ? rdb_unpack_binary_str + m_unpack_func = (cs->number == COLLATION_LATIN1_BIN) ? rdb_unpack_binary_str : rdb_unpack_utf8_str; } res = true; From 6e899642fe65eb3f36ff33fbb7b77052a0f216e6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 11 Mar 2017 16:25:03 +0100 Subject: [PATCH 196/233] move rocksdb specific changes into rocksdb --- include/mysql/plugin.h | 17 ----------------- sql/handler.cc | 8 -------- sql/handler.h | 1 - storage/rocksdb/build_rocksdb.cmake | 6 ++++++ storage/rocksdb/ha_rocksdb.cc | 20 ++++++++++++++++++++ storage/rocksdb/ha_rocksdb.h | 1 + 6 files changed, 27 insertions(+), 26 deletions(-) diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index 2f077d8440e..a5bfa1bbc9e 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -393,23 +393,6 @@ DECLARE_MYSQL_SYSVAR_SIMPLE(name, unsigned long long) = { \ PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ #name, comment, check, update, &varname, def, min, max, blk } -#define MYSQL_SYSVAR_UINT64_T(name, varname, opt, comment, check, update, def, min, max, blk) \ -DECLARE_MYSQL_SYSVAR_SIMPLE(name, uint64_t) = { \ - PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ - #name, comment, check, update, &varname, def, min, max, blk } - -#ifdef _WIN64 -#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ -DECLARE_MYSQL_SYSVAR_SIMPLE(name, 
size_t) = { \ - PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ - #name, comment, check, update, &varname, def, min, max, blk } -#else -#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ -DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ - PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ - #name, comment, check, update, &varname, def, min, max, blk } -#endif - #define MYSQL_SYSVAR_ENUM(name, varname, opt, comment, check, update, def, typelib) \ DECLARE_MYSQL_SYSVAR_TYPELIB(name, unsigned long) = { \ PLUGIN_VAR_ENUM | ((opt) & PLUGIN_VAR_MASK), \ diff --git a/sql/handler.cc b/sql/handler.cc index e927ba011aa..c28ab2a7bd7 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2728,14 +2728,6 @@ int handler::ha_index_first(uchar * buf) return result; } -bool handler::is_using_full_key(key_part_map keypart_map, - uint actual_key_parts) -{ - return (keypart_map == HA_WHOLE_KEY) || - (keypart_map == ((key_part_map(1) << actual_key_parts) - - 1)); -} - int handler::ha_index_last(uchar * buf) { int result; diff --git a/sql/handler.h b/sql/handler.h index 028f40e5dab..830e45a139f 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3193,7 +3193,6 @@ public: size_t size) { return 0; } - bool is_using_full_key(key_part_map keypart_map, uint actual_key_parts); virtual int read_range_first(const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted); diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index f29b4e5fabe..0cbca0f3b05 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -29,6 +29,12 @@ else() endif() endif() +include (CheckTypeSize) +check_type_size(size_t SIZEOF_SIZE_T) +check_type_size(uint64_t SIZEOF_UINT64_T) +set_property(SOURCE ha_rocksdb.cc APPEND PROPERTY COMPILE_DEFINITIONS + SIZEOF_SIZE_T=${SIZEOF_SIZE_T} SIZEOF_UINT64_T=${SIZEOF_UINT64_T}) + # Optional compression libraries. 
foreach(compression_lib LZ4 BZIP2 ZSTD snappy) diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index c27835ac1be..72b23f2655c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -454,6 +454,18 @@ const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; const int RDB_MAX_CHECKSUMS_PCT = 100; +#if SIZEOF_ULONG == SIZEOF_SIZE_T +#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONG +#else +#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONGLONG +#endif + +#if SIZEOF_ULONG == SIZEOF_UINT64_T +#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONG +#else +#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONGLONG +#endif + // TODO: 0 means don't wait at all, and we don't support it yet? static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, "Number of seconds to wait for lock", nullptr, @@ -5922,6 +5934,14 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { return HA_ERR_END_OF_FILE; } +bool ha_rocksdb::is_using_full_key(key_part_map keypart_map, + uint actual_key_parts) +{ + return (keypart_map == HA_WHOLE_KEY) || + (keypart_map == ((key_part_map(1) << actual_key_parts) + - 1)); +} + /* ha_rocksdb::read_range_first overrides handler::read_range_first. 
The only difference from handler::read_range_first is that diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 6e9f6b41afe..5852ad4e954 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -866,6 +866,7 @@ public: const key_range *end_key) MY_ATTRIBUTE((__warn_unused_result__)); + bool is_using_full_key(key_part_map keypart_map, uint actual_key_parts); int read_range_first(const key_range *const start_key, const key_range *const end_key, bool eq_range, bool sorted) override From 143e771deef47e4afdd5d8dcdaddc424f919af83 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 13 Mar 2017 10:18:15 +0100 Subject: [PATCH 197/233] ha_start_consistent_snapshot() did not check for errors --- sql/handler.cc | 13 ++++++++++--- sql/sql_parse.cc | 4 +--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/sql/handler.cc b/sql/handler.cc index c28ab2a7bd7..a6016646d3c 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2212,7 +2212,8 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin, if (hton->state == SHOW_OPTION_YES && hton->start_consistent_snapshot) { - hton->start_consistent_snapshot(hton, thd); + if (hton->start_consistent_snapshot(hton, thd)) + return TRUE; *((bool *)arg)= false; } return FALSE; @@ -2220,7 +2221,7 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin, int ha_start_consistent_snapshot(THD *thd) { - bool warn= true; + bool err, warn= true; /* Holding the LOCK_commit_ordered mutex ensures that we get the same @@ -2230,9 +2231,15 @@ int ha_start_consistent_snapshot(THD *thd) have a consistent binlog position. 
*/ mysql_mutex_lock(&LOCK_commit_ordered); - plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn); + err= plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn); mysql_mutex_unlock(&LOCK_commit_ordered); + if (err) + { + ha_rollback_trans(thd, true); + return 1; + } + /* Same idea as when one wants to CREATE TABLE in one engine which does not exist: diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index bbf3bc00a9d..590d2dfe681 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5515,9 +5515,7 @@ end_with_restore_list: (longlong) thd->thread_id); goto error; } - /* MyRocks: hton->start_consistent_snapshot call may fail with an error */ - if (!thd->is_error()) - my_ok(thd); + my_ok(thd); break; case SQLCOM_COMMIT: { From d6d994bf42be0b855e3eb1c4d672a4871a186652 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 12 Mar 2017 11:18:51 +0100 Subject: [PATCH 198/233] remove two redundant *.inc files to restart a server namely, restart_mysqld_with_option.inc and kill_and_restart_mysqld.inc - use restart_mysqld.inc instead. Also remove innodb_wl6501_crash_stripped.inc that wasn't used anywhere. 
--- .../include/kill_and_restart_mysqld.inc | 19 ------------ .../include/restart_mysqld_with_option.inc | 31 ------------------- .../suite/innodb/r/autoinc_persist.result | 4 --- mysql-test/suite/innodb/r/innodb-blob.result | 3 -- .../suite/innodb/r/innodb_bug53756.result | 1 - .../suite/innodb/r/innodb_bug59641.result | 1 - .../suite/innodb/r/log_file_size.result | 2 -- .../suite/innodb/r/read_only_recovery.result | 1 - mysql-test/suite/innodb/r/xa_recovery.result | 1 - .../suite/innodb/t/autoinc_persist.test | 9 +++--- mysql-test/suite/innodb/t/innodb-blob.test | 9 ++++-- .../suite/innodb/t/innodb_bug53756.test | 3 +- .../suite/innodb/t/innodb_bug59641.test | 3 +- .../suite/innodb/t/log_alter_table.test | 3 ++ mysql-test/suite/innodb/t/log_file_size.test | 4 +-- .../suite/innodb/t/read_only_recovery.test | 4 ++- mysql-test/suite/innodb/t/xa_recovery.test | 3 +- .../suite/innodb_fts/r/crash_recovery.result | 3 -- .../suite/innodb_fts/t/crash_recovery.test | 6 ++-- .../rocksdb/t/compression_zstd.test | 4 +-- .../rocksdb/t/rocksdb_cf_options.test | 4 +-- 21 files changed, 32 insertions(+), 86 deletions(-) delete mode 100644 mysql-test/include/kill_and_restart_mysqld.inc delete mode 100644 mysql-test/include/restart_mysqld_with_option.inc diff --git a/mysql-test/include/kill_and_restart_mysqld.inc b/mysql-test/include/kill_and_restart_mysqld.inc deleted file mode 100644 index f2ac9b504d2..00000000000 --- a/mysql-test/include/kill_and_restart_mysqld.inc +++ /dev/null @@ -1,19 +0,0 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect - -if ($restart_parameters) -{ - --echo # Kill and restart: $restart_parameters - --exec echo "restart: $restart_parameters" > $_expect_file_name -} -if (!$restart_parameters) -{ - --echo # Kill and restart - --exec echo "restart" > $_expect_file_name -} - ---shutdown_server 0 ---source include/wait_until_disconnected.inc ---enable_reconnect ---source 
include/wait_until_connected_again.inc ---disable_reconnect diff --git a/mysql-test/include/restart_mysqld_with_option.inc b/mysql-test/include/restart_mysqld_with_option.inc deleted file mode 100644 index 4250b368b1a..00000000000 --- a/mysql-test/include/restart_mysqld_with_option.inc +++ /dev/null @@ -1,31 +0,0 @@ - -if ($rpl_inited) -{ - if (!$allow_rpl_inited) - { - --die ERROR IN TEST: This script does not support replication - } -} - -# Write file to make mysql-test-run.pl expect the "crash", but don't start -# it until it's told to ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect ---exec echo "wait" > $_expect_file_name - -# Send shutdown to the connected server and give -# it 10 seconds to die before zapping it -shutdown_server 10; - -# Write file to make mysql-test-run.pl start up the server again ---exec echo "restart:$_mysqld_option" > $_expect_file_name - -# Turn on reconnect ---enable_reconnect - -# Call script that will poll the server waiting for it to be back online again ---source include/wait_until_connected_again.inc - -# Turn off reconnect again ---disable_reconnect - diff --git a/mysql-test/suite/innodb/r/autoinc_persist.result b/mysql-test/suite/innodb/r/autoinc_persist.result index 814f3d32e60..e61262076ed 100644 --- a/mysql-test/suite/innodb/r/autoinc_persist.result +++ b/mysql-test/suite/innodb/r/autoinc_persist.result @@ -432,7 +432,6 @@ DELETE FROM t7 WHERE a = 100000200; set global innodb_flush_log_at_trx_commit=1; INSERT INTO t9 VALUES(100000000200); DELETE FROM t9 WHERE a = 100000000200; -# Kill and restart INSERT INTO t1 VALUES(0); SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; Expect 126 @@ -498,7 +497,6 @@ SELECT * FROM t19; a 1 2 -# Kill and restart INSERT INTO t1 VALUES(0), (0); SELECT * FROM t1; a @@ -639,7 +637,6 @@ BEGIN; # Without the fix in page_create_empty() the counter value would be lost # when ROLLBACK deletes the last row. 
ROLLBACK; -# Kill and restart INSERT INTO t3 VALUES(0); SELECT MAX(a) AS `Expect 120` FROM t3; Expect 120 @@ -913,7 +910,6 @@ UPDATE t33 SET a = 10 WHERE a = 1; INSERT INTO t33 VALUES(2, NULL); ERROR 23000: Duplicate entry '2' for key 'PRIMARY' COMMIT; -# Kill and restart # This will not insert 0 INSERT INTO t31(a) VALUES(6), (0); SELECT * FROM t31; diff --git a/mysql-test/suite/innodb/r/innodb-blob.result b/mysql-test/suite/innodb/r/innodb-blob.result index afdaca9acd2..ec37492c279 100644 --- a/mysql-test/suite/innodb/r/innodb-blob.result +++ b/mysql-test/suite/innodb/r/innodb-blob.result @@ -43,7 +43,6 @@ a 3 BEGIN; INSERT INTO t2 VALUES (42); -# Kill and restart disconnect con1; disconnect con2; connection default; @@ -98,7 +97,6 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; info UPDATE t3 SET c=REPEAT('i',3000) WHERE a=2 -# Kill and restart disconnect con2; connection default; ERROR HY000: Lost connection to MySQL server during query @@ -130,7 +128,6 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; info UPDATE t3 SET c=REPEAT('j',3000) WHERE a=2 -# Kill and restart disconnect con2; connection default; ERROR HY000: Lost connection to MySQL server during query diff --git a/mysql-test/suite/innodb/r/innodb_bug53756.result b/mysql-test/suite/innodb/r/innodb_bug53756.result index 9809682a4b2..06fa96c2f81 100644 --- a/mysql-test/suite/innodb/r/innodb_bug53756.result +++ b/mysql-test/suite/innodb/r/innodb_bug53756.result @@ -77,7 +77,6 @@ pk c1 4 44 START TRANSACTION; INSERT INTO bug_53756 VALUES (666,666); -# Kill and restart disconnect con1; disconnect con2; disconnect con3; diff --git a/mysql-test/suite/innodb/r/innodb_bug59641.result b/mysql-test/suite/innodb/r/innodb_bug59641.result index 2c042585745..8bf574e2bec 100644 --- a/mysql-test/suite/innodb/r/innodb_bug59641.result +++ b/mysql-test/suite/innodb/r/innodb_bug59641.result @@ -17,7 +17,6 @@ 
UPDATE t SET b=4*a WHERE a=32; XA END '789'; XA PREPARE '789'; CONNECT con3,localhost,root,,; -# Kill and restart SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; a b diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result index e07dba67a7b..b0ab6e38395 100644 --- a/mysql-test/suite/innodb/r/log_file_size.result +++ b/mysql-test/suite/innodb/r/log_file_size.result @@ -1,13 +1,11 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; BEGIN; INSERT INTO t1 VALUES (42); -# Kill and restart: --innodb-log-file-size=6M SELECT * FROM t1; a INSERT INTO t1 VALUES (42); BEGIN; DELETE FROM t1; -# Kill and restart: --innodb-log-files-in-group=3 --innodb-log-file-size=5M SELECT * FROM t1; a 42 diff --git a/mysql-test/suite/innodb/r/read_only_recovery.result b/mysql-test/suite/innodb/r/read_only_recovery.result index 7fcbfddf33e..532749a7aae 100644 --- a/mysql-test/suite/innodb/r/read_only_recovery.result +++ b/mysql-test/suite/innodb/r/read_only_recovery.result @@ -13,7 +13,6 @@ SET GLOBAL innodb_flush_log_at_trx_commit=1; BEGIN; INSERT INTO t VALUES(0); ROLLBACK; -# Kill and restart: --innodb-force-recovery=3 disconnect con1; SELECT * FROM t; a diff --git a/mysql-test/suite/innodb/r/xa_recovery.result b/mysql-test/suite/innodb/r/xa_recovery.result index 7a9448ad9f0..a93afcb07f8 100644 --- a/mysql-test/suite/innodb/r/xa_recovery.result +++ b/mysql-test/suite/innodb/r/xa_recovery.result @@ -6,7 +6,6 @@ UPDATE t1 set a=2; XA END 'x'; XA PREPARE 'x'; connection default; -# Kill and restart disconnect con1; connect con1,localhost,root; SELECT * FROM t1 LOCK IN SHARE MODE; diff --git a/mysql-test/suite/innodb/t/autoinc_persist.test b/mysql-test/suite/innodb/t/autoinc_persist.test index 45a96f85fe1..904ed51f718 100644 --- a/mysql-test/suite/innodb/t/autoinc_persist.test +++ b/mysql-test/suite/innodb/t/autoinc_persist.test @@ -251,7 +251,8 @@ set global innodb_flush_log_at_trx_commit=1; INSERT INTO t9 
VALUES(100000000200); DELETE FROM t9 WHERE a = 100000000200; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc INSERT INTO t1 VALUES(0); SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; @@ -306,7 +307,7 @@ RENAME TABLE t9 to t19; INSERT INTO t19 VALUES(0), (0); SELECT * FROM t19; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc INSERT INTO t1 VALUES(0), (0); SELECT * FROM t1; @@ -400,7 +401,7 @@ while ($i) { --enable_query_log ROLLBACK; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc INSERT INTO t3 VALUES(0); SELECT MAX(a) AS `Expect 120` FROM t3; @@ -494,7 +495,7 @@ UPDATE t33 SET a = 10 WHERE a = 1; INSERT INTO t33 VALUES(2, NULL); COMMIT; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc --echo # This will not insert 0 INSERT INTO t31(a) VALUES(6), (0); diff --git a/mysql-test/suite/innodb/t/innodb-blob.test b/mysql-test/suite/innodb/t/innodb-blob.test index ea50af4a7fc..d2484e2175d 100644 --- a/mysql-test/suite/innodb/t/innodb-blob.test +++ b/mysql-test/suite/innodb/t/innodb-blob.test @@ -70,7 +70,8 @@ SELECT a FROM t1; BEGIN; INSERT INTO t2 VALUES (42); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con1; disconnect con2; @@ -138,7 +139,8 @@ SET DEBUG_SYNC='now WAIT_FOR have_latch'; SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con2; connection default; @@ -177,7 +179,8 @@ SET DEBUG_SYNC='now WAIT_FOR have_latch'; SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con2; 
connection default; diff --git a/mysql-test/suite/innodb/t/innodb_bug53756.test b/mysql-test/suite/innodb/t/innodb_bug53756.test index d6bccf70147..a676868aea7 100644 --- a/mysql-test/suite/innodb/t/innodb_bug53756.test +++ b/mysql-test/suite/innodb/t/innodb_bug53756.test @@ -84,7 +84,8 @@ SELECT * FROM bug_53756; START TRANSACTION; INSERT INTO bug_53756 VALUES (666,666); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc --disconnect con1 --disconnect con2 --disconnect con3 diff --git a/mysql-test/suite/innodb/t/innodb_bug59641.test b/mysql-test/suite/innodb/t/innodb_bug59641.test index 5f7528cf01a..e0d3431e45b 100644 --- a/mysql-test/suite/innodb/t/innodb_bug59641.test +++ b/mysql-test/suite/innodb/t/innodb_bug59641.test @@ -33,7 +33,8 @@ XA PREPARE '789'; CONNECT (con3,localhost,root,,); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; COMMIT; diff --git a/mysql-test/suite/innodb/t/log_alter_table.test b/mysql-test/suite/innodb/t/log_alter_table.test index f479c6695aa..bb7738d9fb8 100644 --- a/mysql-test/suite/innodb/t/log_alter_table.test +++ b/mysql-test/suite/innodb/t/log_alter_table.test @@ -4,6 +4,9 @@ # Embedded server does not support crashing --source include/not_embedded.inc +# start afresh +--source include/restart_mysqld.inc + --echo # --echo # Bug#21801423 INNODB REDO LOG DOES NOT INDICATE WHEN --echo # FILES ARE CREATED diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index 4705ca68091..d01263e3c89 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -29,7 +29,7 @@ BEGIN; INSERT INTO t1 VALUES (42); let $restart_parameters = --innodb-log-file-size=6M; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc SELECT * FROM t1; @@ 
-38,7 +38,7 @@ BEGIN; DELETE FROM t1; let $restart_parameters = --innodb-log-files-in-group=3 --innodb-log-file-size=5M; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc SELECT * FROM t1; diff --git a/mysql-test/suite/innodb/t/read_only_recovery.test b/mysql-test/suite/innodb/t/read_only_recovery.test index b111d96debe..a1a69be724b 100644 --- a/mysql-test/suite/innodb/t/read_only_recovery.test +++ b/mysql-test/suite/innodb/t/read_only_recovery.test @@ -20,7 +20,9 @@ BEGIN; INSERT INTO t VALUES(0); ROLLBACK; --let $restart_parameters= --innodb-force-recovery=3 ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout= 0 +--source include/restart_mysqld.inc +--let $shutdown_timeout= 30 --disconnect con1 SELECT * FROM t; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; diff --git a/mysql-test/suite/innodb/t/xa_recovery.test b/mysql-test/suite/innodb/t/xa_recovery.test index f5c2b655545..957b758d05c 100644 --- a/mysql-test/suite/innodb/t/xa_recovery.test +++ b/mysql-test/suite/innodb/t/xa_recovery.test @@ -15,7 +15,8 @@ connect (con1,localhost,root); XA START 'x'; UPDATE t1 set a=2; XA END 'x'; XA PREPARE 'x'; connection default; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con1; connect (con1,localhost,root); diff --git a/mysql-test/suite/innodb_fts/r/crash_recovery.result b/mysql-test/suite/innodb_fts/r/crash_recovery.result index 2ff867b70fe..7bf86631d1e 100644 --- a/mysql-test/suite/innodb_fts/r/crash_recovery.result +++ b/mysql-test/suite/innodb_fts/r/crash_recovery.result @@ -23,7 +23,6 @@ DELETE FROM articles LIMIT 1; ROLLBACK; disconnect flush_redo_log; connection default; -# Kill and restart INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); CREATE FULLTEXT INDEX idx ON articles (title,body); @@ -52,7 +51,6 @@ DELETE FROM articles LIMIT 1; ROLLBACK; disconnect flush_redo_log; connection default; -# 
Kill and restart INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); SELECT * FROM articles @@ -83,7 +81,6 @@ INSERT INTO articles VALUES BEGIN; INSERT INTO articles VALUES (100, 200, 'MySQL Tutorial','DBMS stands for DataBase ...'); -# Kill and restart INSERT INTO articles VALUES (8, 12, 'MySQL Tutorial','DBMS stands for DataBase ...'); SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); diff --git a/mysql-test/suite/innodb_fts/t/crash_recovery.test b/mysql-test/suite/innodb_fts/t/crash_recovery.test index 63c920a91ec..8b82e5e68b5 100644 --- a/mysql-test/suite/innodb_fts/t/crash_recovery.test +++ b/mysql-test/suite/innodb_fts/t/crash_recovery.test @@ -47,7 +47,7 @@ ROLLBACK; --disconnect flush_redo_log --connection default ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash INSERT INTO articles (title,body) VALUES @@ -85,7 +85,7 @@ ROLLBACK; --disconnect flush_redo_log --connection default ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash INSERT INTO articles (title,body) VALUES @@ -126,7 +126,7 @@ BEGIN; INSERT INTO articles VALUES (100, 200, 'MySQL Tutorial','DBMS stands for DataBase ...'); ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc # This would re-initialize the FTS index and do the re-tokenization # of above records diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test index 263896e8487..c2216f768d0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test @@ -7,8 +7,8 @@ if ($no_zstd) -- Skip Requires RocksDB to be built with ZStandard Compression support } ---let 
$_mysqld_option=--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0; ---source include/restart_mysqld_with_option.inc +--let $restart_parameters=--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0; +--source include/restart_mysqld.inc create table t (id int primary key) engine=rocksdb; drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test index 23f9b771d42..5fee66bddb6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test @@ -34,8 +34,8 @@ select cf_name, option_type, value # restart with cf configs for cf1 and cf2 --exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err ---let $_mysqld_option=--rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m}; ---source include/restart_mysqld_with_option.inc +--let $restart_parameters=--rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m}; +--source include/restart_mysqld.inc # check column family options in log -- should reflect individual settings From b2865a437f45922c2f31f2bffe0f7d6134a8720e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 14 Mar 2017 15:36:30 +0100 Subject: [PATCH 199/233] search_pattern_in_file.inc changes 1. Special mode to search in error logs: if SEARCH_RANGE is not set, the file is considered an error log and the search is performed since the last CURRENT_TEST: line 2. Number of matches is printed too. "FOUND 5 /foo/ in bar". Use greedy .* at the end of the pattern if number of matches isn't stable. If nothing is found it's still "NOT FOUND", not "FOUND 0". 3. 
SEARCH_ABORT specifies the prefix of the output. Can be "NOT FOUND" or "FOUND" as before, but also "FOUND 5 " if needed. --- mysql-test/extra/rpl_tests/rpl_checksum.inc | 1 - mysql-test/include/search_pattern_in_file.inc | 66 ++++++++----------- mysql-test/r/events_slowlog.result | 2 +- mysql-test/r/mysqldump.result | 2 +- mysql-test/r/shutdown.result | 2 +- mysql-test/r/view.result | 2 +- mysql-test/r/wait_timeout_not_windows.result | 2 +- .../binlog_encryption/encrypted_master.result | 18 ++--- .../binlog_encryption/encrypted_master.test | 19 +++--- .../encrypted_master_lost_key.test | 1 + ...ncrypted_master_switch_to_unencrypted.test | 1 + .../binlog_encryption/encrypted_slave.result | 6 +- .../binlog_encryption/encrypted_slave.test | 7 +- .../binlog_encryption/encryption_combo.result | 2 +- .../binlog_encryption/encryption_combo.test | 1 + .../binlog_encryption/rpl_checksum.result | 2 +- .../suite/binlog_encryption/rpl_loadfile.test | 1 + .../encryption/r/encrypt_and_grep.result | 6 +- .../encryption/r/filekeys_emptyfile.result | 2 +- .../encryption/r/filekeys_encfile_bad.result | 2 +- .../r/filekeys_encfile_badfile.result | 2 +- .../encryption/r/filekeys_encfile_no.result | 2 +- .../suite/encryption/r/filekeys_nofile.result | 2 +- .../suite/encryption/r/filekeys_syntax.result | 30 ++++----- .../encryption/r/filekeys_tooshort.result | 2 +- .../encryption/r/filekeys_unencfile.result | 2 +- .../r/innodb-discard-import-change.result | 2 +- .../r/innodb-key-rotation-disable.result | 4 +- .../encryption/r/innodb_encrypt_log.result | 2 +- .../r/innodb_encrypt_log_corruption.result | 40 +++++------ .../suite/encryption/t/encrypt_and_grep.test | 1 - .../suite/encryption/t/filekeys_badtest.inc | 1 - .../r/innodb-change-buffer-recovery.result | 2 +- .../suite/innodb/r/log_alter_table.result | 2 + .../suite/innodb/r/log_corruption.result | 42 ++++++------ mysql-test/suite/innodb/r/log_file.result | 20 +++--- .../suite/innodb/r/log_file_name.result | 40 +++++------ 
.../suite/innodb/r/log_file_name_debug.result | 4 +- .../suite/innodb/r/log_file_size.result | 34 +++++++--- .../suite/innodb/r/temporary_table.result | 5 ++ .../suite/innodb/t/log_alter_table.test | 11 +--- mysql-test/suite/innodb/t/log_corruption.test | 1 - mysql-test/suite/innodb/t/log_file.test | 1 - mysql-test/suite/innodb/t/log_file_name.test | 39 ++++++----- .../suite/innodb/t/log_file_name_debug.test | 1 - mysql-test/suite/innodb/t/log_file_size.test | 1 - .../suite/innodb/t/temporary_table.test | 1 - .../suite/innodb_zip/r/innochecksum.result | 36 +++++----- .../suite/innodb_zip/r/innochecksum_3.result | 8 +-- mysql-test/suite/rpl/r/rpl_checksum.result | 2 +- .../suite/rpl/r/rpl_gtid_errorlog.result | 4 +- mysql-test/suite/rpl/t/rpl_gtid_errorlog.test | 1 - .../suite/rpl/t/rpl_stop_slave_error.test | 1 - mysql-test/t/named_pipe.test | 1 - mysql-test/t/shutdown.test | 1 - mysql-test/t/view.test | 1 + mysql-test/t/wait_timeout_not_windows.test | 1 - 57 files changed, 244 insertions(+), 251 deletions(-) diff --git a/mysql-test/extra/rpl_tests/rpl_checksum.inc b/mysql-test/extra/rpl_tests/rpl_checksum.inc index 8423d2fc1cb..28d16658a7c 100644 --- a/mysql-test/extra/rpl_tests/rpl_checksum.inc +++ b/mysql-test/extra/rpl_tests/rpl_checksum.inc @@ -305,7 +305,6 @@ if(!$log_error_) let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; } --let SEARCH_FILE= $log_error_ ---let SEARCH_RANGE=-50000 --let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590 --source include/search_pattern_in_file.inc diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index f77a7c60916..0a68fcf6765 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -12,37 +12,22 @@ # # Optionally, SEARCH_RANGE can be set to the max number of bytes of the file # to search. 
If negative, it will search that many bytes at the end of the -# file. The default is to search only the first 50000 bytes of the file. +# file. By default the search happens from the last CURRENT_TEST: +# marker till the end of file (appropriate for searching error logs). +# +# Optionally, SEARCH_ABORT can be set to "FOUND" or "NOT FOUND" and this +# will abort if the search result doesn't match the requested one. # # In case of # - SEARCH_FILE and/or SEARCH_PATTERN is not set # - SEARCH_FILE cannot be opened -# - SEARCH_FILE does not contain SEARCH_PATTERN # the test will abort immediate. -# MTR will report something like -# .... -# worker[1] Using MTR_BUILD_THREAD 300, with reserved ports 13000..13009 -# main.1st [ pass ] 3 -# innodb.innodb_page_size [ fail ] -# Test ended at 2011-11-11 18:15:58 -# -# CURRENT_TEST: innodb.innodb_page_size -# # ERROR: The file '' does not contain the expected pattern -# mysqltest: In included file "./include/search_pattern_in_file.inc": -# included from ./include/search_pattern_in_file.inc at line 36: -# At line 25: command "perl" failed with error 255. my_errno=175 -# -# The result from queries just before the failure was: -# ... 
-# - saving '' to '' -# main.1st [ pass ] 2 # # Typical use case (check invalid server startup options): # let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err; # --error 0,1 # --remove_file $error_log # let SEARCH_FILE= $error_log; -# let SEARCH_RANGE= -50000; # # Stop the server # let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; # --exec echo "wait" > $restart_file @@ -60,36 +45,37 @@ perl; use strict; - die "SEARCH_FILE not set" unless $ENV{'SEARCH_FILE'}; - my @search_files= glob($ENV{'SEARCH_FILE'}); - my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; - my $search_range= $ENV{'SEARCH_RANGE'}; + use autodie qw(open); + die "SEARCH_FILE not set" unless $ENV{SEARCH_FILE}; + my @search_files= glob($ENV{SEARCH_FILE}); + my $search_pattern= $ENV{SEARCH_PATTERN} or die "SEARCH_PATTERN not set"; + my $search_range= $ENV{SEARCH_RANGE}; my $content; - $search_range= 50000 unless $search_range =~ /-?[0-9]+/; foreach my $search_file (@search_files) { - open(FILE, '<', $search_file) or die("Unable to open '$search_file': $!\n"); + open(FILE, '<', $search_file); my $file_content; - if ($search_range >= 0) { + if ($search_range > 0) { read(FILE, $file_content, $search_range, 0); - } else { + } elsif ($search_range < 0) { my $size= -s $search_file; $search_range = -$size if $size > -$search_range; seek(FILE, $search_range, 2); read(FILE, $file_content, -$search_range, 0); + } else { + while (<FILE>) { # error log + if (/^CURRENT_TEST:/) { + $content=''; + } else { + $content.=$_; + } + } } close(FILE); $content.= $file_content; } - $ENV{'SEARCH_FILE'} =~ s{^.*?([^/\\]+)$}{$1}; - if ($content =~ m{$search_pattern}) { - die "FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - if $ENV{SEARCH_ABORT} eq 'FOUND'; - print "FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - unless defined $ENV{SEARCH_ABORT}; - } else { - die "NOT FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - if $ENV{SEARCH_ABORT} eq 'NOT FOUND'; - print "NOT FOUND /$search_pattern/ 
in $ENV{'SEARCH_FILE'}\n" - unless defined $ENV{SEARCH_ABORT}; - } + my @matches=($content =~ m/$search_pattern/gs); + my $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND"; + $ENV{SEARCH_FILE} =~ s{^.*?([^/\\]+)$}{$1}; + print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n"; + exit $ENV{SEARCH_ABORT} && $res =~ /^$ENV{SEARCH_ABORT}/; EOF diff --git a/mysql-test/r/events_slowlog.result b/mysql-test/r/events_slowlog.result index 7de5925bc0f..be0a1e78d2a 100644 --- a/mysql-test/r/events_slowlog.result +++ b/mysql-test/r/events_slowlog.result @@ -6,7 +6,7 @@ set global long_query_time=0.2; create table t1 (i int); insert into t1 values (0); create event ev on schedule at CURRENT_TIMESTAMP + INTERVAL 1 second do update t1 set i=1+sleep(0.5); -FOUND /update t1 set i=1/ in mysqld-slow.log +FOUND 1 /update t1 set i=1/ in mysqld-slow.log drop table t1; set global event_scheduler= @event_scheduler_save; set global slow_query_log= @slow_query_log_save; diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index b46115c26f9..1243c455e6c 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -5533,4 +5533,4 @@ USE `db1`; DROP DATABASE db1; DROP DATABASE db2; -FOUND /Database: mysql/ in bug11505.sql +FOUND 1 /Database: mysql/ in bug11505.sql diff --git a/mysql-test/r/shutdown.result b/mysql-test/r/shutdown.result index ff2e450c3f0..be2eb16470c 100644 --- a/mysql-test/r/shutdown.result +++ b/mysql-test/r/shutdown.result @@ -13,4 +13,4 @@ drop user user1@localhost; # # MDEV-8491 - On shutdown, report the user and the host executed that. # -FOUND /mysqld(\.exe)? \(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err +FOUND 2 /mysqld(\.exe)? 
\(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index b899695f11f..b3f1468a2aa 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -5479,7 +5479,7 @@ DROP FUNCTION f1; DROP VIEW v1; DROP TABLE t1, t2; create view v1 as select 1; -FOUND /mariadb-version/ in v1.frm +FOUND 1 /mariadb-version/ in v1.frm drop view v1; # # MDEV-7260: Crash in get_best_combination when executing multi-table diff --git a/mysql-test/r/wait_timeout_not_windows.result b/mysql-test/r/wait_timeout_not_windows.result index 7b129ce5f12..f31dec1b4ba 100644 --- a/mysql-test/r/wait_timeout_not_windows.result +++ b/mysql-test/r/wait_timeout_not_windows.result @@ -2,5 +2,5 @@ set global log_warnings=2; connect foo,localhost,root; set @@wait_timeout=1; connection default; -FOUND /Aborted.*Got timeout reading communication packets/ in mysqld.1.err +FOUND 1 /Aborted.*Got timeout reading communication packets/ in mysqld.1.err set global log_warnings=@@log_warnings; diff --git a/mysql-test/suite/binlog_encryption/encrypted_master.result b/mysql-test/suite/binlog_encryption/encrypted_master.result index 65dd12ccba3..8a3798731f2 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master.result +++ b/mysql-test/suite/binlog_encryption/encrypted_master.result @@ -598,23 +598,23 @@ DROP SERVER server_name_to_encrypt; ############################# # Final checks for the master ############################# -NOT FOUND /_to_encrypt/ in master-bin.0* -NOT FOUND /COMMIT/ in master-bin.0* -NOT FOUND /TIMESTAMP/ in master-bin.0* +NOT FOUND /_to_encrypt.*/ in master-bin.0* +NOT FOUND /COMMIT.*/ in master-bin.0* +NOT FOUND /TIMESTAMP.*/ in master-bin.0* include/save_master_pos.inc ############################# # Final checks for the slave ############################# connection server_2; include/sync_io_with_master.inc -FOUND /_to_encrypt/ in slave-relay-bin.0* -FOUND /COMMIT/ in slave-relay-bin.0* 
-FOUND /TIMESTAMP/ in slave-relay-bin.0* +FOUND 1 /_to_encrypt.*/ in slave-relay-bin.0* +FOUND 1 /COMMIT.*/ in slave-relay-bin.0* +FOUND 1 /TIMESTAMP.*/ in slave-relay-bin.0* include/start_slave.inc include/sync_slave_sql_with_io.inc -FOUND /_to_encrypt/ in slave-bin.0* -FOUND /COMMIT/ in slave-bin.0* -FOUND /TIMESTAMP/ in slave-bin.0* +FOUND 1 /_to_encrypt.*/ in slave-bin.0* +FOUND 1 /COMMIT.*/ in slave-bin.0* +FOUND 1 /TIMESTAMP.*/ in slave-bin.0* ########## # Cleanup ########## diff --git a/mysql-test/suite/binlog_encryption/encrypted_master.test b/mysql-test/suite/binlog_encryption/encrypted_master.test index 5eb0345342d..503a40443d2 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master.test @@ -106,16 +106,17 @@ SET binlog_row_image= MINIMAL; --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --disable_connect_log @@ -138,15 +139,15 @@ SET binlog_row_image= MINIMAL; # Check that relay logs are unencrypted --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc @@ 
-158,15 +159,15 @@ SET binlog_row_image= MINIMAL; --enable_connect_log --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --echo ########## diff --git a/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test b/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test index 7e5fd7859f0..c4cf337f94e 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test @@ -58,6 +58,7 @@ INSERT INTO table1_to_encrypt SELECT NULL,NOW(),b FROM table1_to_encrypt; # Make sure that binary logs are encrypted +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= master-bin.0* --let SEARCH_PATTERN= table1_to_encrypt --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test b/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test index 91231f89307..eec72d64066 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test @@ -52,6 +52,7 @@ INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; # Make sure that binary logs are not encrypted +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= master-bin.0* --let SEARCH_PATTERN= table1_no_encryption --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/binlog_encryption/encrypted_slave.result b/mysql-test/suite/binlog_encryption/encrypted_slave.result index 
00096a61a5b..ff8ae374014 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_slave.result +++ b/mysql-test/suite/binlog_encryption/encrypted_slave.result @@ -149,9 +149,9 @@ DROP SERVER server_name_to_encrypt; ################# # Master binlog checks ################# -FOUND /_to_encrypt/ in master-bin.0* -FOUND /COMMIT/ in master-bin.0* -FOUND /TIMESTAMP/ in master-bin.0* +FOUND 1 /_to_encrypt.*/ in master-bin.0* +FOUND 1 /COMMIT.*/ in master-bin.0* +FOUND 1 /TIMESTAMP.*/ in master-bin.0* include/save_master_pos.inc ################# # Relay log checks diff --git a/mysql-test/suite/binlog_encryption/encrypted_slave.test b/mysql-test/suite/binlog_encryption/encrypted_slave.test index a69e78cd940..f5697d91779 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_slave.test +++ b/mysql-test/suite/binlog_encryption/encrypted_slave.test @@ -42,16 +42,17 @@ --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --disable_connect_log diff --git a/mysql-test/suite/binlog_encryption/encryption_combo.result b/mysql-test/suite/binlog_encryption/encryption_combo.result index d921c73440d..de5d91dfab7 100644 --- a/mysql-test/suite/binlog_encryption/encryption_combo.result +++ b/mysql-test/suite/binlog_encryption/encryption_combo.result @@ -19,7 +19,7 @@ FLUSH BINARY LOGS; SET binlog_format=ROW; INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; -FOUND /table1_no_encryption/ 
in master-bin.0* +FOUND 11 /table1_no_encryption/ in master-bin.0* ##################################################### # Part 2: restart master, now with binlog encryption ##################################################### diff --git a/mysql-test/suite/binlog_encryption/encryption_combo.test b/mysql-test/suite/binlog_encryption/encryption_combo.test index a5cf117d4a8..c24e77f4215 100644 --- a/mysql-test/suite/binlog_encryption/encryption_combo.test +++ b/mysql-test/suite/binlog_encryption/encryption_combo.test @@ -52,6 +52,7 @@ INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* --let SEARCH_PATTERN= table1_no_encryption --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum.result b/mysql-test/suite/binlog_encryption/rpl_checksum.result index 418536c3558..41c4cd94aff 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum.result +++ b/mysql-test/suite/binlog_encryption/rpl_checksum.result @@ -174,7 +174,7 @@ INSERT INTO t4 VALUES (2); connection slave; include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occurred on the master. Message: error writing to the binary log' -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err SELECT * FROM t4 ORDER BY a; a 1 diff --git a/mysql-test/suite/binlog_encryption/rpl_loadfile.test b/mysql-test/suite/binlog_encryption/rpl_loadfile.test index 97886ca0f48..40379a5c3d0 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loadfile.test +++ b/mysql-test/suite/binlog_encryption/rpl_loadfile.test @@ -7,5 +7,6 @@ --echo # --let SEARCH_FILE=$datadir/master-bin.0* +--let SEARCH_RANGE = 500000 --let SEARCH_PATTERN= xxxxxxxxxxx --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/encryption/r/encrypt_and_grep.result b/mysql-test/suite/encryption/r/encrypt_and_grep.result index bd20b79aafe..b1ffbdb8134 100644 --- a/mysql-test/suite/encryption/r/encrypt_and_grep.result +++ b/mysql-test/suite/encryption/r/encrypt_and_grep.result @@ -21,7 +21,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... on expecting NOT FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 # Now turn off encryption and wait for threads to decrypt everything @@ -43,7 +43,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... on expecting FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 # Now turn on encryption and wait for threads to encrypt all spaces @@ -65,7 +65,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... 
on expecting NOT FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 drop table t1, t2, t3; diff --git a/mysql-test/suite/encryption/r/filekeys_emptyfile.result b/mysql-test/suite/encryption/r/filekeys_emptyfile.result index f94f11d9f08..19bca3c36c7 100644 --- a/mysql-test/suite/encryption/r/filekeys_emptyfile.result +++ b/mysql-test/suite/encryption/r/filekeys_emptyfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("System key id 1 is missing at"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /System key id 1 is missing at/ in mysqld.1.err +FOUND 1 /System key id 1 is missing at/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_bad.result b/mysql-test/suite/encryption/r/filekeys_encfile_bad.result index 6261bd459b8..59124f2babd 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_bad.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_bad.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*filekeys-data.enc. Wrong key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*filekeys-data.enc. Wrong key/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*filekeys-data.enc. 
Wrong key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result b/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result index 98e2266f3f2..7e244c2c381 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("File 'bad' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File 'bad' not found/ in mysqld.1.err +FOUND 1 /File 'bad' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_no.result b/mysql-test/suite/encryption/r/filekeys_encfile_no.result index 6261bd459b8..59124f2babd 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_no.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_no.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*filekeys-data.enc. Wrong key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*filekeys-data.enc. Wrong key/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*filekeys-data.enc. 
Wrong key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_nofile.result b/mysql-test/suite/encryption/r/filekeys_nofile.result index 690f2e61df0..2caf258fef7 100644 --- a/mysql-test/suite/encryption/r/filekeys_nofile.result +++ b/mysql-test/suite/encryption/r/filekeys_nofile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("file-key-management-filename is not set"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /file-key-management-filename is not set/ in mysqld.1.err +FOUND 1 /file-key-management-filename is not set/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_syntax.result b/mysql-test/suite/encryption/r/filekeys_syntax.result index eb8119bc4f5..019446096b9 100644 --- a/mysql-test/suite/encryption/r/filekeys_syntax.result +++ b/mysql-test/suite/encryption/r/filekeys_syntax.result @@ -1,7 +1,7 @@ call mtr.add_suppression("File '.*keys.txt' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File '.*keys.txt' not found/ in mysqld.1.err +FOUND 1 /File '.*keys.txt' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from 
information_schema.plugins @@ -12,7 +12,7 @@ ERROR HY000: Invalid key id at MYSQL_TMP_DIR/keys.txt line 2, column 2 call mtr.add_suppression("File '.*keys.txt' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File '.*keys.txt' not found/ in mysqld.1.err +FOUND 1 /File '.*keys.txt' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -21,7 +21,7 @@ plugin_status call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 1 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -32,7 +32,7 @@ ERROR HY000: Invalid key id at MYSQL_TMP_DIR/keys.txt line 2, column 11 call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -41,7 +41,7 @@ plugin_status call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned 
error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -52,7 +52,7 @@ ERROR HY000: Invalid key at MYSQL_TMP_DIR/keys.txt line 2, column 47 call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -61,7 +61,7 @@ plugin_status call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 3 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -72,7 +72,7 @@ ERROR HY000: Invalid key at MYSQL_TMP_DIR/keys.txt line 2, column 33 call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb 
encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -81,7 +81,7 @@ plugin_status call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -92,7 +92,7 @@ ERROR HY000: Syntax error at MYSQL_TMP_DIR/keys.txt line 2, column 2 call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -101,7 +101,7 @@ plugin_status call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 1 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -112,7 +112,7 @@ ERROR HY000: Syntax error at MYSQL_TMP_DIR/keys.txt line 2, column 1 call mtr.add_suppression("Syntax error"); 
call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -121,7 +121,7 @@ plugin_status call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -132,7 +132,7 @@ ERROR HY000: System key id 1 is missing at MYSQL_TMP_DIR/keys.txt line 1, column call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -141,7 +141,7 @@ plugin_status call mtr.add_suppression("System key id 1"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /System key id 1/ in mysqld.1.err +FOUND 1 /System key id 1/ in mysqld.1.err create table t1(c1 bigint 
not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_tooshort.result b/mysql-test/suite/encryption/r/filekeys_tooshort.result index efa66097563..781bde6fd49 100644 --- a/mysql-test/suite/encryption/r/filekeys_tooshort.result +++ b/mysql-test/suite/encryption/r/filekeys_tooshort.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*tooshort.enc. Not encrypted"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*tooshort.enc. Not encrypted/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*tooshort.enc. Not encrypted/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_unencfile.result b/mysql-test/suite/encryption/r/filekeys_unencfile.result index 1b9c092a713..31668348607 100644 --- a/mysql-test/suite/encryption/r/filekeys_unencfile.result +++ b/mysql-test/suite/encryption/r/filekeys_unencfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*keys.txt. Not encrypted"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*keys.txt. Not encrypted/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*keys.txt. 
Not encrypted/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/innodb-discard-import-change.result b/mysql-test/suite/encryption/r/innodb-discard-import-change.result index b0b01b7cf7b..51670d89e52 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import-change.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import-change.result @@ -99,5 +99,5 @@ NOT FOUND /verysecretmessage/ in t3.ibd # t4 page compressed and encrypted expecting NOT FOUND NOT FOUND /verysecretmessage/ in t4.ibd # t5 normal expecting FOUND -FOUND /verysecretmessage/ in t5.ibd +FOUND 289 /verysecretmessage/ in t5.ibd DROP TABLE t1,t2,t3,t4,t5,t6; diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result index 07f6f98b88a..feaede20f2a 100644 --- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result +++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result @@ -57,9 +57,9 @@ NOT FOUND /secred/ in t5.ibd # t6 on expecting NOT FOUND NOT FOUND /secred/ in t6.ibd # t7 off expecting FOUND -FOUND /public/ in t7.ibd +FOUND 1 /public/ in t7.ibd # t8 row compressed expecting NOT FOUND -FOUND /public/ in t8.ibd +FOUND 1 /public/ in t8.ibd # t9 page compressed expecting NOT FOUND NOT FOUND /public/ in t9.ibd use test; diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log.result b/mysql-test/suite/encryption/r/innodb_encrypt_log.result index c660ebe336b..f8f933be831 100644 --- a/mysql-test/suite/encryption/r/innodb_encrypt_log.result +++ b/mysql-test/suite/encryption/r/innodb_encrypt_log.result @@ -51,7 +51,7 @@ INSERT INTO t0 VALUES(NULL, 5, 5, 'public', 'gossip'); # ib_logfile0 expecting NOT FOUND NOT FOUND 
/private|secret|sacr(ed|ament)|success|story|secur(e|ity)/ in ib_logfile0 # ib_logfile0 expecting FOUND -FOUND /public|gossip/ in ib_logfile0 +FOUND 3 /public|gossip/ in ib_logfile0 # ibdata1 expecting NOT FOUND NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)|public|gossip/ in ibdata1 # t0.ibd expecting NOT FOUND diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result index 957a8c1eec9..4a31f1ba454 100644 --- a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result +++ b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result @@ -3,52 +3,52 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log checkpoint SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err -FOUND /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. 
This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err +FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err # redo log from "after" MariaDB 10.2.2, but with invalid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid redo log header checksum/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err # distant future redo log format, with valid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err +FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. 
Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err # valid header, but old-format checkpoint blocks SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err +FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err -FOUND /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err +FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err # --innodb-force-recovery=6 (skip the entire redo log) SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES -FOUND /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err +FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -66,26 +66,26 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err -FOUND /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err -FOUND /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err -FOUND /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err +FOUND 1 /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err +FOUND 1 /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. 
Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err +FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err # Test a corrupted MLOG_FILE_NAME record. # valid header, invalid checkpoint 1, valid checkpoint 2, invalid block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err # valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err -FOUND /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err +FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err # missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -97,7 +97,7 @@ SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); 1 1 -FOUND /InnoDB: Encrypting redo log/ in mysqld.1.err +FOUND 1 /InnoDB: Encrypting redo log/ in mysqld.1.err ib_buffer_pool ib_logfile0 ib_logfile1 diff --git a/mysql-test/suite/encryption/t/encrypt_and_grep.test b/mysql-test/suite/encryption/t/encrypt_and_grep.test index fd54fc74f0a..2ef69db237d 100644 --- a/mysql-test/suite/encryption/t/encrypt_and_grep.test +++ 
b/mysql-test/suite/encryption/t/encrypt_and_grep.test @@ -13,7 +13,6 @@ --let t2_IBD = $MYSQLD_DATADIR/test/t2.ibd --let t3_IBD = $MYSQLD_DATADIR/test/t3.ibd --let SEARCH_RANGE = 10000000 ---let SEARCH_PATTERN=foobar SET GLOBAL innodb_file_per_table = ON; diff --git a/mysql-test/suite/encryption/t/filekeys_badtest.inc b/mysql-test/suite/encryption/t/filekeys_badtest.inc index 1cdea0e1a53..60ac9f0e798 100644 --- a/mysql-test/suite/encryption/t/filekeys_badtest.inc +++ b/mysql-test/suite/encryption/t/filekeys_badtest.inc @@ -7,7 +7,6 @@ call mtr.add_suppression("Plugin 'file_key_management' init function returned er call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); --let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE= -10000 --source include/search_pattern_in_file.inc --error ER_CANT_CREATE_TABLE diff --git a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result index ca58b77a21e..f03072053c3 100644 --- a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result +++ b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result @@ -38,7 +38,7 @@ SELECT b FROM t1 LIMIT 3; ERROR HY000: Lost connection to MySQL server during query disconnect con1; connection default; -FOUND /Wrote log record for ibuf update in place operation/ in my_restart.err +FOUND 1 /Wrote log record for ibuf update in place operation/ in my_restart.err CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK diff --git a/mysql-test/suite/innodb/r/log_alter_table.result b/mysql-test/suite/innodb/r/log_alter_table.result index f1ee61e7572..a6f35543c04 100644 --- a/mysql-test/suite/innodb/r/log_alter_table.result +++ b/mysql-test/suite/innodb/r/log_alter_table.result @@ -10,6 +10,8 @@ INSERT INTO t1 VALUES (1,2); ALTER TABLE t1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE; ALTER TABLE t1 DROP INDEX b, ADD INDEX (b); # Kill the server +FOUND 1 /scan .*: multi-log rec 
MLOG_FILE_CREATE2.*page .*:0/ in mysqld.1.err +FOUND 1 /scan .*: log rec MLOG_INDEX_LOAD/ in mysqld.1.err CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK diff --git a/mysql-test/suite/innodb/r/log_corruption.result b/mysql-test/suite/innodb/r/log_corruption.result index a932efeca62..3a20a11cd8f 100644 --- a/mysql-test/suite/innodb/r/log_corruption.result +++ b/mysql-test/suite/innodb/r/log_corruption.result @@ -3,52 +3,52 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log checkpoint SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err -FOUND /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err +FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. 
This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err # redo log from "after" MariaDB 10.2.2, but with invalid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid redo log header checksum/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err # distant future redo log format, with valid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err +FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err # valid header, but old-format checkpoint blocks SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err +FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err -FOUND /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err +FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err # --innodb-force-recovery=6 (skip the entire redo log) SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES -FOUND /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err +FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -66,26 +66,26 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err -FOUND /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err -FOUND /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err -FOUND /len 22. hex 38000000000012860cb7809781e80006626f67757300. 
asc 8 bogus / in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err +FOUND 1 /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err +FOUND 1 /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err +FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err # Test a corrupted MLOG_FILE_NAME record. # valid header, invalid checkpoint 1, valid checkpoint 2, invalid block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err # valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err -FOUND /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err +FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err # missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -97,8 +97,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Obtaining redo log encryption key version 1 failed/ in mysqld.1.err -FOUND /InnoDB: Decrypting checkpoint failed/ in mysqld.1.err +FOUND 1 /InnoDB: Obtaining redo log encryption key version 1 failed/ in mysqld.1.err +FOUND 1 /InnoDB: Decrypting checkpoint failed/ in mysqld.1.err ib_buffer_pool ib_logfile0 ib_logfile1 diff --git a/mysql-test/suite/innodb/r/log_file.result b/mysql-test/suite/innodb/r/log_file.result index 352e4b76cf1..a442091f33b 100644 --- a/mysql-test/suite/innodb/r/log_file.result +++ b/mysql-test/suite/innodb/r/log_file.result @@ -6,14 +6,14 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Could not create undo tablespace '.*undo002'/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Could not create undo tablespace '.*undo002'/ in mysqld.1.err # Remove undo001,undo002,ibdata1,ibdata2,ib_logfile1,ib_logfile2,ib_logfile101 # Start mysqld with non existent 
innodb_log_group_home_dir SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /File .path.to.non-existent.*ib_logfile101: 'create' returned OS error \d+/ in mysqld.1.err +FOUND 1 /File .path.to.non-existent.*ib_logfile101: 'create' returned OS error \d+/ in mysqld.1.err # Remove ibdata1 & ibdata2 # Successfully let InnoDB create tablespaces SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES @@ -27,7 +27,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /The innodb_system data file 'ibdata1' was not found but one of the other data files 'ibdata2' exists/ in mysqld.1.err +FOUND 1 /The innodb_system data file 'ibdata1' was not found but one of the other data files 'ibdata2' exists/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -49,8 +49,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Tablespace size stored in header is \d+ pages, but the sum of data file sizes is \d+ pages/ in mysqld.1.err -FOUND /InnoDB: Cannot start InnoDB. The tail of the system tablespace is missing/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace size stored in header is \d+ pages, but the sum of data file sizes is \d+ pages/ in mysqld.1.err +FOUND 1 /InnoDB: Cannot start InnoDB. The tail of the system tablespace is missing/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -88,7 +88,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: undo tablespace .*undo001.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. 
Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err +FOUND 1 /InnoDB: undo tablespace .*undo001.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -175,7 +175,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /undo tablespace .*undo003.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err +FOUND 1 /undo tablespace .*undo003.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -207,7 +207,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /Expected to open 3 undo tablespaces but was able to find only 1 undo tablespaces/ in mysqld.1.err +FOUND 1 /Expected to open 3 undo tablespaces but was able to find only 1 undo tablespaces/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -244,7 +244,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /Expected to open 3 undo tablespaces but was able to find only 0 undo tablespaces/ in mysqld.1.err +FOUND 1 /Expected to open 3 undo tablespaces but was able to find only 0 undo tablespaces/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -340,7 +340,7 @@ WHERE engine='innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); 1 
1 -FOUND /Resizing redo log from 1\*\d+ to 3\*\d+ pages; LSN=\d+/ in mysqld.1.err +FOUND 1 /Resizing redo log from 1\*\d+ to 3\*\d+ pages; LSN=\d+/ in mysqld.1.err # Cleanup bak_ib_logfile0 bak_ib_logfile1 diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result index e5904165781..df4c9f637be 100644 --- a/mysql-test/suite/innodb/r/log_file_name.result +++ b/mysql-test/suite/innodb/r/log_file_name.result @@ -14,30 +14,30 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID.*/ in mysqld.1.err # Fault 2: Wrong space_id in a dirty file, and a missing file. SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t1.ibd' with space ID/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at.*t3.ibd/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t1.ibd' with space ID.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at.*t3.ibd.*/ in mysqld.1.err # Fault 3: Wrong space_id in a dirty file, and no missing file. 
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t[23].ibd' with space ID/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at .*t1.ibd/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at .*t3.ibd/ in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t[23].ibd' with space ID.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t3.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*/ in mysqld.1.err # Fault 4: Missing data file SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Tablespace \d+ was not found at .*t[12].ibd. -.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t[12].ibd. +.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*/ in mysqld.1.err # Fault 5: Wrong type of data file SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -47,8 +47,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Cannot read first page of .*t2.ibd/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Datafile .*t2.*\. 
Cannot determine the space ID from the first 64 pages/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot read first page of .*t2.ibd.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages.*/ in mysqld.1.err SELECT * FROM t2; a 9 @@ -81,20 +81,20 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Cannot read first page of .*u2.ibd/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot read first page of .*u2.ibd.*/ in mysqld.1.err # Fault 7: Missing or wrong data file and innodb_force_recovery SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err -FOUND /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. 
All redo log for this tablespace will be ignored!/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*/ in mysqld.1.err +FOUND 1 /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!.*/ in mysqld.1.err DROP TABLE u1,u2,u3,u6; # List of files: SHOW TABLES; diff --git a/mysql-test/suite/innodb/r/log_file_name_debug.result b/mysql-test/suite/innodb/r/log_file_name_debug.result index e33ce36d1f2..ae7ce48fe5e 100644 --- a/mysql-test/suite/innodb/r/log_file_name_debug.result +++ b/mysql-test/suite/innodb/r/log_file_name_debug.result @@ -7,8 +7,8 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; # Kill the server SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND /InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND /srv_prepare_to_delete_redo_log_files: ib_log: MLOG_CHECKPOINT.* written/ in mysqld.1.err +FOUND 1 /srv_prepare_to_delete_redo_log_files: ib_log: MLOG_CHECKPOINT.* written/ in mysqld.1.err DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result index b0ab6e38395..a29a4e81683 100644 --- a/mysql-test/suite/innodb/r/log_file_size.result +++ 
b/mysql-test/suite/innodb/r/log_file_size.result @@ -22,34 +22,48 @@ connection default; # Kill the server SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /syntax error in innodb_log_group_home_dir/ in mysqld.1.err +SELECT * FROM t1; +ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err +SELECT * FROM t1; +ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 2 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err +SELECT * FROM t1; +ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 3 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err +SELECT * FROM t1; +ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 2 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err +SELECT * FROM t1; +ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 4 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Cannot create log files in read-only mode/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Log file .*ib_logfile0 size 7 is not a multiple of innodb_page_size/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Log file .*ib_logfile1 is of different size 1048576 bytes than other log files/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: 
Unknown storage engine 'InnoDB' -SELECT * FROM t1; -ERROR 42000: Unknown storage engine 'InnoDB' -SELECT * FROM t1; -ERROR 42000: Unknown storage engine 'InnoDB' -SELECT * FROM t1; -ERROR 42000: Unknown storage engine 'InnoDB' -SELECT * FROM t1; -ERROR 42000: Unknown storage engine 'InnoDB' -SELECT * FROM t1; -ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err +FOUND 1 /InnoDB: Renaming log file .*ib_logfile101 to .*ib_logfile0/ in mysqld.1.err SELECT * FROM t1; a 42 diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result index 1fb73f4e775..72c5250934b 100644 --- a/mysql-test/suite/innodb/r/temporary_table.result +++ b/mysql-test/suite/innodb/r/temporary_table.result @@ -138,18 +138,23 @@ Tables_in_test create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; ERROR HY000: Can't create table `test`.`t1` (errno: 165 "Table is read only") # test various bad start-up parameters +FOUND 1 /innodb_temporary and innodb_system file names seem to be the same/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /support raw device/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 2 /support raw device/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /The innodb_temporary data file 'ibtmp1' must be at least/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /InnoDB: syntax error in file path/ 
in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS diff --git a/mysql-test/suite/innodb/t/log_alter_table.test b/mysql-test/suite/innodb/t/log_alter_table.test index bb7738d9fb8..6f12dfaf0b9 100644 --- a/mysql-test/suite/innodb/t/log_alter_table.test +++ b/mysql-test/suite/innodb/t/log_alter_table.test @@ -29,19 +29,12 @@ ALTER TABLE t1 DROP INDEX b, ADD INDEX (b); --let $restart_parameters= --debug=d,ib_log --source include/start_mysqld.inc -let SEARCH_RANGE = -50000; let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; let SEARCH_ABORT=NOT FOUND; -# Look for at least one MLOG_FILE_CREATE2 in the error log. -# Theoretically, it may have been written by this test or an earlier test. -# FIXME: redirect the error log of the restart to a new file, -# and ensure that we have exactly 2 records there. +# ensure that we have exactly 2 records there. let SEARCH_PATTERN=scan .*: multi-log rec MLOG_FILE_CREATE2.*page .*:0; --source include/search_pattern_in_file.inc -# Look for at least one MLOG_INDEX_LOAD in the error log. -# Theoretically, it may have been written by this test or an earlier test. -# FIXME: redirect the error log of the restart to a new file, -# and ensure that we have exactly 3 records there. +# ensure that we have exactly 3 records there. 
let SEARCH_PATTERN=scan .*: log rec MLOG_INDEX_LOAD; --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/innodb/t/log_corruption.test b/mysql-test/suite/innodb/t/log_corruption.test index 7cfbda181e0..8013cc45830 100644 --- a/mysql-test/suite/innodb/t/log_corruption.test +++ b/mysql-test/suite/innodb/t/log_corruption.test @@ -20,7 +20,6 @@ call mtr.add_suppression("InnoDB: Decrypting checkpoint failed"); let bugdir= $MYSQLTEST_VARDIR/tmp/log_corruption; --mkdir $bugdir ---let SEARCH_RANGE = -50000 --let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES diff --git a/mysql-test/suite/innodb/t/log_file.test b/mysql-test/suite/innodb/t/log_file.test index c50257a69be..ffd7613289d 100644 --- a/mysql-test/suite/innodb/t/log_file.test +++ b/mysql-test/suite/innodb/t/log_file.test @@ -26,7 +26,6 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/log_file; --mkdir $bugdir let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE = -100000; let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); diff --git a/mysql-test/suite/innodb/t/log_file_name.test b/mysql-test/suite/innodb/t/log_file_name.test index e528abc80d5..0a8dc3e1fc0 100644 --- a/mysql-test/suite/innodb/t/log_file_name.test +++ b/mysql-test/suite/innodb/t/log_file_name.test @@ -30,7 +30,6 @@ COMMIT; --copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t1.ibd let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE= -50000; let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); @@ -39,7 +38,7 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED'); # checkpoint after the INSERT. That is what we checked above. --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. 
Another data file called .*t1.ibd exists with the same space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -54,10 +53,10 @@ let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Ano --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -73,14 +72,14 @@ let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd; --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t[23].ibd' with space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t[23].ibd' with space ID.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t1.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t3.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t3.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace; +let SEARCH_PATTERN= InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -96,7 +95,7 @@ eval $check_no_innodb; --source 
include/shutdown_mysqld.inc let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t[12].ibd. -.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace; +.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*; --source include/search_pattern_in_file.inc --echo # Fault 5: Wrong type of data file @@ -120,9 +119,9 @@ EOF eval $check_no_innodb; --source include/shutdown_mysqld.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*t2.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*t2.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages.*; --source include/search_pattern_in_file.inc # Restore t2.ibd @@ -214,17 +213,17 @@ EOF --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*u1.*\. 
Cannot determine the space ID from the first 64 pages.*; --source include/search_pattern_in_file.inc # TODO: These errors should state the file name (u2.ibd) and be ignored # in innodb-force-recovery mode once # Bug#18131883 IMPROVE INNODB ERROR MESSAGES REGARDING FILES # has been fixed: -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -239,26 +238,26 @@ let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd; --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace; +let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists.*; --source include/search_pattern_in_file.inc --remove_file $MYSQLD_DATADIR/test/u6.ibd --source include/restart_mysqld.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace; +let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*; --source include/search_pattern_in_file.inc -let 
SEARCH_PATTERN= \[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!; +let SEARCH_PATTERN= \[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!.*; --source include/search_pattern_in_file.inc --let $restart_parameters= diff --git a/mysql-test/suite/innodb/t/log_file_name_debug.test b/mysql-test/suite/innodb/t/log_file_name_debug.test index 44012d38c8e..0aaf798e2b3 100644 --- a/mysql-test/suite/innodb/t/log_file_name_debug.test +++ b/mysql-test/suite/innodb/t/log_file_name_debug.test @@ -32,7 +32,6 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; SELECT * FROM t1; --let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE = -50000 --let SEARCH_PATTERN = InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index d01263e3c89..069cfca3585 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -46,7 +46,6 @@ INSERT INTO t1 VALUES (0),(123); let MYSQLD_DATADIR= `select @@datadir`; let SEARCH_ABORT = NOT FOUND; -let SEARCH_RANGE= -50000; let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; BEGIN; diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index 9f63fe52f3b..f841acff1c0 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -122,7 +122,6 @@ create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb --echo # test various bad start-up parameters let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE = -50000; let SEARCH_ABORT = NOT FOUND; let 
$check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); diff --git a/mysql-test/suite/innodb_zip/r/innochecksum.result b/mysql-test/suite/innodb_zip/r/innochecksum.result index ff1bccfb60c..31d9450df80 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum.result @@ -14,16 +14,16 @@ insert into t1 values(3,"compressed table"); [2]: check the innochecksum with full form --strict-check=crc32 [3]: check the innochecksum with short form -C crc32 [4]: check the innochecksum with --no-check ignores algorithm check, warning is expected -FOUND /Error: --no-check must be associated with --write option./ in my_restart.err +FOUND 1 /Error: --no-check must be associated with --write option./ in my_restart.err [5]: check the innochecksum with short form --no-check ignores algorithm check, warning is expected -FOUND /Error: --no-check must be associated with --write option./ in my_restart.err +FOUND 1 /Error: --no-check must be associated with --write option./ in my_restart.err [6]: check the innochecksum with full form strict-check & no-check , an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err [7]: check the innochecksum with short form strict-check & no-check , an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err [8]: check the innochecksum with short & full form combination # strict-check & no-check, an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with 
--no-check option./ in my_restart.err [9]: check the innochecksum with full form --strict-check=innodb [10]: check the innochecksum with full form --strict-check=none # when server Default checksum=crc32 @@ -32,16 +32,16 @@ FOUND /Error: --strict-check option cannot be used together with --no-check opti [12]: check the innochecksum with short form -C none # when server Default checksum=crc32 [13]: check strict-check with invalid values -FOUND /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'crc\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'no\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'InnoBD\' to 
\'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'crc\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'no\' to \'strict-check\'/ in my_restart.err [14a]: when server default checksum=crc32 rewrite new checksum=crc32 with innochecksum # Also check the long form of write option. [14b]: when server default checksum=crc32 rewrite new checksum=innodb with innochecksum @@ -85,7 +85,7 @@ c1 c2 1 Innochecksum InnoDB1 # Stop server [18]:check Innochecksum with invalid write options -FOUND /Error while setting value \'strict_crc32\' to \'write\'/ in my_restart.err -FOUND /Error while setting value \'strict_innodb\' to \'write\'/ in my_restart.err -FOUND /Error while setting value \'crc23\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'crc23\' to \'write\'/ in my_restart.err DROP TABLE tab1; diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result index da7de031f42..800556c4ff3 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum_3.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result @@ -206,10 +206,10 @@ Filename::tab#.ibd # allow-mismatches,page,start-page,end-page [9]: check the both short and long options "page" and "start-page" when # seek value is larger than file size. 
-FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err [34]: check the invalid upper bound values for options, allow-mismatches, end-page, start-page and page. # innochecksum will fail with error code: 1 NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_restart.err diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result index e74e5af9f84..a74b688d722 100644 --- a/mysql-test/suite/rpl/r/rpl_checksum.result +++ b/mysql-test/suite/rpl/r/rpl_checksum.result @@ -174,7 +174,7 @@ INSERT INTO t4 VALUES (2); connection slave; include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occurred on the master. Message: error writing to the binary log' -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err SELECT * FROM t4 ORDER BY a; a 1 diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result index db80abf1df2..593f83a7946 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result @@ -49,8 +49,8 @@ a 3 4 5 -FOUND /Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error/ in mysqld.2.err -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: , Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: , Internal MariaDB error code: 1590/ in mysqld.2.err connection master; DROP TABLE t1; connection master; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test index 2ae910ff3e9..ea321062100 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test @@ -68,7 +68,6 @@ if(!$log_error_) let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; } --let SEARCH_FILE=$log_error_ ---let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error --source include/search_pattern_in_file.inc --let SEARCH_PATTERN=Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: , Internal MariaDB error code: 1590 diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test index a88981c15c4..10d7c7736f1 100644 --- a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test +++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test @@ -9,7 +9,6 @@ sync_slave_with_master; source include/stop_slave.inc; let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/slave_log.err; let SEARCH_PATTERN=Error reading packet from server: Lost connection; -let SEARCH_RANGE= -50000; source include/search_pattern_in_file.inc; source include/start_slave.inc; diff --git a/mysql-test/t/named_pipe.test b/mysql-test/t/named_pipe.test index af74c200e96..8503907b808 100644 --- a/mysql-test/t/named_pipe.test +++ b/mysql-test/t/named_pipe.test @@ -28,6 +28,5 @@ let $MYSQLD_DATADIR= `select @@datadir`; --error 1 --exec $MYSQLD_CMD --enable-named-pipe --skip-networking --log-error=second-mysqld.err let SEARCH_FILE=$MYSQLD_DATADIR/second-mysqld.err; -let SEARCH_RANGE= -50; let SEARCH_PATTERN=\[ERROR\] Create named pipe failed; source include/search_pattern_in_file.inc; diff --git a/mysql-test/t/shutdown.test b/mysql-test/t/shutdown.test index 7080f9a1a71..775628e441d 100644 --- a/mysql-test/t/shutdown.test +++ b/mysql-test/t/shutdown.test @@ -34,6 +34,5 @@ drop user user1@localhost; --echo # MDEV-8491 - On shutdown, report the user and the host executed that. --echo # --let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE= -50000 --let SEARCH_PATTERN=mysqld(\.exe)? 
\(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown --source include/search_pattern_in_file.inc diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index eaaebba166c..df5c7d3495d 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -5387,6 +5387,7 @@ create view v1 as select 1; --let $MYSQLD_DATADIR= `select @@datadir` --let SEARCH_FILE= $MYSQLD_DATADIR/test/v1.frm +--let SEARCH_RANGE= 50000 --let SEARCH_PATTERN=mariadb-version --source include/search_pattern_in_file.inc diff --git a/mysql-test/t/wait_timeout_not_windows.test b/mysql-test/t/wait_timeout_not_windows.test index de4904fada2..50731779845 100644 --- a/mysql-test/t/wait_timeout_not_windows.test +++ b/mysql-test/t/wait_timeout_not_windows.test @@ -10,7 +10,6 @@ set @@wait_timeout=1; sleep 2; connection default; let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE= -50; let SEARCH_PATTERN= Aborted.*Got timeout reading communication packets; source include/search_pattern_in_file.inc; set global log_warnings=@@log_warnings; From 16a99c5ad9ac3f73e307488041caedc4e2b5c8c9 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 31 Mar 2017 16:28:27 +0200 Subject: [PATCH 200/233] MariaRocks tests: various cleanups remove hard-coded paths (that assumed we're in a source tree) remove various shell/perl/awk/whatsnot scripts, use mysqltest and perl remove numerous --exec /some/unix/tool commands, use mysqltest and perl --- .../rocksdb/optimize_table_check_sst.pl | 22 ----- .../mysql-test/rocksdb/r/bloomfilter.result | 62 ++++++------ .../rocksdb/r/bloomfilter_skip.result | 62 ++++++------ .../mysql-test/rocksdb/r/collation.result | 4 +- .../rocksdb/r/compact_deletes.result | 15 --- .../mysql-test/rocksdb/r/drop_table.result | 2 + .../mysql-test/rocksdb/r/drop_table2.result | 2 + .../mysql-test/rocksdb/r/drop_table3.result | 2 + .../mysql-test/rocksdb/r/mysqldump.result | 33 ++++--- .../rocksdb/r/optimize_table.result | 12 +-- .../rocksdb/r/rocksdb_checksums.result | 22 
+---- .../rocksdb/r/rocksdb_datadir.result | 4 +- .../rocksdb/r/truncate_table3.result | 2 + .../mysql-test/rocksdb/r/unique_sec.result | 12 +-- .../rocksdb/r/use_direct_reads_writes.result | 6 +- .../rocksdb/r/validate_datadic.result | 11 +-- storage/rocksdb/mysql-test/rocksdb/suite.pm | 6 +- .../mysql-test/rocksdb/t/bloomfilter.inc | 22 ++--- .../rocksdb/t/bloomfilter_load_select.inc | 12 +-- .../rocksdb/t/bloomfilter_table_def.inc | 33 +++++++ .../rocksdb/t/bloomfilter_table_def.tmpl | 36 ------- .../mysql-test/rocksdb/t/checkpoint.test | 3 - .../mysql-test/rocksdb/t/collation.test | 13 +-- .../mysql-test/rocksdb/t/compact_deletes.test | 22 ++--- .../rocksdb/t/compact_deletes_test.inc | 33 ++++++- .../rocksdb/mysql-test/rocksdb/t/disabled.def | 4 + .../mysql-test/rocksdb/t/drop_table.test | 45 ++++++++- .../mysql-test/rocksdb/t/drop_table2.test | 23 +++-- .../mysql-test/rocksdb/t/drop_table2_check.pl | 19 ---- .../mysql-test/rocksdb/t/drop_table3.inc | 4 +- .../rocksdb/t/drop_table_compactions.pl | 37 -------- .../mysql-test/rocksdb/t/gen_insert.pl | 32 ------- .../mysql-test/rocksdb/t/mysqldump.test | 17 +--- .../mysql-test/rocksdb/t/optimize_table.inc | 95 ++++--------------- .../mysql-test/rocksdb/t/optimize_table.test | 75 ++++++++++++++- .../rocksdb/t/rocksdb_cf_options.test | 1 - .../mysql-test/rocksdb/t/rocksdb_checksums.pl | 16 ---- .../rocksdb/t/rocksdb_checksums.test | 44 +++++---- .../mysql-test/rocksdb/t/rocksdb_datadir.test | 13 +-- .../mysql-test/rocksdb/t/set_checkpoint.inc | 7 +- .../mysql-test/rocksdb/t/sst_count_rows.sh | 52 ---------- .../mysql-test/rocksdb/t/unique_sec.inc | 14 +-- .../mysql-test/rocksdb/t/unique_sec.test | 7 +- .../rocksdb/t/unique_sec_rev_cf.test | 5 +- .../rocksdb/t/use_direct_reads_writes.test | 15 +-- .../rocksdb/t/validate_datadic.test | 36 +++---- .../mysql-test/rocksdb/t/write_sync.test | 2 +- 47 files changed, 449 insertions(+), 567 deletions(-) delete mode 100644 
storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl delete mode 100755 storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl delete mode 100644 storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl delete mode 100755 storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh diff --git a/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl b/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl deleted file mode 100644 index 8199d5051df..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/perl - -die unless($ARGV[0]); -open(my $f, "<", $ARGV[0]) or die $!; -my @sst; -while(my $l = readline($f)) { - chomp($l); - push @sst, int($l); -} - -for(my $i= 0; $i < $#sst; $i++) { - printf("checking sst file reduction on optimize table from %d to %d..\n", $i, $i+1); - - if($sst[$i] - 1000 < $sst[$i+1]) { - printf("sst file reduction was not enough. 
%d->%d (minimum 1000kb)\n", $sst[$i], $sst[$i+1]); - die; - }else { - print "ok.\n"; - } -} -exit(0); - diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result index d65a4efea30..be93cf2eead 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -7,13 +7,7 @@ CREATE PROCEDURE bloom_end() BEGIN select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; END// -drop table if exists t1; -Warnings: -Note 1051 Unknown table 'test.t1' -drop table if exists t2; -Warnings: -Note 1051 Unknown table 'test.t2' -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -21,15 +15,15 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), -primary key (id1, id2, id3, id4), -index id2 (id2), -index id2_id1 (id2, id1), -index id2_id3 (id2, id3), -index id2_id4 (id2, id4), -index id2_id3_id1_id4 (id2, id3, id1, id4), -index id3_id2 (id3, id2) +primary key (id1, id2, id3, id4) , +index id2 (id2) , +index id2_id1 (id2, id1) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id3_id1_id4 (id2, id3, id1, id4) , +index id3_id2 (id3, id2) ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -37,14 +31,18 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), -primary key (id4), -index id2 (id2), -index id2_id3 (id2, id3), -index id2_id4 (id2, id4), -index id2_id4_id5 (id2, id4, id5), -index id3_id4 (id3, id4), -index id3_id5 (id3, id5) +primary key (id4) , +index id2 (id2) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id4_id5 (id2, id4, id5) , +index id3_id4 (id3, id4) , +index id3_id5 (id3, id5) ) 
engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -409,9 +407,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -427,7 +423,7 @@ index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -443,6 +439,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix', index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -807,9 +807,7 @@ count(*) call bloom_end(); checked true -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -825,7 +823,7 @@ index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -841,6 +839,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, 
seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result index af7feaf8682..18f007be4b2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -7,13 +7,7 @@ CREATE PROCEDURE bloom_end() BEGIN select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; END// -drop table if exists t1; -Warnings: -Note 1051 Unknown table 'test.t1' -drop table if exists t2; -Warnings: -Note 1051 Unknown table 'test.t2' -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -21,15 +15,15 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), -primary key (id1, id2, id3, id4), -index id2 (id2), -index id2_id1 (id2, id1), -index id2_id3 (id2, id3), -index id2_id4 (id2, id4), -index id2_id3_id1_id4 (id2, id3, id1, id4), -index id3_id2 (id3, id2) +primary key (id1, id2, id3, id4) , +index id2 (id2) , +index id2_id1 (id2, id1) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id3_id1_id4 (id2, id3, id1, id4) , +index id3_id2 (id3, id2) ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -37,14 +31,18 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), -primary key (id4), -index id2 (id2), -index id2_id3 (id2, id3), -index id2_id4 (id2, id4), -index id2_id4_id5 (id2, id4, id5), -index id3_id4 (id3, id4), -index id3_id5 (id3, id5) +primary key (id4) , +index id2 (id2) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id4_id5 (id2, id4, id5) , +index 
id3_id4 (id3, id4) , +index id3_id5 (id3, id5) ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -409,9 +407,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -427,7 +423,7 @@ index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -443,6 +439,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix', index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -807,9 +807,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -825,7 +823,7 @@ index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -841,6 +839,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; +insert t1 
+select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result index 8b1246b49d7..e372cbe2109 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/collation.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -107,7 +107,7 @@ ALTER TABLE t2 ADD INDEX(value); ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). DROP TABLE t2; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; - Invalid pattern in strict_collation_exceptions: [a-b +FOUND 1 /Invalid pattern in strict_collation_exceptions: \[a-b/ in mysqld.1.err CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (latin1_bin, binary, utf8_bin). SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; @@ -118,7 +118,7 @@ ERROR HY000: Unsupported collation on string indexed column test.c.value Use bin DROP TABLE a, b; call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; - Invalid pattern in strict_collation_exceptions: abc\ +FOUND 1 /Invalid pattern in strict_collation_exceptions: abc/ in mysqld.1.err CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (latin1_bin, binary, utf8_bin). 
SET GLOBAL rocksdb_strict_collation_exceptions="abc"; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result index 408a93441b9..5b3cfaf7839 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result @@ -22,9 +22,6 @@ set global rocksdb_compaction_sequential_deletes_window=0; set global rocksdb_compaction_sequential_deletes= 0; set global rocksdb_compaction_sequential_deletes_file_size=0; set global rocksdb_force_flush_memtable_now=1; -select sleep(1); -sleep(1) -0 wait_for_delete: 0 There are deletes left SET GLOBAL rocksdb_compaction_sequential_deletes= 0; @@ -36,9 +33,6 @@ set global rocksdb_compaction_sequential_deletes_window=1000; set global rocksdb_compaction_sequential_deletes= 990; set global rocksdb_compaction_sequential_deletes_file_size=0; set global rocksdb_force_flush_memtable_now=1; -select sleep(1); -sleep(1) -0 wait_for_delete: 1 No more deletes left SET GLOBAL rocksdb_compaction_sequential_deletes= 0; @@ -50,9 +44,6 @@ set global rocksdb_compaction_sequential_deletes_window=1000; set global rocksdb_compaction_sequential_deletes= 1000; set global rocksdb_compaction_sequential_deletes_file_size=1000000; set global rocksdb_force_flush_memtable_now=1; -select sleep(1); -sleep(1) -0 wait_for_delete: 0 There are deletes left SET GLOBAL rocksdb_compaction_sequential_deletes= 0; @@ -64,9 +55,6 @@ set global rocksdb_compaction_sequential_deletes_window=1000; set global rocksdb_compaction_sequential_deletes= 50; set global rocksdb_compaction_sequential_deletes_file_size=0; set global rocksdb_force_flush_memtable_now=1; -select sleep(1); -sleep(1) -0 wait_for_delete: 1 No more deletes left SET GLOBAL rocksdb_compaction_sequential_deletes= 0; @@ -81,9 +69,6 @@ set global rocksdb_compaction_sequential_deletes_window=1000; set global rocksdb_compaction_sequential_deletes= 50; set global 
rocksdb_compaction_sequential_deletes_file_size=0; set global rocksdb_force_flush_memtable_now=1; -select sleep(1); -sleep(1) -0 wait_for_delete: 1 No more deletes left SET GLOBAL rocksdb_compaction_sequential_deletes= 0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result index 7d0fae229da..fbe6f35126d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result @@ -1,3 +1,5 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result index c46d3522dd7..83d9fd9493f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result @@ -1,3 +1,5 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result index c69d789c12a..e5237fe9b1e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result @@ -1,3 +1,5 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); DROP TABLE IF EXISTS t1; set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result index 1cc90c61c01..7e17c98668c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result @@ -38,15 +38,16 @@ update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; /*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */; /*!50601 PREPARE s FROM @enable_bulk_load */; /*!50601 EXECUTE s */; --- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; DROP TABLE IF EXISTS `r1`; /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `r1` ( - `id1` int(11) NOT NULL DEFAULT '0', - `id2` int(11) NOT NULL DEFAULT '0', - `id3` varchar(100) NOT NULL DEFAULT '', - `id4` int(11) NOT NULL DEFAULT '0', + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, `value1` int(11) DEFAULT NULL, `value2` int(11) DEFAULT NULL, `value3` int(11) DEFAULT NULL, @@ -90,15 +91,16 @@ SET GLOBAL default_storage_engine=rocksdb; /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; --- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; DROP TABLE IF EXISTS `r1`; /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `r1` ( - `id1` int(11) NOT NULL DEFAULT '0', - `id2` int(11) NOT NULL DEFAULT '0', - `id3` varchar(100) NOT NULL DEFAULT '', - `id4` int(11) NOT NULL DEFAULT '0', + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, `value1` int(11) DEFAULT NULL, `value2` int(11) DEFAULT NULL, 
`value3` int(11) DEFAULT NULL, @@ -136,15 +138,16 @@ UNLOCK TABLES; /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; --- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; DROP TABLE IF EXISTS `r1`; /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `r1` ( - `id1` int(11) NOT NULL DEFAULT '0', - `id2` int(11) NOT NULL DEFAULT '0', - `id3` varchar(100) NOT NULL DEFAULT '', - `id4` int(11) NOT NULL DEFAULT '0', + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, `value1` int(11) DEFAULT NULL, `value2` int(11) DEFAULT NULL, `value3` int(11) DEFAULT NULL, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result index fa2062b415e..8273fdbae9f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result @@ -32,15 +32,19 @@ delete from t6 where id <= 9900; optimize table t1; Table Op Msg_type Msg_text test.t1 optimize status OK +sst file reduction ok optimize table t3; Table Op Msg_type Msg_text test.t3 optimize status OK +sst file reduction ok optimize table t4; Table Op Msg_type Msg_text test.t4 optimize status OK +sst file reduction ok optimize table t6; Table Op Msg_type Msg_text test.t6 optimize status OK +sst file reduction ok select count(*) from t1; count(*) 100 @@ -59,14 +63,6 @@ count(*) select count(*) from t6; count(*) 100 -checking sst file reduction on optimize table from 0 to 1.. -ok. -checking sst file reduction on optimize table from 1 to 2.. -ok. 
-checking sst file reduction on optimize table from 2 to 3.. -ok. -checking sst file reduction on optimize table from 3 to 4.. -ok. optimize table t2; Table Op Msg_type Msg_text test.t2 optimize status OK diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result index 206ce335c0c..0ec2540e8dd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result @@ -1,7 +1,6 @@ set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums; set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; -drop table if exists t1,t2,t3; show variables like 'rocksdb_%checksum%'; Variable_name Value rocksdb_checksums_pct 100 @@ -12,12 +11,7 @@ insert into t1 values (1,1,1),(2,2,2),(3,3,3); check table t1; Table Op Msg_type Msg_text test.t1 check status OK - CHECKTABLE t1: Checking table t1 - CHECKTABLE t1: Checking index a - CHECKTABLE t1: ... 3 index entries checked (0 had checksums) - CHECKTABLE t1: Checking index b - CHECKTABLE t1: ... 3 index entries checked (0 had checksums) - CHECKTABLE t1: 0 table records had checksums +FOUND 1 /0 table records had checksums/ in mysqld.1.err drop table t1; set session rocksdb_store_row_debug_checksums=on; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; @@ -25,12 +19,7 @@ insert into t2 values (1,1,1),(2,2,2),(3,3,3); check table t2; Table Op Msg_type Msg_text test.t2 check status OK - CHECKTABLE t2: Checking table t2 - CHECKTABLE t2: Checking index a - CHECKTABLE t2: ... 3 index entries checked (3 had checksums) - CHECKTABLE t2: Checking index b - CHECKTABLE t2: ... 
3 index entries checked (3 had checksums) - CHECKTABLE t2: 3 table records had checksums +FOUND 1 /3 table records had checksums/ in mysqld.1.err # Now, make a table that has both rows with checksums and without create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t3 values (1,1,1),(2,2,2),(3,3,3); @@ -40,12 +29,7 @@ set session rocksdb_store_row_debug_checksums=on; check table t3; Table Op Msg_type Msg_text test.t3 check status OK - CHECKTABLE t3: Checking table t3 - CHECKTABLE t3: Checking index a - CHECKTABLE t3: ... 3 index entries checked (3 had checksums) - CHECKTABLE t3: Checking index b - CHECKTABLE t3: ... 3 index entries checked (2 had checksums) - CHECKTABLE t3: 2 table records had checksums +FOUND 1 /2 table records had checksums/ in mysqld.1.err set session rocksdb_store_row_debug_checksums=on; set session rocksdb_checksums_pct=5; create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result index 7910e98b198..40c53f6fd8a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result @@ -1,2 +1,2 @@ -Check for the number of MANIFEST files -1 +Check for MANIFEST files +MANIFEST-000006 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result index 7adf50f9ff3..813f651be62 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result @@ -1,3 +1,5 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); DROP TABLE IF EXISTS t1; set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result index a2fbf664346..a37e7f1cb31 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -5,12 +5,12 @@ connection con1; CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), id4 INT, id5 VARCHAR(32), value1 INT, value2 INT, value3 VARCHAR(32), -PRIMARY KEY (id1, id2) , -UNIQUE INDEX (id2, id1) , -UNIQUE INDEX (id2, id3, id4) , -INDEX (id1) , -INDEX (id3, id1) , -UNIQUE INDEX(id5) , +PRIMARY KEY (id1, id2) , +UNIQUE INDEX (id2, id1) , +UNIQUE INDEX (id2, id3, id4) , +INDEX (id1) , +INDEX (id3, id1) , +UNIQUE INDEX(id5) , INDEX (id2, id5)) ENGINE=ROCKSDB; SELECT COUNT(*) FROM t1; COUNT(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result index d5cfdee4f07..59fb1e41bdd 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result @@ -1,2 +1,4 @@ - RocksDB: Can't enable both use_direct_reads and allow_mmap_reads - RocksDB: Can't enable both use_direct_writes and allow_mmap_writes +call mtr.add_suppression("rocksdb"); +call mtr.add_suppression("Aborting"); +FOUND 1 /enable both use_direct_reads/ in mysqld.1.err +FOUND 1 /enable both use_direct_writes/ in mysqld.1.err diff --git a/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result index 92c5207046a..b5ab85d14c6 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result @@ -1,9 +1,8 @@ -DROP TABLE IF EXISTS t1, t2; +call mtr.add_suppression('RocksDB: Schema mismatch'); CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB 
PARTITION BY KEY(pk) PARTITIONS 4; -Expect errors that we are missing two .frm files - RocksDB: Schema mismatch - Table test.t1 is registered in RocksDB but does not have a .frm file - RocksDB: Schema mismatch - Table test.t2 is registered in RocksDB but does not have a .frm file -Expect an error that we have an extra .frm file - RocksDB: Schema mismatch - A .frm file exists for table test.t1_dummy, but that table is not registered in RocksDB +"Expect errors that we are missing two .frm files" +FOUND 2 /Schema mismatch/ in mysqld.1.err +"Expect an error that we have an extra .frm file" +FOUND 3 /Schema mismatch/ in mysqld.1.err DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm index 658a0b3b4d1..f425274da27 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -1,8 +1,12 @@ -package My::Suite::Rocksdb_sys_vars; +package My::Suite::Rocksdb; @ISA = qw(My::Suite); sub is_default { not $::opt_embedded_server } +my ($sst_dump) = grep { -x "$_/sst_dump" } "$::bindir/storage/rocksdb", $::path_client_bindir; +return "RocksDB is not compiled, no sst_dump" unless $sst_dump; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump/sst_dump"; + bless { }; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc index 14b69c93e5b..b388a8036ad 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -1,8 +1,5 @@ --source include/have_rocksdb.inc -let tmpl_ddl= ../storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl; -let ddl= $MYSQL_TMP_DIR/bloomfilter_create.sql; - DELIMITER //; CREATE PROCEDURE bloom_start() BEGIN @@ -17,20 +14,20 @@ DELIMITER ;// #BF is sometimes invoked and useful ---exec sed s/##CF##//g $tmpl_ddl > $ddl ---source $ddl ---source t/bloomfilter_load_select.inc +--let $CF= +--source bloomfilter_table_def.inc 
+--source bloomfilter_load_select.inc #BF is always invoked but not useful at all ---exec sed s/##CF##/" COMMENT 'cf_short_prefix'"/g $tmpl_ddl > $ddl ---source $ddl ---source t/bloomfilter_load_select.inc +--let $CF=COMMENT 'cf_short_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc #BF is most of the time invoked and useful ---exec sed s/##CF##/" COMMENT 'cf_long_prefix'"/g $tmpl_ddl > $ddl ---source $ddl ---source t/bloomfilter_load_select.inc +--let $CF=COMMENT 'cf_long_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc # BUG: Prev() with prefix lookup should not use prefix bloom filter create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; @@ -60,4 +57,3 @@ optimize table t2; drop table if exists t1; drop table if exists t2; drop table if exists r1; ---remove_file $ddl diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc index a4a60d18bec..5c122d6bd19 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc @@ -1,10 +1,10 @@ +source include/have_sequence.inc; + # loading some data (larger than write buf size) to cause compaction ---exec perl ../storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl t1 > $MYSQL_TMP_DIR/insert_t1.sql ---exec perl ../storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl t2 > $MYSQL_TMP_DIR/insert_t2.sql ---disable_query_log ---source $MYSQL_TMP_DIR/insert_t1.sql ---source $MYSQL_TMP_DIR/insert_t2.sql ---enable_query_log +insert t1 + select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; # BF conditions (prefix short(4B)|medium(20B)|long(240B)) #0 no eq condition (o, x, x) diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc new file mode 100644 index 00000000000..2bc9bb64d5e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc @@ -0,0 +1,33 @@ +eval create or replace table t1 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id1, id2, id3, id4) $CF, + index id2 (id2) $CF, + index id2_id1 (id2, id1) $CF, + index id2_id3 (id2, id3) $CF, + index id2_id4 (id2, id4) $CF, + index id2_id3_id1_id4 (id2, id3, id1, id4) $CF, + index id3_id2 (id3, id2) $CF +) engine=ROCKSDB; + +eval create or replace table t2 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id4) $CF, + index id2 (id2) $CF, + index id2_id3 (id2, id3) $CF, + index id2_id4 (id2, id4) $CF, + index id2_id4_id5 (id2, id4, id5) $CF, + index id3_id4 (id3, id4) $CF, + index id3_id5 (id3, id5) $CF +) engine=ROCKSDB; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl deleted file mode 100644 index 5cf033d4726..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl +++ /dev/null @@ -1,36 +0,0 @@ -drop table if exists t1; -drop table if exists t2; -create table t1 ( - id1 bigint not null, - id2 bigint not null, - id3 varchar(100) not null, - id4 int not null, - id5 int not null, - value bigint, - value2 varchar(100), - primary key (id1, id2, id3, id4)##CF##, - index id2 (id2)##CF##, - index id2_id1 (id2, id1)##CF##, - index id2_id3 (id2, id3)##CF##, - index id2_id4 (id2, id4)##CF##, - index id2_id3_id1_id4 (id2, id3, id1, id4)##CF##, - index id3_id2 (id3, id2)##CF## -) engine=ROCKSDB; - -create table t2 ( - id1 bigint not null, - 
id2 bigint not null, - id3 varchar(100) not null, - id4 int not null, - id5 int not null, - value bigint, - value2 varchar(100), - primary key (id4)##CF##, - index id2 (id2)##CF##, - index id2_id3 (id2, id3)##CF##, - index id2_id4 (id2, id4)##CF##, - index id2_id4_id5 (id2, id4, id5)##CF##, - index id3_id4 (id3, id4)##CF##, - index id3_id5 (id3, id5)##CF## -) engine=ROCKSDB; - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test index 70ab64f8194..e5de6246f60 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test @@ -1,8 +1,5 @@ --source include/have_rocksdb.inc -# Unixisms ("exec ls" in set_checkpoint.inc etc) ---source include/not_windows.inc - --disable_warnings DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index 8df81acea77..29c2c2886b5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -3,10 +3,6 @@ # following check is commented out: # --source include/have_fullregex.inc -# Unixisms (exec grep) ---source include/not_windows.inc - - SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; --disable_warnings @@ -159,9 +155,10 @@ DROP TABLE t2; # test invalid regex (missing end bracket) ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; ---exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_PATTERN=Invalid pattern in strict_collation_exceptions: \[a-b; +source include/search_pattern_in_file.inc; --error ER_UNKNOWN_ERROR CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; @@ 
-173,9 +170,9 @@ DROP TABLE a, b; call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); # test invalid regex (trailing escape) ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; ---exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +let SEARCH_PATTERN=Invalid pattern in strict_collation_exceptions: abc; +source include/search_pattern_in_file.inc; --error ER_UNKNOWN_ERROR CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="abc"; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test index 121c0d610d6..b61da676b48 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test @@ -31,8 +31,8 @@ while ($i<1000) set global rocksdb_force_flush_memtable_now=1; optimize table r1; ---exec echo Test 1: Do a bunch of updates without setting the compaction sysvar ---exec echo Expect: no compaction +--echo Test 1: Do a bunch of updates without setting the compaction sysvar +--echo Expect: no compaction let $window = 0; let $deletes = 0; let $file_size = 0; @@ -41,8 +41,8 @@ let $primary = 1; let $no_more_deletes = 0; --source compact_deletes_test.inc ---exec echo Test 2: Do a bunch of updates and set the compaction sysvar ---exec echo Expect: compaction +--echo Test 2: Do a bunch of updates and set the compaction sysvar +--echo Expect: compaction let $window = 1000; let $deletes = 990; let $file_size = 0; @@ -51,8 +51,8 @@ let $primary = 1; let $no_more_deletes = 1; --source compact_deletes_test.inc ---exec echo Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large ---exec echo Expect: no compaction +--echo Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something 
large +--echo Expect: no compaction let $window = 1000; let $deletes = 1000; let $file_size = 1000000; @@ -61,8 +61,8 @@ let $primary = 1; let $no_more_deletes = 0; --source compact_deletes_test.inc ---exec echo Test 4: Do a bunch of secondary key updates and set the compaction sysvar ---exec echo Expect: compaction +--echo Test 4: Do a bunch of secondary key updates and set the compaction sysvar +--echo Expect: compaction let $window = 1000; let $deletes = 50; let $file_size = 0; @@ -71,9 +71,9 @@ let $primary = 0; let $no_more_deletes = 1; --source compact_deletes_test.inc ---exec echo Test 5: Do a bunch of secondary key updates and set the compaction sysvar, ---exec echo and rocksdb_compaction_sequential_deletes_count_sd turned on ---exec echo Expect: compaction +--echo Test 5: Do a bunch of secondary key updates and set the compaction sysvar, +--echo and rocksdb_compaction_sequential_deletes_count_sd turned on +--echo Expect: compaction let $window = 1000; let $deletes = 50; let $file_size = 0; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc index 6ecd13bac41..d80dcebcced 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -27,7 +27,7 @@ while ($i<1000) } --enable_query_log set global rocksdb_force_flush_memtable_now=1; -select sleep(1); +--sleep 1 --disable_query_log let $wait_timeout= 300; # Override default 30 seconds with 300. 
@@ -37,8 +37,35 @@ let $wait_condition = select count(*) = 0 --source include/wait_condition.inc --enable_query_log -let $MYSQL_SST_DUMP=../storage/rocksdb/sst_dump; -exec bash ../storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes ; +let NO_MORE_DELETES=$no_more_deletes; +perl; + use autodie qw(open); + $num_retries=240; + $retry=0; + print "wait_for_delete: $ENV{no_more_deletes}\n"; + while ($retry++ < $num_retries) { + $total_d=$total_e=0; + for $f (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>) { + # excluding system cf + open D, '-|', "$ENV{MARIAROCKS_SST_DUMP} --command=scan --output_hex --file=$f"; + while () { + next unless /'(\d{8})/ and $1 >= 8; + $total_d++ if /: [07]/; + $total_e++ if /: 1/; + } + close D; + } + last if $total_e and not ($total_d and $ENV{no_more_deletes}); + sleep 1; + } + + unless ($total_e) { + print "No records in the database\n"; + exit; + } + + print $total_d ? "There are deletes left\n" : "No more deletes left\n"; +EOF eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes; eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 7c8841b518e..fbb8d645cb4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -44,3 +44,7 @@ show_engine : MariaRocks: MariaDB doesnt support SHOW ENGINE rocksdb TRANSACTION rpl_row_not_found : MariaDB doesnt support slave_exec_mode='SEMI_STRICT' blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails + +allow_no_pk_concurrent_insert: stress test +rocksdb_deadlock_stress_rc: stress test +rocksdb_deadlock_stress_rr: stress test diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test 
b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index f06b04ec561..530f5792bed 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -1,6 +1,6 @@ --source include/have_rocksdb.inc -#Unixisms (possibly Linuxisms, exec truncate) ---source include/not_windows.inc +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); --disable_warnings DROP TABLE IF EXISTS t1; @@ -15,7 +15,6 @@ set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; set global rocksdb_signal_drop_index_thread = 1; --source include/restart_mysqld.inc ---exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err CREATE TABLE t1 ( a int not null, @@ -111,7 +110,45 @@ let $wait_condition = select count(*) = 0 # Get list of all indices needing to be dropped # Check total compacted-away rows for all indices # Check that all indices have been successfully dropped ---exec perl ../storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err +perl; +use autodie qw(open); + +sub print_array { + $str = shift; + $prev= $_[0]; + foreach (@_) { + $dummy_idx = $_ - $prev; + $prev= $_; + print "$str $dummy_idx\n"; + } +} + +open F, '<', "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +while () { + %a = @b = @c = () if /CURRENT_TEST/; + if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { + $a{$1} += $2; + } + if (/Begin filtering dropped index \(\d+,(\d+)\)/) { + push @b, $1; + } + if (/Finished filtering dropped index \(\d+,(\d+)\)/) { + push @c, $1; + } +} + +$prev= 0; +foreach (sort {$a <=> $b} keys %a){ + if ($prev) { + $dummy_idx= $_ - $prev; + }else { + $dummy_idx= 0; + } + $prev= $_; +} +print_array("Begin filtering dropped index+", sort {$a <=> $b} @b); +print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); +EOF # Cleanup drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test index 69c3ca28f17..7d53582e47b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -1,7 +1,7 @@ --source include/have_rocksdb.inc -#Unixisms (--exec truncate, du, grep ,sed) ---source include/not_windows.inc +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); --disable_warnings DROP TABLE IF EXISTS t1; @@ -16,7 +16,6 @@ set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; set global rocksdb_signal_drop_index_thread = 1; --source include/restart_mysqld.inc ---exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err CREATE TABLE t1 ( a int not null, @@ -90,9 +89,12 @@ let $max = 1000; let $table = t5; --source drop_table_repopulate_table.inc -let $output= $MYSQLTEST_VARDIR/tmp/size_output; - ---exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ before/' > $output +perl; +use autodie qw(open); +$size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); +open(F, '>', "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"); +print F $size; +EOF drop table t1; drop table t2; drop table t3; @@ -107,7 +109,12 @@ let $wait_condition = select count(*) = 0 --source include/wait_condition.inc # Check that space is reclaimed ---exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ after/' >> $output ---exec perl ../storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl $output +perl; +use autodie qw(open); +$size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); +open(F, '<', "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"); +$old=; +print "Compacted\n" if $old > $size * 2; +EOF # Cleanup diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl 
b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl deleted file mode 100644 index 8f43f4725b5..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/perl - -my $a = 0; -my $b=0; -die unless($ARGV[0]); -open(my $f, "<", $ARGV[0]) or die $!; -while(readline($f)) { - if (/(\d+) before/) { - $a = $1; - } - - if (/(\d+) after/ ) { - $b = $1; - } -} - -if ($a > $b * 2) { - printf("Compacted\n"); -} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc index 4d23f7a1c5f..7a643d9a720 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings @@ -9,7 +12,6 @@ set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; set global rocksdb_signal_drop_index_thread = 1; --source include/restart_mysqld.inc ---exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err CREATE TABLE t1 ( a int not null, diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl deleted file mode 100755 index b123ac5492f..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl +++ /dev/null @@ -1,37 +0,0 @@ -sub print_array { - $str = shift; - @arr = @_; - $prev= 0; - foreach (@arr) { - if ($prev) { - $dummy_idx = $_ - $prev; - }else { - $dummy_idx = 0; - } - $prev= $_; - print "$str $dummy_idx\n"; - } -} - -while (<>) { - if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { - $a{$1} += $2; - } - if (/Begin filtering dropped index \(\d+,(\d+)\)/) { - push @b, $1; - } - if (/Finished filtering dropped index 
\(\d+,(\d+)\)/) { - push @c, $1; - } -} -$prev= 0; -foreach (sort {$a <=> $b} keys %a){ - if ($prev) { - $dummy_idx= $_ - $prev; - }else { - $dummy_idx= 0; - } - $prev= $_; -} -print_array("Begin filtering dropped index+", sort {$a <=> $b} @b); -print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl b/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl deleted file mode 100644 index c723ec3ca17..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/perl - -my $table_name= $ARGV[0]; -my $id1= 1; -my $id2= 1; -my $id3= 1; -my $id4= 1; -my $id5= 1; -my $value= 1000; -my $value2= 'aaabbbccc'; -my $max_rows = 1 * 10000; - -for(my $row_id= 1; $row_id <= $max_rows; $row_id++) { - my $value_clause = "($id1, $id2, $id3, $id4, $id5, $value, \"$value2\")"; - - if ($row_id % 100 == 1) { - print "INSERT INTO $table_name VALUES"; - } - - if ($row_id % 100 == 0) { - print "$value_clause;\n"; - }else { - print "$value_clause,"; - } - - $id4++; - $id5++; - $id3++ if($row_id % 5 == 0); - $id2++ if($row_id % 5 == 0); - $id1++ if($row_id % 10 == 0); -} - diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test index b21c718fedc..9e904908330 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -1,18 +1,10 @@ --source include/have_rocksdb.inc - --source include/have_log_bin.inc - ---enable_connect_log - --source include/restart_mysqld.inc # Save the initial number of concurrent sessions --source include/count_sessions.inc ---disable_warnings -drop table if exists r1; ---enable_warnings - connect (con1,localhost,root,,); connect (con2,localhost,root,,); @@ -46,20 +38,19 @@ rollback; connection con1; ---exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l +let 
SEARCH_FILE=$MYSQLTEST_VARDIR/mysqld.1/mysqld.log; +let SEARCH_PATTERN=START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +source include/search_pattern_in_file.inc; set @save_default_storage_engine=@@global.default_storage_engine; SET GLOBAL default_storage_engine=rocksdb; --exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test ---exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l +source include/search_pattern_in_file.inc; # Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect) --echo ==== mysqldump with --innodb-stats-on-metadata ==== --exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test -# wiping general log so that this test case doesn't fail with --repeat ---exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log - # testing mysqldump work with statement based binary logging SET GLOBAL binlog_format=statement; --exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc index 08a465e7244..ad5953150c9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc @@ -1,78 +1,21 @@ -let $datadir = `SELECT @@datadir`; - ---disable_warnings -DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; ---enable_warnings -create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; -create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; -create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; -create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') 
engine=rocksdb; -create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; -create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; - ---disable_query_log -let $t = 1; -while ($t <= 6) { - let $i = 1; - while ($i <= 10000) { - let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); - inc $i; - eval $insert; - } - inc $t; -} ---enable_query_log - -# Disable auto compaction so that effects of optimize table are stable -let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; ---exec echo "wait" > $restart_file ---shutdown_server 10 ---source include/wait_until_disconnected.inc --- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --- enable_reconnect --- source include/wait_until_connected_again.inc - -select count(*) from t1; -select count(*) from t2; -select count(*) from t3; -select count(*) from t4; -select count(*) from t5; -select count(*) from t6; -delete from t1 where id <= 9900; -delete from t2 where id <= 9900; -delete from t3 where id <= 9900; -delete from t4 where id <= 9900; -delete from t5 where id <= 9900; -delete from t6 where id <= 9900; - ---let $size_cmd = du -ks $datadir/.rocksdb/*.sst | awk '{t=t+\$1} END{print t}' >> $MYSQL_TMP_DIR/sst_size.dat ---exec $size_cmd -optimize table t1; ---exec $size_cmd -optimize table t3; ---exec $size_cmd -optimize table t4; ---exec $size_cmd -optimize table t6; ---exec $size_cmd - -select count(*) from t1; -select count(*) from t2; -select count(*) from t3; -select count(*) from t4; -select count(*) from t5; -select count(*) from t6; - # run a check script to verify sst files reduced enough during each optimize table ---exec 
perl ../storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl $MYSQL_TMP_DIR/sst_size.dat +perl; +use autodie qw(open); -#cleanup -optimize table t2; -optimize table t5; -DROP TABLE t1; -DROP TABLE t2; -DROP TABLE t3; -DROP TABLE t4; -DROP TABLE t5; -DROP TABLE t6; ---remove_file $MYSQL_TMP_DIR/sst_size.dat +$size += -s $_ for (<$ENV{datadir}/.rocksdb/*.sst>); +$file= "$ENV{MYSQL_TMP_DIR}/sst_size.dat"; + +if (-f $file) { + open(F, '<', $file); + $old = ; + close F; + if ($old - $size < 1e6) { + print "sst file reduction was not enough $old -> $size (minimum 1000kb)\n"; + } else { + print "sst file reduction ok\n"; + } +} +open(F, '>', $file); +print F $size; +close F; +EOF diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test index 383bae72904..7a8f4fc7085 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test @@ -4,5 +4,78 @@ # OPTIMIZE TABLE statements # ---source optimize_table.inc +let datadir = `SELECT @@datadir`; + +--disable_warnings +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; + +--disable_query_log +let $t = 1; +while ($t <= 6) { + let $i = 1; + while ($i <= 
10000) { + let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log + +# Disable auto compaction so that effects of optimize table are stable +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; +delete from t1 where id <= 9900; +delete from t2 where id <= 9900; +delete from t3 where id <= 9900; +delete from t4 where id <= 9900; +delete from t5 where id <= 9900; +delete from t6 where id <= 9900; + +source optimize_table.inc; +optimize table t1; +source optimize_table.inc; +optimize table t3; +source optimize_table.inc; +optimize table t4; +source optimize_table.inc; +optimize table t6; +source optimize_table.inc; + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; + +#cleanup +optimize table t2; +optimize table t5; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP TABLE t5; +DROP TABLE t6; +--remove_file $MYSQL_TMP_DIR/sst_size.dat diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test index 5fee66bddb6..9482c2178be 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test @@ -33,7 +33,6 @@ select cf_name, 
option_type, value order by cf_name, option_type; # restart with cf configs for cf1 and cf2 ---exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err --let $restart_parameters=--rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m}; --source include/restart_mysqld.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl deleted file mode 100644 index 322f0781719..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl +++ /dev/null @@ -1,16 +0,0 @@ -$file=$ARGV[0]; -$total=$ARGV[1]; -$pct=$ARGV[2]; - -open($fh, "<", $file) or die $!; -while(readline($fh)) { - if (/(\d+) index entries checked \((\d+) had checksums/) { - if ($1 == $total && $2 >= $total*($pct-2)/100 && $2 <= $total*($pct+2)/100) { - printf("%d index entries had around %d checksums\n", $total, $total*$pct/100); - } - }elsif (/(\d+) table records had checksums/) { - if ($1 >= $total*($pct-2)/100 && $1 <= $total*($pct+2)/100) { - printf("Around %d table records had checksums\n", $total*$pct/100); - } - } -} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index 101b2085ac4..e643bdfcda8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -1,6 +1,4 @@ --source include/have_rocksdb.inc -# Does not run on Windows, because of unixisms (exec grep, cut, truncate file with exec echo) ---source include/not_windows.inc # # Tests for row checksums feature @@ -11,20 +9,14 @@ set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_che set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; -# wiping mysql log for 
repeatable tests ---exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err - ---disable_warnings -drop table if exists t1,t2,t3; ---enable_warnings --- exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err - show variables like 'rocksdb_%checksum%'; create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t1 values (1,1,1),(2,2,2),(3,3,3); check table t1; ---exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t1" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err +--let SEARCH_PATTERN=0 table records had checksums +--source include/search_pattern_in_file.inc drop table t1; @@ -32,7 +24,8 @@ set session rocksdb_store_row_debug_checksums=on; create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; insert into t2 values (1,1,1),(2,2,2),(3,3,3); check table t2; ---exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t2" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--let SEARCH_PATTERN=3 table records had checksums +--source include/search_pattern_in_file.inc --echo # Now, make a table that has both rows with checksums and without create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; @@ -41,7 +34,8 @@ set session rocksdb_store_row_debug_checksums=off; update t3 set b=3 where a=2; set session rocksdb_store_row_debug_checksums=on; check table t3; ---exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--let SEARCH_PATTERN=2 table records had checksums +--source include/search_pattern_in_file.inc set session rocksdb_store_row_debug_checksums=on; set session rocksdb_checksums_pct=5; @@ -58,9 +52,27 @@ while ($i<10000) } --enable_query_log check table t4; ---exec grep "^[0-9-]* \?[0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log ---exec perl 
../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5 ---remove_file $MYSQL_TMP_DIR/rocksdb_checksums.log +perl; +use autodie qw(open); +$total=10000; +$pct=5; +@out=(); + +open(F, '<', "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"); +while() { + @out=() if /^CURRENT_TEST:/; + if (/(\d+) index entries checked \((\d+) had checksums/) { + if ($1 == $total and $2 >= $total*($pct-2)/100 and $2 <= $total*($pct+2)/100) { + push @out, sprintf "%d index entries had around %d checksums\n", $total, $total*$pct/100; + } + } elsif (/(\d+) table records had checksums/) { + if ($1 >= $total*($pct-2)/100 and $1 <= $total*($pct+2)/100) { + push @out, sprintf "Around %d table records had checksums\n", $total*$pct/100; + } + } +} +print @out; +EOF set session rocksdb_checksums_pct=100; --echo # diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test index 18dff316161..4399dd1a401 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test @@ -1,12 +1,9 @@ --source include/have_rocksdb.inc -# Unixisms (exec ls | wc -l) ---source include/not_windows.inc let $ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test.install.db; let $rdb_ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test; let $sql_file = $MYSQL_TMP_DIR/rocksdb_datadir.sql; --write_file $sql_file -DROP DATABASE IF EXISTS mysqltest; CREATE DATABASE mysqltest; USE mysqltest; CREATE TABLE t1 (a INT PRIMARY KEY); @@ -14,6 +11,7 @@ INSERT INTO t1 VALUES(42); SET GLOBAL rocksdb_force_flush_memtable_now = 1; SELECT sleep(1); DROP TABLE t1; +DROP DATABASE mysqltest; EOF # Must ensure this directory exists before launching mysqld @@ -23,10 +21,13 @@ let $plugin_dir=`select @@plugin_dir`; # Launch mysqld with non-standard rocksdb_datadir exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$plugin_dir --plugin-load=$HA_ROCKSDB_SO --datadir=$ddir 
--rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < $sql_file; ---echo Check for the number of MANIFEST files -exec ls $rdb_ddir/MANIFEST-0000* | wc -l; +--echo Check for MANIFEST files +--list_files $rdb_ddir MANIFEST-0000* # Clean up -exec rm -rf $ddir; +remove_files_wildcard $ddir *; +remove_files_wildcard $ddir *; remove_files_wildcard $rdb_ddir *; +rmdir $ddir; +rmdir $rdb_ddir; remove_file $sql_file; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc index a8d8ed53cba..283afd3d5f8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc @@ -11,10 +11,13 @@ if ($succeeds) eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; # Check checkpoint - --exec ls $checkpoint/CURRENT | sed s/.*CURRENT/CURRENT/g + --list_files $checkpoint CURRENT # Cleanup - --exec rm -rf $checkpoint + --remove_files_wildcard $checkpoint * + --rmdir $checkpoint + --disable_abort_on_error + --enable_abort_on_error } if (!$succeeds) { diff --git a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh deleted file mode 100755 index 72442fa1e3e..00000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -sst_dump=$2 -wait_for_no_more_deletes=$3 -num_retries=240 -retry=0 - -echo "wait_for_delete: $wait_for_no_more_deletes" - -while : ; do - TOTAL_D=0 - TOTAL_E=0 - for f in `ls $1/mysqld.1/data/.rocksdb/*.sst` - do - # excluding system cf - DELETED=`$sst_dump --command=scan --output_hex --file=$f | \ - perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ - grep -e ": 0" -e ": 7" | wc -l` - EXISTS=`$sst_dump --command=scan --output_hex --file=$f | \ - perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ - grep ": 1" | wc -l` - 
TOTAL_D=$(($TOTAL_D+$DELETED)) - TOTAL_E=$(($TOTAL_E+$EXISTS)) - # echo "${f##*/} $DELETED $EXISTS" - done - if [ $TOTAL_E != "0" ] - then - if [ $TOTAL_D = "0" ] || [ $wait_for_no_more_deletes = "0" ] - then - break - fi - fi - if [ $retry -ge $num_retries ] - then - break - fi - sleep 1 - retry=$(($retry + 1)) -done - -if [ "$TOTAL_E" = "0" ] -then - echo "No records in the database" - exit -fi - -if [ "$TOTAL_D" = "0" ] -then - echo "No more deletes left" -else - echo "There are deletes left" -fi diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc index 2f11cd3b65a..ecfc0d8f734 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc @@ -10,15 +10,15 @@ connect (con2,localhost,root,,); connection con1; -CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), +eval CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), id4 INT, id5 VARCHAR(32), value1 INT, value2 INT, value3 VARCHAR(32), - PRIMARY KEY (id1, id2) ##CF##, - UNIQUE INDEX (id2, id1) ##CF##, - UNIQUE INDEX (id2, id3, id4) ##CF##, - INDEX (id1) ##CF##, - INDEX (id3, id1) ##CF##, - UNIQUE INDEX(id5) ##CF##, + PRIMARY KEY (id1, id2) $CF, + UNIQUE INDEX (id2, id1) $CF, + UNIQUE INDEX (id2, id3, id4) $CF, + INDEX (id1) $CF, + INDEX (id3, id1) $CF, + UNIQUE INDEX(id5) $CF, INDEX (id2, id5)) ENGINE=ROCKSDB; --disable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test index 74b5285763a..1dedd75f561 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -1,11 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc -# Unixism, exec sed ---source include/not_windows.inc -let ddl= $MYSQL_TMP_DIR/unique_sec.sql; ---exec sed s/##CF##//g 
../storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc > $ddl ---source $ddl +--let $CF= +--source unique_sec.inc --echo # --echo # Issue #88: Creating unique index over column with duplicate values succeeds diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test index 724281b73c8..dc0bbd8ec8d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test @@ -1,5 +1,4 @@ --source include/have_rocksdb.inc -let ddl= $MYSQL_TMP_DIR/unique_sec_rev_cf.sql; ---exec sed s/##CF##/" COMMENT 'rev:cf'"/g ../storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc > $ddl ---source $ddl +let $CF=COMMENT 'rev:cf'; +--source unique_sec.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test index 349748e91a8..87d31d2e2d9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +call mtr.add_suppression("rocksdb"); +call mtr.add_suppression("Aborting"); + # Issue221 # Turning on both --rocksdb-allow-mmap-reads and --rocksdb-use-direct-reads # caused an assertion in RocksDB. 
Now it should not be allowed and the @@ -12,9 +15,6 @@ --exec echo "wait" >$_expect_file_name shutdown_server 10; -# Clear the log ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err - # Attempt to restart the server with invalid options --exec echo "restart:--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1" >$_expect_file_name --sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart @@ -26,7 +26,9 @@ shutdown_server 10; --disable_reconnect # We should now have an error message ---exec grep "enable both use_direct_reads" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_PATTERN=enable both use_direct_reads; +source include/search_pattern_in_file.inc; # Repeat with --rocksdb-use-direct-writes --let $_server_id= `SELECT @@server_id` @@ -34,8 +36,6 @@ shutdown_server 10; --exec echo "wait" >$_expect_file_name shutdown_server 10; ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err - --exec echo "restart:--rocksdb_use_direct_writes=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name --sleep 0.1 --exec echo "restart:" >$_expect_file_name @@ -44,4 +44,5 @@ shutdown_server 10; --source include/wait_until_connected_again.inc --disable_reconnect ---exec grep "enable both use_direct_writes" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +let SEARCH_PATTERN=enable both use_direct_writes; +source include/search_pattern_in_file.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test index 183b62f1e80..e9dcc604155 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test @@ -10,9 +10,7 @@ # it in the log bug still start: --rocksdb_validate_tables=2 # ---disable_warnings -DROP TABLE IF EXISTS t1, t2; ---enable_warnings +call mtr.add_suppression('RocksDB: Schema mismatch'); CREATE TABLE t1 (pk int primary key) 
ENGINE=ROCKSDB; CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4; @@ -44,54 +42,48 @@ shutdown_server 10; --exec echo "wait" >$_expect_file_name shutdown_server 10; -# Clear the log ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err - # Rename the file ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp # Attempt to restart the server --exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name ---sleep 5 --enable_reconnect --source include/wait_until_connected_again.inc --disable_reconnect # We should now have an error message ---exec echo "Expect errors that we are missing two .frm files" ---exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--echo "Expect errors that we are missing two .frm files" +--let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err +--let SEARCH_PATTERN=Schema mismatch +--source include/search_pattern_in_file.inc # Now shut down again and rename one the .frm file back and make a copy of it --exec echo "wait" >$_expect_file_name shutdown_server 10; -# Clear the log ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err - # Rename the file ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm ---exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm ---exec cp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp 
$MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm +--copy_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm # Attempt to restart the server --exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name ---sleep 5 --enable_reconnect --source include/wait_until_connected_again.inc --disable_reconnect # We should now have an error message for the second case ---exec echo "Expect an error that we have an extra .frm file" ---exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--echo "Expect an error that we have an extra .frm file" +--let SEARCH_PATTERN=Schema mismatch +--source include/search_pattern_in_file.inc # Shut down an clean up --exec echo "wait" >$_expect_file_name shutdown_server 10; ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err ---exec rm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm +--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm --exec echo "restart" >$_expect_file_name --enable_reconnect --source include/wait_until_connected_again.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index cd8237de2fd..d508000bf8a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -6,7 +6,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; SET LOCAL rocksdb_flush_log_at_trx_commit=0; ---exec sleep 30 +sleep 30; select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; From 788382f77a3253ef0152330dfecadec0afcab841 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 1 Apr 2017 
06:32:20 +0300 Subject: [PATCH 201/233] Fix coompilation on windows Some Windows enviroments don't have uint64_t, and we dont really need that type. Assume 64-bit integers take 8 bytes. --- storage/rocksdb/build_rocksdb.cmake | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 0cbca0f3b05..4b830bbdf32 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -31,9 +31,8 @@ endif() include (CheckTypeSize) check_type_size(size_t SIZEOF_SIZE_T) -check_type_size(uint64_t SIZEOF_UINT64_T) set_property(SOURCE ha_rocksdb.cc APPEND PROPERTY COMPILE_DEFINITIONS - SIZEOF_SIZE_T=${SIZEOF_SIZE_T} SIZEOF_UINT64_T=${SIZEOF_UINT64_T}) + SIZEOF_SIZE_T=${SIZEOF_SIZE_T} SIZEOF_UINT64_T=8) # Optional compression libraries. From 321f5d9d70153a020a3f8442ee84abed248307bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sat, 1 Apr 2017 10:12:28 +0300 Subject: [PATCH 202/233] Fix debian architecture parsing --- debian/autobake-deb.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index bdb5535da99..b6d1711e6ea 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -77,7 +77,7 @@ fi GCCVERSION=$(gcc -dumpversion | sed -e 's/\.\([0-9][0-9]\)/\1/g' -e 's/\.\([0-9]\)/0\1/g' -e 's/^[0-9]\{3,4\}$/&00/') # Don't build rocksdb package if gcc version is less than 4.8 or we are running on # x86 32 bit. 
-if [ $GCCVERSION -lt 40800 ] || [ $(uname -i) -eq "i386" ] || [$(uname -i) -eq "i486"] +if [[ $GCCVERSION -lt 40800 ]] || [[ $(arch) =~ i[346]86 ]] then sed '/Package: mariadb-plugin-rocksdb/,+7d' -i debian/control fi From 9cdc6bcfe4cc77cd550e6769d137dbb3945e186e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 1 Apr 2017 12:56:45 +0300 Subject: [PATCH 203/233] Update test results after changes in search_pattern_in_file.inc --- mysql-test/r/lowercase_fs_on.result | 3 ++- mysql-test/r/named_pipe.result | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/lowercase_fs_on.result b/mysql-test/r/lowercase_fs_on.result index b844b3f77dd..4aa46ddb34e 100644 --- a/mysql-test/r/lowercase_fs_on.result +++ b/mysql-test/r/lowercase_fs_on.result @@ -1,4 +1,5 @@ # # Bug#20198490 : LOWER_CASE_TABLE_NAMES=0 ON WINDOWS LEADS TO PROBLEMS # -FOUND /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err +FOUND 1 /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err + diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index 89b3881eb5d..0fd0fbe5484 100644 --- a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2157,4 +2157,5 @@ Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; connection default; disconnect pipe_con; -FOUND /\[ERROR\] Create named pipe failed/ in second-mysqld.err +FOUND 1 /\[ERROR\] Create named pipe failed/ in second-mysqld.err + From 62ba511314c7c524f42a4ba8bb0213d10498db9c Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 10:37:36 +0000 Subject: [PATCH 204/233] Fix the build on Windows. 
Restore MYSQL_SYSVAR_UINT64_T and MYSQL_SYSVAR_SIZE_T in plugin.h --- include/mysql/plugin.h | 17 +++++++++++++++++ storage/rocksdb/ha_rocksdb.cc | 12 ------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index a5bfa1bbc9e..2f077d8440e 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -393,6 +393,23 @@ DECLARE_MYSQL_SYSVAR_SIMPLE(name, unsigned long long) = { \ PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ #name, comment, check, update, &varname, def, min, max, blk } +#define MYSQL_SYSVAR_UINT64_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, uint64_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } + +#ifdef _WIN64 +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#else +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#endif + #define MYSQL_SYSVAR_ENUM(name, varname, opt, comment, check, update, def, typelib) \ DECLARE_MYSQL_SYSVAR_TYPELIB(name, unsigned long) = { \ PLUGIN_VAR_ENUM | ((opt) & PLUGIN_VAR_MASK), \ diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 72b23f2655c..981acdf22b0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -454,18 +454,6 @@ const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; const int 
RDB_MAX_CHECKSUMS_PCT = 100; -#if SIZEOF_ULONG == SIZEOF_SIZE_T -#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONG -#else -#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONGLONG -#endif - -#if SIZEOF_ULONG == SIZEOF_UINT64_T -#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONG -#else -#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONGLONG -#endif - // TODO: 0 means don't wait at all, and we don't support it yet? static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, "Number of seconds to wait for lock", nullptr, From 58bff40d6c12b59c44b25162e52f158eb4b04212 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 10:47:06 +0000 Subject: [PATCH 205/233] Enable MariaRocks test on Buildbot --- .../collections/{buildbot_suite.bat => buildbot_suites.bat} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename mysql-test/collections/{buildbot_suite.bat => buildbot_suites.bat} (100%) diff --git a/mysql-test/collections/buildbot_suite.bat b/mysql-test/collections/buildbot_suites.bat similarity index 100% rename from mysql-test/collections/buildbot_suite.bat rename to mysql-test/collections/buildbot_suites.bat From 980905c884a469eca08d9ce86cc7d847db82203c Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 11:01:36 +0000 Subject: [PATCH 206/233] Fix result files --- mysql-test/r/lowercase_fs_on.result | 1 - mysql-test/r/named_pipe.result | 1 - 2 files changed, 2 deletions(-) diff --git a/mysql-test/r/lowercase_fs_on.result b/mysql-test/r/lowercase_fs_on.result index 4aa46ddb34e..ddf3fd5f1fb 100644 --- a/mysql-test/r/lowercase_fs_on.result +++ b/mysql-test/r/lowercase_fs_on.result @@ -2,4 +2,3 @@ # Bug#20198490 : LOWER_CASE_TABLE_NAMES=0 ON WINDOWS LEADS TO PROBLEMS # FOUND 1 /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err - diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index 0fd0fbe5484..66da9a874b4 100644 --- 
a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2158,4 +2158,3 @@ drop table t1; connection default; disconnect pipe_con; FOUND 1 /\[ERROR\] Create named pipe failed/ in second-mysqld.err - From 92c24fda08dc08e9d86863761e05ce8e29d8b7e2 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 11:01:36 +0000 Subject: [PATCH 207/233] Fix result files --- mysql-test/r/lowercase_fs_on.result | 1 - mysql-test/r/named_pipe.result | 1 - 2 files changed, 2 deletions(-) diff --git a/mysql-test/r/lowercase_fs_on.result b/mysql-test/r/lowercase_fs_on.result index 4aa46ddb34e..ddf3fd5f1fb 100644 --- a/mysql-test/r/lowercase_fs_on.result +++ b/mysql-test/r/lowercase_fs_on.result @@ -2,4 +2,3 @@ # Bug#20198490 : LOWER_CASE_TABLE_NAMES=0 ON WINDOWS LEADS TO PROBLEMS # FOUND 1 /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err - diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index 0fd0fbe5484..66da9a874b4 100644 --- a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2158,4 +2158,3 @@ drop table t1; connection default; disconnect pipe_con; FOUND 1 /\[ERROR\] Create named pipe failed/ in second-mysqld.err - From df92ba345901c2bc0a3178c33ed927c645567038 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 11:51:24 +0000 Subject: [PATCH 208/233] MariaRocks : Fix looking up sst_dump --- storage/rocksdb/mysql-test/rocksdb/suite.pm | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm index f425274da27..87f34ffc2b1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -1,12 +1,18 @@ package My::Suite::Rocksdb; @ISA = qw(My::Suite); +use My::Find; +use File::Basename; +use strict; sub is_default { not $::opt_embedded_server } -my 
($sst_dump) = grep { -x "$_/sst_dump" } "$::bindir/storage/rocksdb", $::path_client_bindir; +my $sst_dump= +::mtr_exe_maybe_exists( + "$::bindir/storage/rocksdb$::opt_vs_config/sst_dump", + "$::path_client_bindir/sst_dump"); return "RocksDB is not compiled, no sst_dump" unless $sst_dump; -$ENV{MARIAROCKS_SST_DUMP}="$sst_dump/sst_dump"; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; bless { }; From ba85f519b9eae7981ee2672a9927a36648eda703 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 12:39:09 +0000 Subject: [PATCH 209/233] Fixes for innodb crash recovery tests from Serg --- mysql-test/suite/innodb/r/log_file_size.result | 2 +- mysql-test/suite/innodb/t/log_file_size.test | 4 +++- mysql-test/suite/innodb_fts/t/crash_recovery.test | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result index a29a4e81683..9e0cb4dbbf7 100644 --- a/mysql-test/suite/innodb/r/log_file_size.result +++ b/mysql-test/suite/innodb/r/log_file_size.result @@ -25,7 +25,7 @@ ERROR 42000: Unknown storage engine 'InnoDB' FOUND 1 /syntax error in innodb_log_group_home_dir/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=.*/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index 069cfca3585..ae6442a7994 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -29,6 +29,7 @@ BEGIN; INSERT INTO t1 VALUES (42); let $restart_parameters = --innodb-log-file-size=6M; +let $shutdown_timeout=0; --source include/restart_mysqld.inc SELECT * FROM t1; @@ -39,6 +40,7 @@ DELETE FROM t1; let $restart_parameters = 
--innodb-log-files-in-group=3 --innodb-log-file-size=5M; --source include/restart_mysqld.inc +let $shutdown_timeout=; SELECT * FROM t1; @@ -72,7 +74,7 @@ let SEARCH_PATTERN= syntax error in innodb_log_group_home_dir; --source include/restart_mysqld.inc --error ER_UNKNOWN_STORAGE_ENGINE SELECT * FROM t1; -let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=; +let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=.*; --source include/search_pattern_in_file.inc --let $restart_parameters= --debug=d,innodb_log_abort_3 diff --git a/mysql-test/suite/innodb_fts/t/crash_recovery.test b/mysql-test/suite/innodb_fts/t/crash_recovery.test index 8b82e5e68b5..63843ef8511 100644 --- a/mysql-test/suite/innodb_fts/t/crash_recovery.test +++ b/mysql-test/suite/innodb_fts/t/crash_recovery.test @@ -47,6 +47,7 @@ ROLLBACK; --disconnect flush_redo_log --connection default +let $shutdown_timeout=0; --source include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash From 74889de426382d9d8286466e3fce41482ba70216 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 1 Apr 2017 16:11:19 +0300 Subject: [PATCH 210/233] Apply a workaround for MyRocks upstream issue #569 We are hitting this https://github.com/facebook/mysql-5.6/issues/569 and we are not ready to merge the fix currently --- .../mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test | 1 + 2 files changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test index 7e600224dcc..805cf618b2e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -99,3 +99,4 @@ ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; SHOW CREATE TABLE t1; SELECT 
COUNT(*) FROM t1 FORCE INDEX(kb); DROP TABLE t1; +--remove_file $file diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index 029765f5083..a392b371a04 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -118,3 +118,4 @@ EOF # Cleanup disconnect other; DROP TABLE t1, t2, t3; +--remove_file $file From a194390eb8ddd4512751a937cc87f157c4d937e5 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Thu, 30 Mar 2017 13:11:34 +0000 Subject: [PATCH 211/233] Allow to specify C runtime library used for compilation. Default to static release (previously static debug was used in debug builds, but not is appears to be too slow) --- cmake/os/Windows.cmake | 29 +++++++++++++++++++++-------- win/packaging/CPackWixConfig.cmake | 3 ++- win/upgrade_wizard/CMakeLists.txt | 11 +++++++---- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 67108132d8a..38f440d251d 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -63,6 +63,26 @@ IF(MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) ENDIF() IF(MSVC) + SET(MSVC_CRT_TYPE /MT CACHE STRING + "Runtime library - specify runtime library for linking (/MT,/MTd,/MD,/MDd)" + ) + SET(VALID_CRT_TYPES /MTd /MDd /MD /MT) + IF (NOT ";${VALID_CRT_TYPES};" MATCHES ";${MSVC_CRT_TYPE};") + MESSAGE(FATAL_ERROR "Invalid value ${MSVC_CRT_TYPE} for MSVC_CRT_TYPE, choose one of /MT,/MTd,/MD,/MDd ") + ENDIF() + + IF(MSVC_CRT_TYPE MATCHES "/MD") + # Dynamic runtime (DLLs), need to install CRT libraries. 
+ SET(CMAKE_INSTALL_MFC_LIBRARIES TRUE)# upgrade wizard + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT VCCRT) + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS TRUE) + SET(CMAKE_INSTALL_UCRT_LIBRARIES TRUE) + IF(MSVC_CRT_TYPE STREQUAL "/MDd") + SET (CMAKE_INSTALL_DEBUG_LIBRARIES_ONLY TRUE) + ENDIF() + INCLUDE(InstallRequiredSystemLibraries) + ENDIF() + # Enable debug info also in Release build, # and create PDB to be able to analyze crashes. FOREACH(type EXE SHARED MODULE) @@ -85,7 +105,7 @@ IF(MSVC) CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) - STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}") + STRING(REGEX REPLACE "/M[TD][d]?" "${MSVC_CRT_TYPE}" "${flag}" "${${flag}}" ) STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() @@ -117,13 +137,6 @@ IF(MSVC) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /wd4291 /wd4577 /we4099") - IF(CMAKE_SIZEOF_VOID_P MATCHES 8) - # _WIN64 is defined by the compiler itself. - # Yet, we define it here again to work around a bug with Intellisense - # described here: http://tinyurl.com/2cb428. - # Syntax highlighting is important for proper debugger functionality. 
- ADD_DEFINITIONS("-D_WIN64") - ENDIF() ENDIF() # Always link with socket library diff --git a/win/packaging/CPackWixConfig.cmake b/win/packaging/CPackWixConfig.cmake index a8208de659f..e954110ef19 100644 --- a/win/packaging/CPackWixConfig.cmake +++ b/win/packaging/CPackWixConfig.cmake @@ -9,7 +9,7 @@ IF(ESSENTIALS) ENDIF() ELSE() SET(CPACK_COMPONENTS_USED - "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") + "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;VCCRT;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") ENDIF() SET( WIX_FEATURE_MySQLServer_EXTRA_FEATURES "DBInstance;SharedClientServerComponents") @@ -35,6 +35,7 @@ SET(CPACK_COMPONENTS_ALL ${CPACK_ALL}) SET(CPACK_COMPONENT_GROUP_ALWAYSINSTALL_HIDDEN 1) SET(CPACK_COMPONENT_README_GROUP "AlwaysInstall") SET(CPACK_COMPONENT_COMMON_GROUP "AlwaysInstall") +SET(CPACK_COMPONENT_VCCRT_GROUP "AlwaysInstall") # Feature MySQL Server SET(CPACK_COMPONENT_GROUP_MYSQLSERVER_DISPLAY_NAME "MariaDB Server") diff --git a/win/upgrade_wizard/CMakeLists.txt b/win/upgrade_wizard/CMakeLists.txt index 44d6249ea1e..dc4ef67387d 100644 --- a/win/upgrade_wizard/CMakeLists.txt +++ b/win/upgrade_wizard/CMakeLists.txt @@ -16,10 +16,13 @@ IF(NOT MFC_FOUND) ENDIF() RETURN() ENDIF() - -# MFC should be statically linked -SET(CMAKE_MFC_FLAG 1) - +IF(MSVC_CRT_TYPE MATCHES "/MD") + # MFC should be dynamically linked + SET(CMAKE_MFC_FLAG 2) +ELSE() + # MFC should be statically linked + SET(CMAKE_MFC_FLAG 1) +ENDIF() # Enable exception handling (avoids warnings) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") From 20d9fbcf9ae774a50177146aa916a984dd53ec6a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 2 Apr 2017 12:18:23 +0300 Subject: [PATCH 212/233] MDEV-12424: binlog_encryption.encrypted_* tests fail with Can't locate autodie.pm error 
Don't use Perl autodie module as there are platforms where it is not present --- mysql-test/include/search_pattern_in_file.inc | 3 +-- .../rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc | 4 ++-- storage/rocksdb/mysql-test/rocksdb/t/drop_table.test | 4 ++-- storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test | 8 ++++---- storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc | 5 ++--- .../rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test | 4 ++-- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index 0a68fcf6765..3c5529989bb 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -45,14 +45,13 @@ perl; use strict; - use autodie qw(open); die "SEARCH_FILE not set" unless $ENV{SEARCH_FILE}; my @search_files= glob($ENV{SEARCH_FILE}); my $search_pattern= $ENV{SEARCH_PATTERN} or die "SEARCH_PATTERN not set"; my $search_range= $ENV{SEARCH_RANGE}; my $content; foreach my $search_file (@search_files) { - open(FILE, '<', $search_file); + open(FILE, '<', $search_file) || die("Can't open file $search_file: $!"); my $file_content; if ($search_range > 0) { read(FILE, $file_content, $search_range, 0); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc index d80dcebcced..19a16fbe3a7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -39,7 +39,6 @@ let $wait_condition = select count(*) = 0 let NO_MORE_DELETES=$no_more_deletes; perl; - use autodie qw(open); $num_retries=240; $retry=0; print "wait_for_delete: $ENV{no_more_deletes}\n"; @@ -47,7 +46,8 @@ perl; $total_d=$total_e=0; for $f (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>) { # excluding system cf - open D, '-|', "$ENV{MARIAROCKS_SST_DUMP} --command=scan 
--output_hex --file=$f"; + $filename= "$ENV{MARIAROCKS_SST_DUMP} --command=scan --output_hex --file=$f"; + open(D, '-|', $filename) || die("Can't open file $filename: $!"); while () { next unless /'(\d{8})/ and $1 >= 8; $total_d++ if /: [07]/; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index 530f5792bed..0d48ae461ca 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -111,7 +111,6 @@ let $wait_condition = select count(*) = 0 # Check total compacted-away rows for all indices # Check that all indices have been successfully dropped perl; -use autodie qw(open); sub print_array { $str = shift; @@ -123,7 +122,8 @@ sub print_array { } } -open F, '<', "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +$filename= "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); while () { %a = @b = @c = () if /CURRENT_TEST/; if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test index 7d53582e47b..1b5f6c14ee1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -90,9 +90,9 @@ let $table = t5; --source drop_table_repopulate_table.inc perl; -use autodie qw(open); $size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); -open(F, '>', "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"); +$filename= "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"; +open(F, '>', $filename) || die("Can't open file $filename: $!"); print F $size; EOF drop table t1; @@ -110,9 +110,9 @@ let $wait_condition = select count(*) = 0 # Check that space is reclaimed perl; -use autodie qw(open); $size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); -open(F, '<', 
"$ENV{MYSQLTEST_VARDIR}/tmp/size_output"); +$filename= "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); $old=; print "Compacted\n" if $old > $size * 2; EOF diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc index ad5953150c9..9d03aae5c0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc @@ -1,12 +1,11 @@ # run a check script to verify sst files reduced enough during each optimize table perl; -use autodie qw(open); $size += -s $_ for (<$ENV{datadir}/.rocksdb/*.sst>); $file= "$ENV{MYSQL_TMP_DIR}/sst_size.dat"; if (-f $file) { - open(F, '<', $file); + open(F, '<', $file) || die("Can't open file $file: $!"); $old = ; close F; if ($old - $size < 1e6) { @@ -15,7 +14,7 @@ if (-f $file) { print "sst file reduction ok\n"; } } -open(F, '>', $file); +open(F, '>', $file) || die("Can't open file $file: $!"); print F $size; close F; EOF diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test index e643bdfcda8..9a7704c7ab0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -53,12 +53,12 @@ while ($i<10000) --enable_query_log check table t4; perl; -use autodie qw(open); $total=10000; $pct=5; @out=(); -open(F, '<', "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"); +$filename= "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); while() { @out=() if /^CURRENT_TEST:/; if (/(\d+) index entries checked \((\d+) had checksums/) { From 0aa056f642f966ab2c88e91feced9035d5d4f50b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 2 Apr 2017 13:12:19 +0300 Subject: [PATCH 213/233] Disable persistent_cache.test due to upstream MyRocks issue #579. 
--- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index fbb8d645cb4..5247bf2aad2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -45,6 +45,8 @@ rpl_row_not_found : MariaDB doesnt support slave_exec_mode='SEMI_STRICT' blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails +persistent_cache: Upstream RocksDB bug https://github.com/facebook/mysql-5.6/issues/579 + allow_no_pk_concurrent_insert: stress test rocksdb_deadlock_stress_rc: stress test rocksdb_deadlock_stress_rr: stress test From c35a5884b05c380839031b16aad47cc6dc023847 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 2 Apr 2017 14:51:59 +0300 Subject: [PATCH 214/233] Mark rocksdb.bulk_load and rocksdb.add_index_inplace_sstfilewriter as big They use much more disk space than all other tests in rocksdb test suite --- .../mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test | 3 +++ storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test | 3 +++ 2 files changed, 6 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test index 805cf618b2e..1ac382794ae 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -1,5 +1,8 @@ --source include/have_rocksdb.inc +# This test requires ~1.3G of disk space +--source include/big_test.inc + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index a392b371a04..283cedcb6bb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -1,6 +1,9 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +# This test requires ~2.3G of disk space +--source include/big_test.inc + --disable_warnings DROP TABLE IF EXISTS t1, t2, t3; --enable_warnings From 96c48a0d89a1591996eaf9855414b5eb5cfb2d3b Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sun, 2 Apr 2017 12:35:59 +0000 Subject: [PATCH 215/233] rocksdb - fix failing tests on Windows --- storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test | 1 + .../rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test | 2 ++ storage/rocksdb/mysql-test/rocksdb/t/write_sync.test | 1 + 5 files changed, 6 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test index 3c7d61aa15b..b64af16411b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test @@ -40,6 +40,7 @@ while ($i > 0) my $thr = $ENV{'ROCKSDB_THREAD'}; my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'}; open(my $fh, '>>', $fn) || die "perl open($fn): $!"; + binmode $fh; for (my $ii = 0; $ii < $num; $ii++) { print $fh "\\N\t$thr\n" diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test index 31013b75f63..b4c9e2dae3a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/not_windows.inc # Data directory option not availablr # # Create tables with partitions and try to generate an error while creating diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test index 29cc2ccfb5e..a293b9ee6b8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/not_windows.inc # decorated database names is too long, exceeded OS limits # # SHOW TABLE STATUS statement diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test index 1b3a1b144d0..99cb2253d94 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/not_windows.inc + # # Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified # then MyRocks returns an appropriate error. We don't support this diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index d508000bf8a..804889dcec9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/not_windows.inc # MDEV-12427 SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; From 2be18d9b7f12f6eed577b5a4a29e0c7162dfa76e Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 10:37:36 +0000 Subject: [PATCH 216/233] Fix the build on Windows. 
Restore MYSQL_SYSVAR_UINT64_T and MYSQL_SYSVAR_SIZE_T in plugin.h --- include/mysql/plugin.h | 17 +++++++++++++++++ storage/rocksdb/ha_rocksdb.cc | 12 ------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index a5bfa1bbc9e..2f077d8440e 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -393,6 +393,23 @@ DECLARE_MYSQL_SYSVAR_SIMPLE(name, unsigned long long) = { \ PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ #name, comment, check, update, &varname, def, min, max, blk } +#define MYSQL_SYSVAR_UINT64_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, uint64_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } + +#ifdef _WIN64 +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#else +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#endif + #define MYSQL_SYSVAR_ENUM(name, varname, opt, comment, check, update, def, typelib) \ DECLARE_MYSQL_SYSVAR_TYPELIB(name, unsigned long) = { \ PLUGIN_VAR_ENUM | ((opt) & PLUGIN_VAR_MASK), \ diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 72b23f2655c..981acdf22b0 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -454,18 +454,6 @@ const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; const int 
RDB_MAX_CHECKSUMS_PCT = 100; -#if SIZEOF_ULONG == SIZEOF_SIZE_T -#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONG -#else -#define MYSQL_SYSVAR_SIZE_T MYSQL_SYSVAR_ULONGLONG -#endif - -#if SIZEOF_ULONG == SIZEOF_UINT64_T -#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONG -#else -#define MYSQL_SYSVAR_UINT64_T MYSQL_SYSVAR_ULONGLONG -#endif - // TODO: 0 means don't wait at all, and we don't support it yet? static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, "Number of seconds to wait for lock", nullptr, From 440addf63530d70adf2f25eea3d3bff6e09b4f86 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 10:47:06 +0000 Subject: [PATCH 217/233] Enable MariaRocks test on Buildbot --- .../collections/{buildbot_suite.bat => buildbot_suites.bat} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename mysql-test/collections/{buildbot_suite.bat => buildbot_suites.bat} (100%) diff --git a/mysql-test/collections/buildbot_suite.bat b/mysql-test/collections/buildbot_suites.bat similarity index 100% rename from mysql-test/collections/buildbot_suite.bat rename to mysql-test/collections/buildbot_suites.bat From 3599b4989d4bf2f14ccb1f951dc0b9a63fdd8a41 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 11:51:24 +0000 Subject: [PATCH 218/233] MariaRocks : Fix looking up sst_dump --- storage/rocksdb/mysql-test/rocksdb/suite.pm | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm index f425274da27..87f34ffc2b1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -1,12 +1,18 @@ package My::Suite::Rocksdb; @ISA = qw(My::Suite); +use My::Find; +use File::Basename; +use strict; sub is_default { not $::opt_embedded_server } -my ($sst_dump) = grep { -x "$_/sst_dump" } "$::bindir/storage/rocksdb", $::path_client_bindir; +my $sst_dump= +::mtr_exe_maybe_exists( + 
"$::bindir/storage/rocksdb$::opt_vs_config/sst_dump", + "$::path_client_bindir/sst_dump"); return "RocksDB is not compiled, no sst_dump" unless $sst_dump; -$ENV{MARIAROCKS_SST_DUMP}="$sst_dump/sst_dump"; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; bless { }; From 099ba3465e5f93351a5d265e0ef59ab0f3dcb260 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 1 Apr 2017 12:39:09 +0000 Subject: [PATCH 219/233] Fixes for innodb crash recovery tests from Serg --- mysql-test/suite/innodb/r/log_file_size.result | 2 +- mysql-test/suite/innodb/t/log_file_size.test | 4 +++- mysql-test/suite/innodb_fts/t/crash_recovery.test | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result index a29a4e81683..9e0cb4dbbf7 100644 --- a/mysql-test/suite/innodb/r/log_file_size.result +++ b/mysql-test/suite/innodb/r/log_file_size.result @@ -25,7 +25,7 @@ ERROR 42000: Unknown storage engine 'InnoDB' FOUND 1 /syntax error in innodb_log_group_home_dir/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=.*/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index 069cfca3585..ae6442a7994 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -29,6 +29,7 @@ BEGIN; INSERT INTO t1 VALUES (42); let $restart_parameters = --innodb-log-file-size=6M; +let $shutdown_timeout=0; --source include/restart_mysqld.inc SELECT * FROM t1; @@ -39,6 +40,7 @@ DELETE FROM t1; let $restart_parameters = --innodb-log-files-in-group=3 --innodb-log-file-size=5M; --source include/restart_mysqld.inc +let $shutdown_timeout=; SELECT * FROM t1; @@ 
-72,7 +74,7 @@ let SEARCH_PATTERN= syntax error in innodb_log_group_home_dir; --source include/restart_mysqld.inc --error ER_UNKNOWN_STORAGE_ENGINE SELECT * FROM t1; -let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=; +let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=.*; --source include/search_pattern_in_file.inc --let $restart_parameters= --debug=d,innodb_log_abort_3 diff --git a/mysql-test/suite/innodb_fts/t/crash_recovery.test b/mysql-test/suite/innodb_fts/t/crash_recovery.test index 8b82e5e68b5..63843ef8511 100644 --- a/mysql-test/suite/innodb_fts/t/crash_recovery.test +++ b/mysql-test/suite/innodb_fts/t/crash_recovery.test @@ -47,6 +47,7 @@ ROLLBACK; --disconnect flush_redo_log --connection default +let $shutdown_timeout=0; --source include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash From 0cca5bdf0b6d9b1562e7a92ae51d983d2a63a1f0 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Thu, 30 Mar 2017 13:11:34 +0000 Subject: [PATCH 220/233] Allow to specify C runtime library used for compilation. 
Default to static release (previously static debug was used in debug builds, but not is appears to be too slow) --- cmake/os/Windows.cmake | 29 +++++++++++++++++++++-------- win/packaging/CPackWixConfig.cmake | 3 ++- win/upgrade_wizard/CMakeLists.txt | 11 +++++++---- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 67108132d8a..38f440d251d 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -63,6 +63,26 @@ IF(MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) ENDIF() IF(MSVC) + SET(MSVC_CRT_TYPE /MT CACHE STRING + "Runtime library - specify runtime library for linking (/MT,/MTd,/MD,/MDd)" + ) + SET(VALID_CRT_TYPES /MTd /MDd /MD /MT) + IF (NOT ";${VALID_CRT_TYPES};" MATCHES ";${MSVC_CRT_TYPE};") + MESSAGE(FATAL_ERROR "Invalid value ${MSVC_CRT_TYPE} for MSVC_CRT_TYPE, choose one of /MT,/MTd,/MD,/MDd ") + ENDIF() + + IF(MSVC_CRT_TYPE MATCHES "/MD") + # Dynamic runtime (DLLs), need to install CRT libraries. + SET(CMAKE_INSTALL_MFC_LIBRARIES TRUE)# upgrade wizard + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT VCCRT) + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS TRUE) + SET(CMAKE_INSTALL_UCRT_LIBRARIES TRUE) + IF(MSVC_CRT_TYPE STREQUAL "/MDd") + SET (CMAKE_INSTALL_DEBUG_LIBRARIES_ONLY TRUE) + ENDIF() + INCLUDE(InstallRequiredSystemLibraries) + ENDIF() + # Enable debug info also in Release build, # and create PDB to be able to analyze crashes. FOREACH(type EXE SHARED MODULE) @@ -85,7 +105,7 @@ IF(MSVC) CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) - STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}") + STRING(REGEX REPLACE "/M[TD][d]?" 
"${MSVC_CRT_TYPE}" "${flag}" "${${flag}}" ) STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() @@ -117,13 +137,6 @@ IF(MSVC) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /wd4291 /wd4577 /we4099") - IF(CMAKE_SIZEOF_VOID_P MATCHES 8) - # _WIN64 is defined by the compiler itself. - # Yet, we define it here again to work around a bug with Intellisense - # described here: http://tinyurl.com/2cb428. - # Syntax highlighting is important for proper debugger functionality. - ADD_DEFINITIONS("-D_WIN64") - ENDIF() ENDIF() # Always link with socket library diff --git a/win/packaging/CPackWixConfig.cmake b/win/packaging/CPackWixConfig.cmake index a8208de659f..e954110ef19 100644 --- a/win/packaging/CPackWixConfig.cmake +++ b/win/packaging/CPackWixConfig.cmake @@ -9,7 +9,7 @@ IF(ESSENTIALS) ENDIF() ELSE() SET(CPACK_COMPONENTS_USED - "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") + "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;VCCRT;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") ENDIF() SET( WIX_FEATURE_MySQLServer_EXTRA_FEATURES "DBInstance;SharedClientServerComponents") @@ -35,6 +35,7 @@ SET(CPACK_COMPONENTS_ALL ${CPACK_ALL}) SET(CPACK_COMPONENT_GROUP_ALWAYSINSTALL_HIDDEN 1) SET(CPACK_COMPONENT_README_GROUP "AlwaysInstall") SET(CPACK_COMPONENT_COMMON_GROUP "AlwaysInstall") +SET(CPACK_COMPONENT_VCCRT_GROUP "AlwaysInstall") # Feature MySQL Server SET(CPACK_COMPONENT_GROUP_MYSQLSERVER_DISPLAY_NAME "MariaDB Server") diff --git a/win/upgrade_wizard/CMakeLists.txt b/win/upgrade_wizard/CMakeLists.txt index 44d6249ea1e..dc4ef67387d 100644 --- a/win/upgrade_wizard/CMakeLists.txt +++ b/win/upgrade_wizard/CMakeLists.txt @@ -16,10 +16,13 @@ IF(NOT MFC_FOUND) ENDIF() RETURN() ENDIF() - -# MFC 
should be statically linked -SET(CMAKE_MFC_FLAG 1) - +IF(MSVC_CRT_TYPE MATCHES "/MD") + # MFC should be dynamically linked + SET(CMAKE_MFC_FLAG 2) +ELSE() + # MFC should be statically linked + SET(CMAKE_MFC_FLAG 1) +ENDIF() # Enable exception handling (avoids warnings) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") From 85a1d9edbe4d5362528e4fa74f6d698fddc49680 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sun, 2 Apr 2017 12:35:59 +0000 Subject: [PATCH 221/233] rocksdb - fix failing tests on Windows --- storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test | 1 + storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test | 1 + .../rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test | 2 ++ storage/rocksdb/mysql-test/rocksdb/t/write_sync.test | 1 + 5 files changed, 6 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test index 3c7d61aa15b..b64af16411b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test @@ -40,6 +40,7 @@ while ($i > 0) my $thr = $ENV{'ROCKSDB_THREAD'}; my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'}; open(my $fh, '>>', $fn) || die "perl open($fn): $!"; + binmode $fh; for (my $ii = 0; $ii < $num; $ii++) { print $fh "\\N\t$thr\n" diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test index 31013b75f63..b4c9e2dae3a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test @@ -1,5 +1,6 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/not_windows.inc # Data directory option not availablr # # Create tables with partitions and try to generate an error while creating diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test index 29cc2ccfb5e..a293b9ee6b8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/not_windows.inc # decorated database names is too long, exceeded OS limits # # SHOW TABLE STATUS statement diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test index 1b3a1b144d0..99cb2253d94 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/not_windows.inc + # # Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified # then MyRocks returns an appropriate error. 
We don't support this diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index d508000bf8a..804889dcec9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/not_windows.inc # MDEV-12427 SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; From 45a9470ff3e58612f3e8bc9846be68a9482fae60 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sun, 2 Apr 2017 18:22:22 +0000 Subject: [PATCH 222/233] Rename plugin rocksdb_se to rocksdb --- storage/rocksdb/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index b9d78239340..29f5cd32849 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -75,11 +75,11 @@ SET(ROCKSDB_SE_SOURCES rdb_psi.cc ) -MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE +MYSQL_ADD_PLUGIN(rocksdb ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE MODULE_OUTPUT_NAME ha_rocksdb COMPONENT rocksdb-engine) -IF(NOT TARGET rocksdb_se) +IF(NOT TARGET rocksdb) # Bail out if compilation with rocksdb engine is not requested RETURN() ENDIF() @@ -111,7 +111,7 @@ ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib ADD_DEPENDENCIES(rocksdb_aux_lib GenError) TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) -TARGET_LINK_LIBRARIES(rocksdb_se rocksdb_aux_lib) +TARGET_LINK_LIBRARIES(rocksdb rocksdb_aux_lib) IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") From e7e6e30cb7d71572dafa877ae9650ed33110719a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 2 Apr 2017 23:14:12 +0300 Subject: [PATCH 223/233] rocksdb_sys_vars should not be run if MyRocks is not compiled. Copy the detection logic from 'rocksdb' test suite, add a note. 
--- storage/rocksdb/mysql-test/rocksdb/suite.pm | 4 ++++ .../rocksdb/mysql-test/rocksdb_sys_vars/suite.pm | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm index 87f34ffc2b1..6d7c352dd13 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -1,5 +1,9 @@ package My::Suite::Rocksdb; +# +# Note: ../rocksdb_sys_vars/suite.pm file has a similar +# function. If you modify this file, consider modifying that one, too. +# @ISA = qw(My::Suite); use My::Find; use File::Basename; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm index 658a0b3b4d1..c8452b55227 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm @@ -1,8 +1,21 @@ package My::Suite::Rocksdb_sys_vars; +# +# Note: The below is copied from ../rocksdb/suite.pm +# @ISA = qw(My::Suite); +use My::Find; +use File::Basename; +use strict; sub is_default { not $::opt_embedded_server } +my $sst_dump= +::mtr_exe_maybe_exists( + "$::bindir/storage/rocksdb$::opt_vs_config/sst_dump", + "$::path_client_bindir/sst_dump"); +return "RocksDB is not compiled, no sst_dump" unless $sst_dump; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; + bless { }; From 44bc2a0ef71b034a0efea13cde96033743a69651 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 2 Apr 2017 23:38:28 +0300 Subject: [PATCH 224/233] Post-fixes for "Rename plugin rocksdb_se to rocksdb" - Also rename plugin's config file - And fix unit test's CMakeLists.txt --- debian/mariadb-plugin-rocksdb.install | 2 +- storage/rocksdb/unittest/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install index 5e3b5772fc7..ee45a822e0c 100644 --- a/debian/mariadb-plugin-rocksdb.install +++ 
b/debian/mariadb-plugin-rocksdb.install @@ -1,4 +1,4 @@ -etc/mysql/conf.d/rocksdb_se.cnf etc/mysql/mariadb.conf.d +etc/mysql/conf.d/rocksdb.cnf etc/mysql/mariadb.conf.d usr/lib/mysql/plugin/ha_rocksdb.so usr/bin/mysql_ldb usr/bin/sst_dump diff --git a/storage/rocksdb/unittest/CMakeLists.txt b/storage/rocksdb/unittest/CMakeLists.txt index d2a5ea2aff7..de8d0d82aea 100644 --- a/storage/rocksdb/unittest/CMakeLists.txt +++ b/storage/rocksdb/unittest/CMakeLists.txt @@ -1,4 +1,4 @@ -IF (WITH_ROCKSDB_SE_STORAGE_ENGINE) +IF (TARGET rocksdb) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/unittest/mytap ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src From 5c66eb5c9f7f1da4a08d736ebaec161aeef9383c Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 3 Apr 2017 10:29:40 +0300 Subject: [PATCH 225/233] Disable compilation of storage/rocksdb/unittest/test_properties_collector Met additional issues while trying to enable it. --- storage/rocksdb/CMakeLists.txt | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 29f5cd32849..0ae3b240273 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -138,9 +138,12 @@ IF(HAVE_SCHED_GETCPU) ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) ENDIF() -IF(WITH_UNIT_TESTS AND WITH_EMBEDDED_SERVER) - ADD_SUBDIRECTORY(unittest) -ENDIF() +# +# MariaDB: Dynamic plugin build is not suitable with unittest ATM +# +#IF(WITH_UNIT_TESTS AND WITH_EMBEDDED_SERVER) +# ADD_SUBDIRECTORY(unittest) +#ENDIF() ADD_LIBRARY(rocksdb_tools STATIC rocksdb/tools/ldb_tool.cc From 9505c9683930290c1e028043940ae8512a6ca040 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 3 Apr 2017 19:36:54 +0300 Subject: [PATCH 226/233] MDEV-12428 SIGSEGV in buf_page_decrypt_after_read() during DDL Also, some MDEV-11738/MDEV-11581 post-push fixes. 
In MariaDB 10.1, there is no fil_space_t::is_being_truncated field, and the predicates fil_space_t::stop_new_ops and fil_space_t::is_stopping() are interchangeable. I requested the fil_space_t::is_stopping() to be added in the review, but some added checks for fil_space_t::stop_new_ops were not replaced with calls to fil_space_t::is_stopping(). buf_page_decrypt_after_read(): In this low-level I/O operation, we must look up the tablespace if it exists, even though future I/O operations have been blocked on it due to a pending DDL operation, such as DROP TABLE or TRUNCATE TABLE or other table-rebuilding operations (ALTER, OPTIMIZE). Pass a parameter to fil_space_acquire_low() telling that we are performing a low-level I/O operation and the fil_space_t::is_stopping() status should be ignored. --- storage/innobase/btr/btr0scrub.cc | 35 +++++++++----------- storage/innobase/buf/buf0buf.cc | 9 +++--- storage/innobase/fil/fil0fil.cc | 52 ++++++++++++++---------------- storage/innobase/include/fil0fil.h | 17 +++++----- storage/xtradb/btr/btr0scrub.cc | 35 +++++++++----------- storage/xtradb/buf/buf0buf.cc | 10 +++--- storage/xtradb/fil/fil0fil.cc | 52 ++++++++++++++---------------- storage/xtradb/include/fil0fil.h | 17 +++++----- 8 files changed, 108 insertions(+), 119 deletions(-) diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc index 560d2ece6c0..e9434c9f778 100644 --- a/storage/innobase/btr/btr0scrub.cc +++ b/storage/innobase/btr/btr0scrub.cc @@ -129,15 +129,15 @@ btr_scrub_lock_dict_func(ulint space_id, bool lock_to_close_table, * if we don't lock to close a table, we check if space * is closing, and then instead give up */ - if (lock_to_close_table == false) { - fil_space_t* space = fil_space_acquire(space_id); - if (!space || space->stop_new_ops) { - if (space) { - fil_space_release(space); - } + if (lock_to_close_table) { + } else if (fil_space_t* space = fil_space_acquire(space_id)) { + bool stopping = space->is_stopping(); + 
fil_space_release(space); + if (stopping) { return false; } - fil_space_release(space); + } else { + return false; } os_thread_sleep(250000); @@ -197,18 +197,15 @@ btr_scrub_table_close_for_thread( return; } - fil_space_t* space = fil_space_acquire(scrub_data->space); - - /* If tablespace is not marked as stopping perform - the actual close. */ - if (space && !space->is_stopping()) { - mutex_enter(&dict_sys->mutex); - /* perform the actual closing */ - btr_scrub_table_close(scrub_data->current_table); - mutex_exit(&dict_sys->mutex); - } - - if (space) { + if (fil_space_t* space = fil_space_acquire(scrub_data->space)) { + /* If tablespace is not marked as stopping perform + the actual close. */ + if (!space->is_stopping()) { + mutex_enter(&dict_sys->mutex); + /* perform the actual closing */ + btr_scrub_table_close(scrub_data->current_table); + mutex_exit(&dict_sys->mutex); + } fil_space_release(space); } diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index c7e2183fa7b..589fd734ba3 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -6250,13 +6250,12 @@ buf_page_decrypt_after_read( return (true); } - fil_space_t* space = fil_space_acquire(bpage->space); - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_space_t* space = fil_space_acquire(bpage->space, true); /* Page is encrypted if encryption information is found from tablespace and page contains used key_version. This is true also for pages first compressed and then encrypted. 
*/ - if (!crypt_data) { + if (!space || !space->crypt_data) { key_version = 0; } @@ -6340,6 +6339,8 @@ buf_page_decrypt_after_read( } } - fil_space_release(space); + if (space != NULL) { + fil_space_release(space); + } return (success); } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 627a63aaae0..6fec41839ee 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -6325,16 +6325,12 @@ fil_flush( { mutex_enter(&fil_system->mutex); - fil_space_t* space = fil_space_get_by_id(space_id); - - if (!space || space->stop_new_ops) { - mutex_exit(&fil_system->mutex); - - return; + if (fil_space_t* space = fil_space_get_by_id(space_id)) { + if (!space->is_stopping()) { + fil_flush_low(space); + } } - fil_flush_low(space); - mutex_exit(&fil_system->mutex); } @@ -6374,8 +6370,7 @@ fil_flush_file_spaces( space; space = UT_LIST_GET_NEXT(unflushed_spaces, space)) { - if (space->purpose == purpose && !space->stop_new_ops) { - + if (space->purpose == purpose && !space->is_stopping()) { space_ids[n_space_ids++] = space->id; } } @@ -7276,12 +7271,13 @@ Used by background threads that do not necessarily hold proper locks for concurrency control. 
@param[in] id tablespace ID @param[in] silent whether to silently ignore missing tablespaces -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ inline fil_space_t* -fil_space_acquire_low( - ulint id, - bool silent) +fil_space_acquire_low(ulint id, bool silent, bool for_io = false) { fil_space_t* space; @@ -7294,7 +7290,7 @@ fil_space_acquire_low( ib_logf(IB_LOG_LEVEL_WARN, "Trying to access missing" " tablespace " ULINTPF ".", id); } - } else if (space->stop_new_ops) { + } else if (!for_io && space->is_stopping()) { space = NULL; } else { space->n_pending_ops++; @@ -7309,22 +7305,24 @@ fil_space_acquire_low( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io) { - return(fil_space_acquire_low(id, false)); + return(fil_space_acquire_low(id, false, for_io)); } /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) { return(fil_space_acquire_low(id, true)); } @@ -7332,8 +7330,7 @@ fil_space_acquire_silent( /** Release a tablespace acquired with fil_space_acquire(). 
@param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space) +fil_space_release(fil_space_t* space) { mutex_enter(&fil_system->mutex); ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); @@ -7351,8 +7348,7 @@ If NULL, use the first fil_space_t on fil_system->space_list. @return pointer to the next fil_space_t. @retval NULL if this was the last*/ fil_space_t* -fil_space_next( - fil_space_t* prev_space) +fil_space_next(fil_space_t* prev_space) { fil_space_t* space=prev_space; @@ -7375,8 +7371,8 @@ fil_space_next( fil_ibd_create(), or dropped, or !tablespace. */ while (space != NULL && (UT_LIST_GET_LEN(space->chain) == 0 - || space->stop_new_ops - || space->purpose != FIL_TABLESPACE)) { + || space->is_stopping() + || space->purpose != FIL_TABLESPACE)) { space = UT_LIST_GET_NEXT(space_list, space); } diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index b4ee931fdbe..3d7ae49f8ae 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -645,27 +645,28 @@ fil_write_flushed_lsn_to_data_files( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io = false) MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. 
@param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) MY_ATTRIBUTE((warn_unused_result)); /** Release a tablespace acquired with fil_space_acquire(). @param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space); +fil_space_release(fil_space_t* space); /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. diff --git a/storage/xtradb/btr/btr0scrub.cc b/storage/xtradb/btr/btr0scrub.cc index 560d2ece6c0..e9434c9f778 100644 --- a/storage/xtradb/btr/btr0scrub.cc +++ b/storage/xtradb/btr/btr0scrub.cc @@ -129,15 +129,15 @@ btr_scrub_lock_dict_func(ulint space_id, bool lock_to_close_table, * if we don't lock to close a table, we check if space * is closing, and then instead give up */ - if (lock_to_close_table == false) { - fil_space_t* space = fil_space_acquire(space_id); - if (!space || space->stop_new_ops) { - if (space) { - fil_space_release(space); - } + if (lock_to_close_table) { + } else if (fil_space_t* space = fil_space_acquire(space_id)) { + bool stopping = space->is_stopping(); + fil_space_release(space); + if (stopping) { return false; } - fil_space_release(space); + } else { + return false; } os_thread_sleep(250000); @@ -197,18 +197,15 @@ btr_scrub_table_close_for_thread( return; } - fil_space_t* space = fil_space_acquire(scrub_data->space); - - /* If tablespace is not marked as stopping perform - the actual close. */ - if (space && !space->is_stopping()) { - mutex_enter(&dict_sys->mutex); - /* perform the actual closing */ - btr_scrub_table_close(scrub_data->current_table); - mutex_exit(&dict_sys->mutex); - } - - if (space) { + if (fil_space_t* space = fil_space_acquire(scrub_data->space)) { + /* If tablespace is not marked as stopping perform + the actual close. 
*/ + if (!space->is_stopping()) { + mutex_enter(&dict_sys->mutex); + /* perform the actual closing */ + btr_scrub_table_close(scrub_data->current_table); + mutex_exit(&dict_sys->mutex); + } fil_space_release(space); } diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 5016445b8e7..5b448999cca 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -6410,14 +6410,12 @@ buf_page_decrypt_after_read( return (true); } - fil_space_t* space = fil_space_acquire(bpage->space); - - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_space_t* space = fil_space_acquire(bpage->space, true); /* Page is encrypted if encryption information is found from tablespace and page contains used key_version. This is true also for pages first compressed and then encrypted. */ - if (!crypt_data) { + if (!space || !space->crypt_data) { key_version = 0; } @@ -6501,6 +6499,8 @@ buf_page_decrypt_after_read( } } - fil_space_release(space); + if (space != NULL) { + fil_space_release(space); + } return (success); } diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index a116bfad99d..e7244d719c8 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -6389,16 +6389,12 @@ fil_flush( { mutex_enter(&fil_system->mutex); - fil_space_t* space = fil_space_get_by_id(space_id); - - if (!space || space->stop_new_ops) { - mutex_exit(&fil_system->mutex); - - return; + if (fil_space_t* space = fil_space_get_by_id(space_id)) { + if (!space->is_stopping()) { + fil_flush_low(space); + } } - fil_flush_low(space); - mutex_exit(&fil_system->mutex); } @@ -6438,8 +6434,7 @@ fil_flush_file_spaces( space; space = UT_LIST_GET_NEXT(unflushed_spaces, space)) { - if (space->purpose == purpose && !space->stop_new_ops) { - + if (space->purpose == purpose && !space->is_stopping()) { space_ids[n_space_ids++] = space->id; } } @@ -7388,12 +7383,13 @@ Used by background threads that do not necessarily hold proper locks for 
concurrency control. @param[in] id tablespace ID @param[in] silent whether to silently ignore missing tablespaces -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ inline fil_space_t* -fil_space_acquire_low( - ulint id, - bool silent) +fil_space_acquire_low(ulint id, bool silent, bool for_io = false) { fil_space_t* space; @@ -7407,7 +7403,7 @@ fil_space_acquire_low( " tablespace " ULINTPF ".", id); ut_error; } - } else if (space->stop_new_ops) { + } else if (!for_io && space->is_stopping()) { space = NULL; } else { space->n_pending_ops++; @@ -7422,22 +7418,24 @@ fil_space_acquire_low( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io) { - return(fil_space_acquire_low(id, false)); + return(fil_space_acquire_low(id, false, for_io)); } /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) { return(fil_space_acquire_low(id, true)); } @@ -7445,8 +7443,7 @@ fil_space_acquire_silent( /** Release a tablespace acquired with fil_space_acquire(). 
@param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space) +fil_space_release(fil_space_t* space) { mutex_enter(&fil_system->mutex); ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); @@ -7464,8 +7461,7 @@ If NULL, use the first fil_space_t on fil_system->space_list. @return pointer to the next fil_space_t. @retval NULL if this was the last*/ fil_space_t* -fil_space_next( - fil_space_t* prev_space) +fil_space_next(fil_space_t* prev_space) { fil_space_t* space=prev_space; @@ -7488,8 +7484,8 @@ fil_space_next( fil_ibd_create(), or dropped, or !tablespace. */ while (space != NULL && (UT_LIST_GET_LEN(space->chain) == 0 - || space->stop_new_ops - || space->purpose != FIL_TABLESPACE)) { + || space->is_stopping() + || space->purpose != FIL_TABLESPACE)) { space = UT_LIST_GET_NEXT(space_list, space); } diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index b80df057351..698039afede 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -650,27 +650,28 @@ fil_write_flushed_lsn_to_data_files( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io = false) MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. 
@param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) MY_ATTRIBUTE((warn_unused_result)); /** Release a tablespace acquired with fil_space_acquire(). @param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space); +fil_space_release(fil_space_t* space); /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. From 00ab154d49853e20f48a516897e14bf67c58671e Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 3 Apr 2017 15:59:38 -0700 Subject: [PATCH 227/233] Fixed bug mdev-10454. The patch actually fixes the old defect of the optimizer that could not extract keys for range access from IN predicates with row arguments. This problem was resolved in the mysql-5.7 code. The patch supersedes what was done there: - it can build range access when not all components of the first row argument are refer to the columns of the table for which the range access is constructed. - it can use equality predicates to build range access to the table that is not referred to in this argument. 
--- mysql-test/r/range.result | 674 +++++++++++++++++++++++++++++ mysql-test/r/range_mrr_icp.result | 684 ++++++++++++++++++++++++++++++ mysql-test/t/range.test | 192 +++++++++ sql/item_cmpfunc.h | 3 + sql/item_row.h | 7 + sql/opt_range.cc | 217 +++++++++- sql/sql_select.cc | 58 ++- 7 files changed, 1822 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 5027fffe047..28f5cf635d0 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -2332,3 +2332,677 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# MDEV-10454: range access keys extracted +# from IN () +# +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; +insert into t1 values +(1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), +(2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), +(2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), +(3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), +(13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), +(17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), +(12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), (13,12,'uuuw'), (12,12,'wxz'), +(15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), +(1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), +(1,2,'uua'), (2,1,'wb'), (5,5,'wc'), (2,3,'wa'), (7,7,'xxxya'), +(3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), +(22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), +(34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), +(5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), +(82,12,'wxa'), (85,15,'xd'); +# range access to t1 by 2-component keys for index idx +explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 10 NULL 7 Using where +explain format=json select * from t1 where 
(a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "10", + "used_key_parts": ["a", "b"], + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in (((2,3)),((3,3)),((8,8)),((7,7)))" + } + } +} +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +prepare stmt from "select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7))"; +execute stmt; +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +execute stmt; +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +deallocate prepare stmt; +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where +explain format=json select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b + t1.a) in (((4,9)),((8,8)),((7,7)))" + } + } +} +select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +a b c +4 5 ww +4 5 wa +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where +explain format=json select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": 
"range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))" + } + } +} +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +a b c +4 3 zyx +4 3 zya +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; +create table t2( +d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; +insert into t2 values +(9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), +(6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), +(9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), +(6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); +# join order: (t2,t1) with ref access of t1 +# range access to t1 by keys for index idx1 +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 3 Using index condition; Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 8 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "rows": 3, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((3,3)),((7,7)),((2,2)))" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 8, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +a b c d e f +2 1 w 2 2 da +2 1 wb 2 2 da +2 2 yyy 2 2 da +2 2 wxz 2 2 da +2 2 yya 2 2 da +2 2 wxa 2 2 da +2 3 yy 2 2 da +2 3 ww 2 2 da +2 3 ya 2 2 da +2 3 wa 2 2 da +3 2 
uuuw 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +insert into t2 values +(4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), +(2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), +(4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), +(2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), +(4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), +(2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), +(14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), +(12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), +(24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), +(22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), +(34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), (35,35,'f'), +(32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), (35,35,'l'), +(44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), +(42,43,'g'), (47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition +1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 6, + "filtered": 100, + "index_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + 
"access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 12, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in (((3,3)),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +prepare stmt from "select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1"; +execute stmt; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +execute stmt; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +deallocate prepare stmt; +insert into t1 select * from t1; +# join order: (t2,t1) with ref access of t1 +# range access to t2 by keys for index idx2 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + 
"key_length": "5", + "used_key_parts": ["e"], + "rows": 6, + "filtered": 100, + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1 and t2.d is not null" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); +# join order: (t2,t1) with ref access of t1 +# range access to t2 by 2-component keys for index idx3 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx3 idx3 10 NULL 5 Using index condition; Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "10", + "used_key_parts": ["d", "e"], + "rows": 5, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 
xxxxx 7 7 h +7 8 xxxxa 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 15 Using index condition +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 15, + "filtered": 100, + "index_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +a b c d e f +4 3 zyx 4 5 a +4 3 zya 4 5 a +4 3 zyx 4 5 a +4 3 zya 4 5 a +4 5 ww 4 5 a +4 5 wa 4 5 a +4 5 ww 4 5 a +4 5 wa 4 5 a +7 7 xxxyy 7 8 b +7 7 xxxya 7 8 b +7 7 xxxyy 7 8 b +7 7 xxxya 7 8 b +7 8 xxxxx 7 8 b +7 8 xxxxa 7 8 b +7 8 xxxxx 7 8 b +7 8 xxxxa 7 8 b +# join order: (t1,t2) with ref access of t2 +# no range access +explain select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx NULL NULL NULL 144 Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + 
"table_name": "t1", + "access_type": "ALL", + "possible_keys": ["idx"], + "rows": 144, + "filtered": 100, + "attached_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,2) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f +2 1 w 2 1 e 
+2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +prepare stmt from "select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +deallocate prepare stmt; +create table t3 (id int primary key, v int) engine=myisam; +insert into t3 values +(3,2), (1,1), (4,12), (2,15); +# join order: (t3,t1,t2) with const t3 and ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1 +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "const", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["const"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,1 + 1) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": 
"length(t2.f) = 1" + } + } +} +select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f id v +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +# IN predicate is always FALSE +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +prepare stmt from "select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f id v +execute stmt; +a b c d e f id v +deallocate prepare stmt; +set optimizer_switch=@save_optimizer_switch; +drop table t1,t2,t3; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/range_mrr_icp.result b/mysql-test/r/range_mrr_icp.result index 7d009070150..f2860aaab76 100644 --- a/mysql-test/r/range_mrr_icp.result +++ b/mysql-test/r/range_mrr_icp.result @@ -2334,4 +2334,688 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# MDEV-10454: range access keys extracted +# from IN () +# +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; +insert into t1 values +(1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), +(2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), +(2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), +(3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), +(13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), +(17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), +(12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), (13,12,'uuuw'), (12,12,'wxz'), +(15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), +(1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), +(1,2,'uua'), (2,1,'wb'), (5,5,'wc'), 
(2,3,'wa'), (7,7,'xxxya'), +(3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), +(22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), +(34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), +(5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), +(82,12,'wxa'), (85,15,'xd'); +# range access to t1 by 2-component keys for index idx +explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 10 NULL 7 Using where; Rowid-ordered scan +explain format=json select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "10", + "used_key_parts": ["a", "b"], + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in (((2,3)),((3,3)),((8,8)),((7,7)))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +prepare stmt from "select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7))"; +execute stmt; +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +execute stmt; +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +deallocate prepare stmt; +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where; Rowid-ordered scan +explain format=json select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + 
"possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b + t1.a) in (((4,9)),((8,8)),((7,7)))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +a b c +4 5 ww +4 5 wa +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where; Rowid-ordered scan +explain format=json select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +a b c +4 3 zyx +4 3 zya +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; +create table t2( +d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; +insert into t2 values +(9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), +(6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), +(9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), +(6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); +# join order: (t2,t1) with ref access of t1 +# range access to t1 by keys for index idx1 +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 3 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 8 +explain format=json select * from t1,t2 +where a = d and (a,e) in 
((3,3),(7,7),(2,2)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "rows": 3, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((3,3)),((7,7)),((2,2)))", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 8, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +a b c d e f +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +2 1 w 2 2 da +2 1 wb 2 2 da +2 2 yyy 2 2 da +2 2 wxz 2 2 da +2 2 yya 2 2 da +2 2 wxa 2 2 da +2 3 yy 2 2 da +2 3 ww 2 2 da +2 3 ya 2 2 da +2 3 wa 2 2 da +insert into t2 values +(4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), +(2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), +(4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), +(2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), +(4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), +(2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), +(14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), +(12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), +(24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), +(22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), +(34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), (35,35,'f'), +(32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), (35,35,'l'), +(44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), +(42,43,'g'), 
(47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 6, + "filtered": 100, + "index_condition": "t1.a is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 12, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in (((3,3)),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +a b c d e f +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +prepare stmt from "select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1"; +execute stmt; +a b c d e f +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +execute stmt; +a b c d e f +3 3 zzzz 3 3 i 
+3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +deallocate prepare stmt; +insert into t1 select * from t1; +# join order: (t2,t1) with ref access of t1 +# range access to t2 by keys for index idx2 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["e"], + "rows": 6, + "filtered": 100, + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1 and t2.d is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); +# join order: (t2,t1) with ref access of t1 +# range access to t2 by 2-component keys for index idx3 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx3 idx3 10 NULL 5 
Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "10", + "used_key_parts": ["d", "e"], + "rows": 5, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 15 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 15, + "filtered": 100, + "index_condition": "t1.a is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", 
+ "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +a b c d e f +4 5 ww 4 5 a +7 8 xxxxx 7 8 b +4 3 zyx 4 5 a +7 7 xxxyy 7 8 b +4 5 wa 4 5 a +7 8 xxxxa 7 8 b +4 3 zya 4 5 a +7 7 xxxya 7 8 b +4 5 ww 4 5 a +7 8 xxxxx 7 8 b +4 3 zyx 4 5 a +7 7 xxxyy 7 8 b +4 5 wa 4 5 a +7 8 xxxxa 7 8 b +4 3 zya 4 5 a +7 7 xxxya 7 8 b +# join order: (t1,t2) with ref access of t2 +# no range access +explain select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx NULL NULL NULL 144 Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "possible_keys": ["idx"], + "rows": 144, + "filtered": 100, + "attached_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 
1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,2) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +prepare stmt from "select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +deallocate prepare stmt; +create table t3 (id int primary key, v int) engine=myisam; +insert into t3 values +(3,2), (1,1), (4,12), (2,15); +# join order: (t3,t1,t2) with const t3 and ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t3 const PRIMARY PRIMARY 4 const 1 +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "const", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["const"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,1 + 1) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f id v +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +# IN predicate is always FALSE +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +prepare stmt from "select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f id v +execute stmt; +a b c d e f id v 
+deallocate prepare stmt; +set optimizer_switch=@save_optimizer_switch; +drop table t1,t2,t3; +# +# End of 10.2 tests +# set optimizer_switch=@mrr_icp_extra_tmp; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index 30f4419bd7e..ab951809b7a 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -1857,3 +1857,195 @@ DROP TABLE t1; --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # MDEV-10454: range access keys extracted +--echo # from IN () +--echo # + +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; + +insert into t1 values + (1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), + (2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), + (2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), + (3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), + (13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), + (17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), + (12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), (13,12,'uuuw'), (12,12,'wxz'), + (15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), + (1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), + (1,2,'uua'), (2,1,'wb'), (5,5,'wc'), (2,3,'wa'), (7,7,'xxxya'), + (3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), + (22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), + (34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), + (5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), + (82,12,'wxa'), (85,15,'xd'); + +--echo # range access to t1 by 2-component keys for index idx +let $q1= +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +eval explain $q1; +eval explain format=json $q1; +eval $q1; +eval prepare stmt from "$q1"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +--echo # range access to t1 by 1-component keys for index idx +let $q2= +select * from t1 where (a,b+a) IN 
((4,9),(8,8),(7,7)); +eval explain $q2; +eval explain format=json $q2; +eval $q2; + +--echo # range access to t1 by 1-component keys for index idx +let $q3= +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +eval explain $q3; +eval explain format=json $q3; +eval $q3; + +# this setting should be removed after fixes for mdev-12186, mdev-12187 +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; + +create table t2( + d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; + +insert into t2 values + (9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), + (6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), + (9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), + (6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t1 by keys for index idx1 +let $q4= +select * from t1,t2 + where a = d and (a,e) in ((3,3),(7,7),(2,2)); +eval explain $q4; +eval explain format=json $q4; +eval $q4; + +insert into t2 values + (4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), + (2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), + (4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), + (2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), + (4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), + (2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), + (14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), + (12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), + (24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), + (22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), + (34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), (35,35,'f'), + (32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), (35,35,'l'), + 
(44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), + (42,43,'g'), (47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q5= +select * from t1,t2 + where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +eval explain $q5; +eval explain format=json $q5; +eval $q5; +eval prepare stmt from "$q5"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +insert into t1 select * from t1; + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t2 by keys for index idx2 +let $q6= +select * from t1,t2 + where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +eval explain $q6; +eval explain format=json $q6; +eval $q6; + +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t2 by 2-component keys for index idx3 +let $q7= +select * from t1,t2 + where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +eval explain $q7; +eval explain format=json $q7; +eval $q7; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q8= +select * from t1,t2 + where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +eval explain $q8; +eval explain format=json $q8; +eval $q8; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # no range access +let $q9= +select * from t1,t2 + where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +eval explain $q9; +eval explain format=json $q9; +eval $q9; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q10= +select * from t1,t2 + where a = d and (a,2) in ((2,2),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q10; +eval explain format=json $q10; +eval $q10; +eval 
prepare stmt from "$q10"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +create table t3 (id int primary key, v int) engine=myisam; + +insert into t3 values + (3,2), (1,1), (4,12), (2,15); + +--echo # join order: (t3,t1,t2) with const t3 and ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q11= +select * from t1,t2,t3 + where id = 1 and a = d and + (a,v+1) in ((2,2),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q11; +eval explain format=json $q11; +eval $q11; + +--echo # IN predicate is always FALSE +let $q12= +select * from t1,t2,t3 + where id = 1 and a = d and + (a,v+1) in ((9,9),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q12; +eval prepare stmt from "$q12"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +set optimizer_switch=@save_optimizer_switch; + +drop table t1,t2,t3; + +--echo # +--echo # End of 10.2 tests +--echo # + diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 1a2cc3a6c81..9c277220771 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1662,6 +1662,7 @@ public: void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, SARGABLE_PARAM **sargables); SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + SEL_TREE *get_func_row_mm_tree(RANGE_OPT_PARAM *param, Item_row *key_row); Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) { /* @@ -1714,6 +1715,7 @@ public: cmp_item *make_same(); void store_value_by_template(THD *thd, cmp_item *tmpl, Item *); friend void Item_func_in::fix_length_and_dec(); + cmp_item *get_comparator(uint i) { return comparators[i]; } }; @@ -1727,6 +1729,7 @@ public: uchar *get_value(Item *item); friend void Item_func_in::fix_length_and_dec(); Item_result result_type() { return ROW_RESULT; } + cmp_item *get_cmp_item() { return &tmp; } }; /* Functions used by where clause */ diff --git a/sql/item_row.h b/sql/item_row.h index 
bbfebb56010..26468336dc8 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -119,6 +119,13 @@ public: bool check_cols(uint c); bool null_inside() { return with_null; }; void bring_value(); + + Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + { + Item_args::propagate_equal_fields(thd, Context_identity(), cond); + return this; + } + bool check_vcol_func_processor(void *arg) {return FALSE; } Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy(thd, mem_root, this); } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 6d088cad91e..d5de96b860a 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -7210,6 +7210,205 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param, } +/* + The structure Key_col_info is purely auxiliary and is used + only in the method Item_func_in::get_func_row_mm_tree +*/ +struct Key_col_info { + Field *field; /* If != NULL the column can be used for keys */ + cmp_item *comparator; /* If != 0 the column can be evaluated */ +}; + +/** + Build SEL_TREE for the IN predicate whose arguments are rows + + @param param PARAM from SQL_SELECT::test_quick_select + @param key_row First operand of the IN predicate + + @note + The function builds a SEL_TREE for in IN predicate in the case + when the predicate uses row arguments. First the function + detects among the components of the key_row (c[1],...,c[n]) taken + from in the left part the predicate those that can be usable + for building SEL_TREE (c[i1],...,c[ik]). They have to contain + items whose real items are field items referring to the current + table or equal to the items referring to the current table. + For the remaining components of the row it checks whether they + can be evaluated. The result of the analysis is put into the + array of structures of the type Key_row_col_info. + + After this the function builds the SEL_TREE for the following + formula that can be inferred from the given IN predicate: + c[i11]=a[1][i11] AND ... 
AND c[i1k1]=a[1][i1k1] + OR + ... + OR + c[im1]=a[m][im1] AND ... AND c[imkm]=a[m][imkm]. + Here a[1],...,a[m] are all arguments of the IN predicate from + the right part and for each j ij1,...,ijkj is a subset of + i1,...,ik such that a[j][ij1],...,a[j][ijkj] can be evaluated. + + If for some j there no a[j][i1],...,a[j][ik] can be evaluated + then no SEL_TREE can be built for this predicate and the + function immediately returns 0. + + If for some j by using evaluated values of key_row it can be + proven that c[ij1]=a[j][ij1] AND ... AND c[ijkj]=a[j][ijkj] + is always FALSE then this disjunct is omitted. + + @returns + the built SEL_TREE if it can be constructed + 0 - otherwise. +*/ + +SEL_TREE *Item_func_in::get_func_row_mm_tree(RANGE_OPT_PARAM *param, + Item_row *key_row) +{ + DBUG_ENTER("Item_func_in::get_func_row_mm_tree"); + + if (negated) + DBUG_RETURN(0); + + SEL_TREE *res_tree= 0; + uint used_key_cols= 0; + uint col_comparators= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); + uint row_cols= key_row->cols(); + Dynamic_array key_cols_info(row_cols); + cmp_item_row *row_cmp_item= (cmp_item_row *) + (array ? ((in_row *) array)->get_cmp_item() : + cmp_items[(uint) ROW_RESULT]); + + Item **key_col_ptr= key_row->addr(0); + for(uint i= 0; i < row_cols; i++, key_col_ptr++) + { + Key_col_info key_col_info= {0, NULL}; + Item *key_col= *key_col_ptr; + if (key_col->real_item()->type() == Item::FIELD_ITEM) + { + /* + The i-th component of key_row can be used for key access if + key_col->real_item() points to a field of the current table or + if it is equal to a field item pointing to such a field. 
+ */ + Item_field *col_field_item= (Item_field *) (key_col->real_item()); + Field *key_col_field= col_field_item->field; + if (key_col_field->table->map != param->current_table) + { + Item_equal *item_equal= col_field_item->item_equal; + if (item_equal) + { + Item_equal_fields_iterator it(*item_equal); + while (it++) + { + key_col_field= it.get_curr_field(); + if (key_col_field->table->map == param->current_table) + break; + } + } + } + if (key_col_field->table->map == param->current_table) + { + key_col_info.field= key_col_field; + used_key_cols++; + } + } + else if (!(key_col->used_tables() & (param_comp | param->current_table)) + && !key_col->is_expensive()) + { + /* The i-th component of key_row can be evaluated */ + + /* See the comment in Item::get_mm_tree_for_const */ + MEM_ROOT *tmp_root= param->mem_root; + param->thd->mem_root= param->old_root; + + key_col->bring_value(); + key_col_info.comparator= row_cmp_item->get_comparator(i); + key_col_info.comparator->store_value(key_col); + col_comparators++; + + param->thd->mem_root= tmp_root; + } + key_cols_info.push(key_col_info); + } + + if (!used_key_cols) + DBUG_RETURN(0); + + uint omitted_tuples= 0; + Item **arg_start= arguments() + 1; + Item **arg_end= arg_start + argument_count() - 1; + for (Item **arg= arg_start ; arg < arg_end; arg++) + { + uint i; + + /* + First check whether the disjunct constructed for *arg + is really needed + */ + Item_row *arg_tuple= (Item_row *) (*arg); + if (col_comparators) + { + MEM_ROOT *tmp_root= param->mem_root; + param->thd->mem_root= param->old_root; + for (i= 0; i < row_cols; i++) + { + Key_col_info *key_col_info= &key_cols_info.at(i); + if (key_col_info->comparator) + { + Item *arg_col= arg_tuple->element_index(i); + if (!(arg_col->used_tables() & (param_comp | param->current_table)) && + !arg_col->is_expensive() && + key_col_info->comparator->cmp(arg_col)) + { + omitted_tuples++; + break; + } + } + } + param->thd->mem_root= tmp_root; + if (i < row_cols) + continue; + } + 
+ /* The disjunct for *arg is needed: build it. */ + SEL_TREE *and_tree= 0; + Item **arg_col_ptr= arg_tuple->addr(0); + for (uint i= 0; i < row_cols; i++, arg_col_ptr++) + { + Key_col_info *key_col_info= &key_cols_info.at(i); + if (!key_col_info->field) + continue; + Item *arg_col= *arg_col_ptr; + if (!(arg_col->used_tables() & (param_comp | param->current_table)) && + !arg_col->is_expensive()) + { + and_tree= tree_and(param, and_tree, + get_mm_parts(param, + key_col_info->field, + Item_func::EQ_FUNC, + arg_col->real_item())); + } + } + if (!and_tree) + { + res_tree= 0; + break; + } + /* Join the disjunct the the OR tree that is being constructed */ + res_tree= !res_tree ? and_tree : tree_or(param, res_tree, and_tree); + } + if (omitted_tuples == argument_count() - 1) + { + /* It's turned out that all disjuncts are always FALSE */ + res_tree= new (param->mem_root) SEL_TREE(SEL_TREE::IMPOSSIBLE, + param->mem_root, param->keys); + } + DBUG_RETURN(res_tree); +} + + /* Build conjunction of all SEL_TREEs for a simple predicate applying equalities @@ -7544,12 +7743,22 @@ SEL_TREE *Item_func_in::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) if (const_item()) DBUG_RETURN(get_mm_tree_for_const(param)); - if (key_item()->real_item()->type() != Item::FIELD_ITEM) + SEL_TREE *tree= 0; + switch (key_item()->real_item()->type()) { + case Item::FIELD_ITEM: + tree= get_full_func_mm_tree(param, + (Item_field*) (key_item()->real_item()), + NULL); + break; + case Item::ROW_ITEM: + tree= get_func_row_mm_tree(param, + (Item_row *) (key_item()->real_item())); + break; + default: DBUG_RETURN(0); - Item_field *field= (Item_field*) (key_item()->real_item()); - SEL_TREE *tree= get_full_func_mm_tree(param, field, NULL); + } DBUG_RETURN(tree); -} +} SEL_TREE *Item_equal::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 54b0b01559b..151e341c49f 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -4709,6 +4709,8 @@ static 
uint get_semi_join_select_list_index(Field *field) @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates + @param row_col_no if = n that > 0 then field is compared only + against the n-th component of row values @note If we are doing a NOT NULL comparison on a NOT NULL field in a outer join @@ -4722,7 +4724,8 @@ static void add_key_field(JOIN *join, KEY_FIELD **key_fields,uint and_level, Item_bool_func *cond, Field *field, bool eq_func, Item **value, uint num_values, - table_map usable_tables, SARGABLE_PARAM **sargables) + table_map usable_tables, SARGABLE_PARAM **sargables, + uint row_col_no= 0) { uint optimize= 0; if (eq_func && @@ -4751,7 +4754,15 @@ add_key_field(JOIN *join, bool optimizable=0; for (uint i=0; iused_tables(); + Item *curr_val; + if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM) + { + Item_row *value_tuple= (Item_row *) (value[i]->real_item()); + curr_val= value_tuple->element_index(row_col_no - 1); + } + else + curr_val= value[i]; + table_map value_used_tables= curr_val->used_tables(); used_tables|= value_used_tables; if (!(value_used_tables & (field->table->map | RAND_TABLE_BIT))) optimizable=1; @@ -4789,7 +4800,15 @@ add_key_field(JOIN *join, bool is_const=1; for (uint i=0; iconst_item())) + Item *curr_val; + if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM) + { + Item_row *value_tuple= (Item_row *) (value[i]->real_item()); + curr_val= value_tuple->element_index(row_col_no - 1); + } + else + curr_val= value[i]; + if (!(is_const&= curr_val->const_item())) break; } if (is_const) @@ -4856,12 +4875,14 @@ add_key_field(JOIN *join, @param key_fields Pointer to add key, if usable @param and_level And level, to be stored in KEY_FIELD @param cond Condition predicate - @param field Field used in comparision + @param field_item Field item used for comparison @param eq_func True if we 
used =, <=> or IS NULL - @param value Value used for comparison with field - Is NULL for BETWEEN and IN + @param value Value used for comparison with field_item + @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates + @param row_col_no if = n that > 0 then field is compared only + against the n-th component of row values @note If field items f1 and f2 belong to the same multiple equality and @@ -4876,11 +4897,12 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level, Item_bool_func *cond, Item *field_item, bool eq_func, Item **val, uint num_values, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables, uint row_col_no= 0) { Field *field= ((Item_field *) (field_item->real_item()))->field; add_key_field(join, key_fields, and_level, cond, field, - eq_func, val, num_values, usable_tables, sargables); + eq_func, val, num_values, usable_tables, sargables, + row_col_no); Item_equal *item_equal= field_item->get_item_equal(); if (item_equal) { @@ -4896,7 +4918,7 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level, { add_key_field(join, key_fields, and_level, cond, equal_field, eq_func, val, num_values, usable_tables, - sargables); + sargables, row_col_no); } } } @@ -5078,6 +5100,24 @@ Item_func_in::add_key_fields(JOIN *join, KEY_FIELD **key_fields, (Item_field*) (args[0]->real_item()), false, args + 1, arg_count - 1, usable_tables, sargables); } + else if (key_item()->type() == Item::ROW_ITEM && + !(used_tables() & OUTER_REF_TABLE_BIT)) + { + Item_row *key_row= (Item_row *) key_item(); + Item **key_col= key_row->addr(0); + uint row_cols= key_row->cols(); + for (uint i= 0; i < row_cols; i++, key_col++) + { + if (is_local_field(*key_col)) + { + Item_field *field_item= (Item_field *)((*key_col)->real_item()); + add_key_equal_fields(join, key_fields, *and_level, this, 
+ field_item, false, args + 1, arg_count - 1, + usable_tables, sargables, i + 1); + } + } + } + } From 6d417a0bad205a6bacfee10dbc46dd631b093e75 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 4 Apr 2017 08:50:01 +0000 Subject: [PATCH 228/233] Fix aws_key_management compilation after mismerge Also do not use BUILD_BYPRODUCTS in ExternalPeoject for older cmake --- plugin/aws_key_management/CMakeLists.txt | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index 1c80b44ede7..66b8074406f 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -103,7 +103,9 @@ ELSE() ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) ADD_DEPENDENCIES(${lib} aws_sdk_cpp) SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") - SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) + IF(CMAKE_VERSION VERSION_GREATER "3.1") + SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) + ENDIF() SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) ENDFOREACH() @@ -139,14 +141,13 @@ ENDIF() ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}") -MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc - LINK_LIBRARIES ${AWS_SDK_LIBS} - COMPONENT aws-key-management) - IF(WIN32) SET(AWS_CPP_SDK_DEPENDENCIES bcrypt winhttp wininet userenv version) ELSE() SET(AWS_CPP_SDK_DEPENDENCIES ${SSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES}) ENDIF() +MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc + LINK_LIBRARIES ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES} + COMPONENT aws-key-management) + -TARGET_LINK_LIBRARIES(aws_key_management ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}) From d1fc3cc469d4429c0a94f4ae282a1162be745bd7 Mon Sep 17 00:00:00 2001 From: 
Sergei Petrunia Date: Tue, 4 Apr 2017 12:36:14 +0300 Subject: [PATCH 229/233] MariaRocks: temporarily disable a few failing tests --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 5247bf2aad2..daa280d4848 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -47,6 +47,10 @@ blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api tes persistent_cache: Upstream RocksDB bug https://github.com/facebook/mysql-5.6/issues/579 +collation: Fails on gcc 4.8 and before, MDEV-12433 +rocksdb : Intermittent failures in BB +unique_sec : Intermittent failures in BB + allow_no_pk_concurrent_insert: stress test rocksdb_deadlock_stress_rc: stress test rocksdb_deadlock_stress_rr: stress test From c85ea1ab6d276f90b7b73d99cf357e11ea103822 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 4 Apr 2017 12:39:27 +0300 Subject: [PATCH 230/233] MDEV-12439: (Temporary) Don't run MariaRocks tests under valgrind --- storage/rocksdb/mysql-test/rocksdb/suite.pm | 3 +++ storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 3 +++ 2 files changed, 6 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm index 6d7c352dd13..79c630f87f1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/suite.pm +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -18,5 +18,8 @@ my $sst_dump= return "RocksDB is not compiled, no sst_dump" unless $sst_dump; $ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; +# Temporarily disable testing under valgrind, due to MDEV-12439 +return "RocksDB tests disabled under valgrind" if ($::opt_valgrind); + bless { }; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index daa280d4848..8d3fc090273 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -51,6 +51,9 @@ collation: Fails on gcc 4.8 and before, MDEV-12433 rocksdb : Intermittent failures in BB unique_sec : Intermittent failures in BB +# See also storage/rocksdb/mysql-test/rocksdb/suite.pm +# Running tests under valgrind is disabled there. + allow_no_pk_concurrent_insert: stress test rocksdb_deadlock_stress_rc: stress test rocksdb_deadlock_stress_rr: stress test From 23b86a18e67ae7a52da84031bd2bc355f350b814 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 4 Apr 2017 17:46:56 +0300 Subject: [PATCH 231/233] MariaRocks: temporarily disable 32-bit Windows builds --- storage/rocksdb/CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 0ae3b240273..308bd26592e 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -22,6 +22,13 @@ IF(CMAKE_SYSTEM_PROCESSOR MATCHES "i[36]86") SKIP_ROCKSDB_PLUGIN("Intel 32 bit not supported.") ENDIF() +# +# Also, disable building on 32-bit Windows +# +IF (WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 4) + SKIP_ROCKSDB_PLUGIN("32-Bit Windows are temporarily disabled") +ENDIF() + # This plugin needs recent C++ compilers (it is using C++11 features) # Skip build for the old compilers SET(CXX11_FLAGS) From 15878ee41c5845403faf16d853011375550f0727 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 4 Apr 2017 18:55:18 +0000 Subject: [PATCH 232/233] Windows, compiling : Remove _DEBUG preprocessor constant, to fix debug build with older cmake. The constant is implicitely defined by VS when chosen C runtime is Debug (/MTd, MDd). CMake does not define it since https://public.kitware.com/Bug/view.php?id=15777 was fixed. We remove it from compile flags, to be able to build Debug with /MT runtime using older cmakes. 
--- cmake/os/Windows.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 38f440d251d..22f1ff7f30d 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -97,6 +97,10 @@ IF(MSVC) # information for use with the debugger. The symbolic debugging # information includes the names and types of variables, as well as # functions and line numbers. No .pdb file is produced by the compiler. + # + # - Remove preprocessor flag _DEBUG that older cmakes use with Config=Debug, + # it is as defined by Debug runtimes itself (/MTd /MDd) + FOREACH(lang C CXX) SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Z7") ENDFOREACH() @@ -106,6 +110,7 @@ IF(MSVC) CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) STRING(REGEX REPLACE "/M[TD][d]?" "${MSVC_CRT_TYPE}" "${flag}" "${${flag}}" ) + STRING(REGEX REPLACE "/D[ ]?_DEBUG" "" "${flag}" "${${flag}}") STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() From 0d34dd7cfb700b91f11c59d189d70142ed652615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 12:19:42 +0300 Subject: [PATCH 233/233] MDEV-11840 InnoDB: "Cannot open " should not be an error buf_load(): When the file cannot be opened for reading, issue a note, not an error message. --- storage/innobase/buf/buf0dump.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index f7883ded070..9b86b1c16da 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -540,7 +540,7 @@ buf_load() f = fopen(full_filename, "r"); if (f == NULL) { - buf_load_status(STATUS_ERR, + buf_load_status(STATUS_INFO, "Cannot open '%s' for reading: %s", full_filename, strerror(errno)); return;