From 274b25eb8b9df3323a4540a7aaa5e537c39f9f18 Mon Sep 17 00:00:00 2001 From: Yoni Fogel Date: Wed, 17 Apr 2013 00:02:13 -0400 Subject: [PATCH] refs #5081 Replace all usage: BOOL->bool FALSE->false TRUE->true u_int*_t->uint*_t Also poisoned all of the variables git-svn-id: file:///svn/mysql/tokudb-engine/tokudb-engine@46156 c7de825b-a66e-492c-adef-691d508d4ae1 --- storage/tokudb/ha_tokudb.cc | 288 +++++++-------- storage/tokudb/ha_tokudb.h | 58 +-- storage/tokudb/ha_tokudb_alter_51.cc | 34 +- storage/tokudb/ha_tokudb_alter_56.cc | 38 +- storage/tokudb/ha_tokudb_alter_common.cc | 104 +++--- storage/tokudb/ha_tokudb_update_fun.cc | 182 +++++----- storage/tokudb/hatoku_cmp.cc | 444 +++++++++++------------ storage/tokudb/hatoku_cmp.h | 90 ++--- storage/tokudb/hatoku_defines.h | 2 +- storage/tokudb/hatoku_hton.cc | 58 +-- storage/tokudb/hatoku_hton.h | 4 +- 11 files changed, 651 insertions(+), 651 deletions(-) diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index f6a47832b94..c6300956b5e 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -79,7 +79,7 @@ static void share_key_file_unlock(TOKUDB_SHARE * share) // // This offset is calculated starting from AFTER the NULL bytes // -static inline u_int32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) { +static inline uint32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) { uint offset = 0; for (uint i = 0; i < table_share->fields; i++) { if (kc_info->field_lengths[i] && !bitmap_is_set(&kc_info->key_filters[keynr],i)) { @@ -90,7 +90,7 @@ static inline u_int32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SH } -static inline u_int32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) { +static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) { uint len = 0; for (uint i = 0; i < table_share->fields; i++) { if (kc_info->length_bytes[i] && !bitmap_is_set(&kc_info->key_filters[keynr],i)) { @@ -121,9 +121,9 @@ static int allocate_key_and_col_info ( TABLE_SHARE* table_share, KEY_AND_COL_INF // // create the field lengths // - kc_info->field_lengths = (u_int16_t *)my_malloc(table_share->fields*sizeof(u_int16_t), MYF(MY_WME | MY_ZEROFILL)); + kc_info->field_lengths = (uint16_t *)my_malloc(table_share->fields*sizeof(uint16_t), MYF(MY_WME | MY_ZEROFILL)); kc_info->length_bytes= (uchar *)my_malloc(table_share->fields, MYF(MY_WME | MY_ZEROFILL)); - kc_info->blob_fields= (u_int32_t *)my_malloc(table_share->fields*sizeof(u_int32_t), MYF(MY_WME | MY_ZEROFILL)); + kc_info->blob_fields= (uint32_t *)my_malloc(table_share->fields*sizeof(uint32_t), MYF(MY_WME | MY_ZEROFILL)); if (kc_info->field_lengths == NULL || kc_info->length_bytes == NULL || @@ -636,7 +636,7 @@ static inline HA_TOKU_ISO_LEVEL tx_to_toku_iso(ulong tx_isolation) { } } -static inline u_int32_t toku_iso_to_txn_flag (HA_TOKU_ISO_LEVEL lvl) { +static inline uint32_t toku_iso_to_txn_flag (HA_TOKU_ISO_LEVEL lvl) { if (lvl == hatoku_iso_read_uncommitted) { return DB_READ_UNCOMMITTED; } @@ -724,7 +724,7 @@ void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offs static inline uchar* pack_fixed_field( uchar* to_tokudb, const uchar* from_mysql, - u_int32_t num_bytes + uint32_t num_bytes ) { switch (num_bytes) { @@ -753,7 +753,7 @@ static inline uchar* pack_fixed_field( static inline const uchar* unpack_fixed_field( uchar* to_mysql, const uchar* from_tokudb, - u_int32_t 
num_bytes + uint32_t num_bytes ) { switch (num_bytes) { @@ -784,15 +784,15 @@ static inline uchar* write_var_field( uchar* to_tokudb_data, // location where data is going to be written uchar* to_tokudb_offset_start, //location where offset starts, IS THIS A BAD NAME???? const uchar * data, // the data to write - u_int32_t data_length, // length of data to write - u_int32_t offset_bytes // number of offset bytes + uint32_t data_length, // length of data to write + uint32_t offset_bytes // number of offset bytes ) { memcpy(to_tokudb_data, data, data_length); // // for offset, we pack the offset where the data ENDS! // - u_int32_t offset = to_tokudb_data + data_length - to_tokudb_offset_start; + uint32_t offset = to_tokudb_data + data_length - to_tokudb_offset_start; switch(offset_bytes) { case (1): to_tokudb_offset_ptr[0] = (uchar)offset; @@ -807,12 +807,12 @@ static inline uchar* write_var_field( return to_tokudb_data + data_length; } -static inline u_int32_t get_var_data_length( +static inline uint32_t get_var_data_length( const uchar * from_mysql, - u_int32_t mysql_length_bytes + uint32_t mysql_length_bytes ) { - u_int32_t data_length; + uint32_t data_length; switch(mysql_length_bytes) { case(1): data_length = from_mysql[0]; @@ -832,8 +832,8 @@ static inline uchar* pack_var_field( uchar* to_tokudb_data, // pointer to where tokudb data should be written uchar* to_tokudb_offset_start, //location where data starts, IS THIS A BAD NAME???? const uchar * from_mysql, // mysql data - u_int32_t mysql_length_bytes, //number of bytes used to store length in from_mysql - u_int32_t offset_bytes //number of offset_bytes used in tokudb row + uint32_t mysql_length_bytes, //number of bytes used to store length in from_mysql + uint32_t offset_bytes //number of offset_bytes used in tokudb row ) { uint data_length = get_var_data_length(from_mysql, mysql_length_bytes); @@ -850,8 +850,8 @@ static inline uchar* pack_var_field( static inline void unpack_var_field( uchar* to_mysql, const uchar* from_tokudb_data, - u_int32_t from_tokudb_data_len, - u_int32_t mysql_length_bytes + uint32_t from_tokudb_data_len, + uint32_t mysql_length_bytes ) { // @@ -880,14 +880,14 @@ static uchar* pack_toku_field_blob( Field* field ) { - u_int32_t len_bytes = field->row_pack_length(); - u_int32_t length = 0; + uint32_t len_bytes = field->row_pack_length(); + uint32_t length = 0; uchar* data_ptr = NULL; memcpy(to_tokudb, from_mysql, len_bytes); switch (len_bytes) { case (1): - length = (u_int32_t)(*from_mysql); + length = (uint32_t)(*from_mysql); break; case (2): length = uint2korr(from_mysql); @@ -1067,13 +1067,13 @@ static inline int tokudb_generate_row( DB* curr_db = dest_db; uchar* row_desc = NULL; - u_int32_t desc_size; + uint32_t desc_size; uchar* buff = NULL; - u_int32_t max_key_len = 0; + uint32_t max_key_len = 0; row_desc = (uchar *)curr_db->descriptor->dbt.data; - row_desc += (*(u_int32_t *)row_desc); - desc_size = (*(u_int32_t *)row_desc) - 4; + row_desc += (*(uint32_t *)row_desc); + desc_size = (*(uint32_t *)row_desc) - 4; row_desc += 4; if (is_key_pk(row_desc, desc_size)) { @@ -1141,7 +1141,7 @@ static inline int tokudb_generate_row( } row_desc += desc_size; - desc_size = (*(u_int32_t *)row_desc) - 4; + desc_size = (*(uint32_t *)row_desc) - 4; row_desc += 4; if (dest_val != NULL) { if (!is_key_clustering(row_desc, desc_size) || src_val->size == 0) { @@ -1257,7 +1257,7 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t abort_loader = false; memset(&lc, 0, sizeof(lc)); lock.type = 
TL_IGNORE; - for (u_int32_t i = 0; i < MAX_KEY+1; i++) { + for (uint32_t i = 0; i < MAX_KEY+1; i++) { mult_put_flags[i] = 0; mult_del_flags[i] = DB_DELETE_ANY; mult_dbt_flags[i] = DB_DBT_REALLOC; @@ -1307,7 +1307,7 @@ static int open_status_dictionary(DB** ptr, const char* name, DB_TXN* txn) { int error; char* newname = NULL; uint open_mode = DB_THREAD; - u_int32_t pagesize = 0; + uint32_t pagesize = 0; newname = (char *)my_malloc( get_max_dict_name_path_length(name), MYF(MY_WME) @@ -1461,8 +1461,8 @@ static int initialize_col_pack_info(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* tabl goto exit; } { - u_int32_t curr_fixed_offset = 0; - u_int32_t curr_var_index = 0; + uint32_t curr_fixed_offset = 0; + uint32_t curr_var_index = 0; for (uint j = 0; j < table_share->fields; j++) { COL_PACK_INFO* curr = &kc_info->cp_info[keynr][j]; // @@ -1511,8 +1511,8 @@ static void reset_key_and_col_info(KEY_AND_COL_INFO *kc_info, uint keynr) { static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, KEY_AND_COL_INFO* kc_info, uint hidden_primary_key, uint primary_key) { int error = 0; - u_int32_t curr_blob_field_index = 0; - u_int32_t max_var_bytes = 0; + uint32_t curr_blob_field_index = 0; + uint32_t max_var_bytes = 0; // // fill in the field lengths. 0 means it is a variable sized field length // fill in length_bytes, 0 means it is fixed or blob @@ -1529,7 +1529,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K case toku_type_fixstring: pack_length = field->pack_length(); assert(pack_length < 1<<16); - kc_info->field_lengths[i] = (u_int16_t)pack_length; + kc_info->field_lengths[i] = (uint16_t)pack_length; kc_info->length_bytes[i] = 0; break; case toku_type_blob: @@ -1643,7 +1643,7 @@ int ha_tokudb::initialize_share( ) { int error = 0; - u_int64_t num_rows = 0; + uint64_t num_rows = 0; bool table_exists; DB_TXN* txn = NULL; bool do_commit = false; @@ -1734,7 +1734,7 @@ int ha_tokudb::initialize_share( // We need to set the ref_length to start at 5, to account for // the "infinity byte" in keys, and for placing the DBT size in the first four bytes // - ref_length = sizeof(u_int32_t) + sizeof(uchar); + ref_length = sizeof(uint32_t) + sizeof(uchar); KEY_PART_INFO *key_part = table->key_info[primary_key].key_part; KEY_PART_INFO *end = key_part + table->key_info[primary_key].key_parts; for (; key_part != end; key_part++) { @@ -1811,7 +1811,7 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) { primary_key = table_share->keys; key_used_on_scan = MAX_KEY; hidden_primary_key = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH; - ref_length = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(u_int32_t); + ref_length = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t); } else { key_used_on_scan = primary_key; @@ -1829,8 +1829,8 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) { &prelocked_left_range, max_key_length, &prelocked_right_range, max_key_length, &primary_key_buff, (hidden_primary_key ? 
0 : max_key_length), - &fixed_cols_for_query, table_share->fields*sizeof(u_int32_t), - &var_cols_for_query, table_share->fields*sizeof(u_int32_t), + &fixed_cols_for_query, table_share->fields*sizeof(uint32_t), + &var_cols_for_query, table_share->fields*sizeof(uint32_t), NullS ); if (alloc_ptr == NULL) { @@ -1859,11 +1859,11 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) { goto exit; } - for (u_int32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) { + for (uint32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) { mult_key_dbt[i].flags = DB_DBT_REALLOC; } - for (u_int32_t i = 0; i < curr_num_DBs; i++) { + for (uint32_t i = 0; i < curr_num_DBs; i++) { mult_rec_dbt[i].flags = DB_DBT_REALLOC; } @@ -1929,7 +1929,7 @@ exit: // 0 on success // error otherwise // -int ha_tokudb::estimate_num_rows(DB* db, u_int64_t* num_rows, DB_TXN* txn) { +int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) { int error = ENOSYS; DBC* crsr = NULL; bool do_commit = false; @@ -2193,13 +2193,13 @@ int ha_tokudb::__close(int mutex_is_locked) { my_free(blob_buff, MYF(MY_ALLOW_ZERO_PTR)); my_free(alloc_ptr, MYF(MY_ALLOW_ZERO_PTR)); my_free(range_query_buff, MYF(MY_ALLOW_ZERO_PTR)); - for (u_int32_t i = 0; i < sizeof(mult_rec_dbt)/sizeof(mult_rec_dbt[0]); i++) { + for (uint32_t i = 0; i < sizeof(mult_rec_dbt)/sizeof(mult_rec_dbt[0]); i++) { if (mult_rec_dbt[i].flags == DB_DBT_REALLOC && mult_rec_dbt[i].data != NULL) { free(mult_rec_dbt[i].data); } } - for (u_int32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) { + for (uint32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) { if (mult_key_dbt[i].flags == DB_DBT_REALLOC && mult_key_dbt[i].data != NULL) { free(mult_key_dbt[i].data); @@ -2366,7 +2366,7 @@ int ha_tokudb::pack_old_row_for_update( int ha_tokudb::unpack_blobs( uchar* record, const uchar* from_tokudb_blob, - u_int32_t num_bytes, + uint32_t num_bytes, bool check_bitmap ) { @@ -2390,13 +2390,13 @@ int ha_tokudb::unpack_blobs( memcpy(blob_buff, from_tokudb_blob, num_bytes); buff= blob_buff; for (uint i = 0; i < share->kc_info.num_blobs; i++) { - u_int32_t curr_field_index = share->kc_info.blob_fields[i]; + uint32_t curr_field_index = share->kc_info.blob_fields[i]; bool skip = check_bitmap ? 
!(bitmap_is_set(table->read_set,curr_field_index) || bitmap_is_set(table->write_set,curr_field_index)) : false; Field* field = table->field[curr_field_index]; - u_int32_t len_bytes = field->row_pack_length(); + uint32_t len_bytes = field->row_pack_length(); buff = unpack_toku_field_blob( record + field_offset(field, table), buff, @@ -2432,7 +2432,7 @@ int ha_tokudb::unpack_row( const uchar* fixed_field_ptr = (const uchar *) row->data; const uchar* var_field_offset_ptr = NULL; const uchar* var_field_data_ptr = NULL; - u_int32_t data_end_offset = 0; + uint32_t data_end_offset = 0; memcpy(record, fixed_field_ptr, table_share->null_bytes); fixed_field_ptr += table_share->null_bytes; @@ -2446,7 +2446,7 @@ int ha_tokudb::unpack_row( unpack_key(record,key,index); } - u_int32_t last_offset = 0; + uint32_t last_offset = 0; // // we have two methods of unpacking, one if we need to unpack the entire row // the second if we unpack a subset of the entire row @@ -2499,7 +2499,7 @@ int ha_tokudb::unpack_row( error = unpack_blobs( record, var_field_data_ptr, - row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data), + row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data), false ); if (error) { @@ -2514,7 +2514,7 @@ int ha_tokudb::unpack_row( // // first the fixed fields // - for (u_int32_t i = 0; i < num_fixed_cols_for_query; i++) { + for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) { uint field_index = fixed_cols_for_query[i]; Field* field = table->field[field_index]; unpack_fixed_field( @@ -2528,12 +2528,12 @@ int ha_tokudb::unpack_row( // now the var fields // here, we do NOT modify var_field_data_ptr or var_field_offset_ptr // - for (u_int32_t i = 0; i < num_var_cols_for_query; i++) { + for (uint32_t i = 0; i < num_var_cols_for_query; i++) { uint field_index = var_cols_for_query[i]; Field* field = table->field[field_index]; - u_int32_t var_field_index = share->kc_info.cp_info[index][field_index].col_pack_val; - u_int32_t data_start_offset; - u_int32_t field_len; + uint32_t var_field_index = share->kc_info.cp_info[index][field_index].col_pack_val; + uint32_t data_start_offset; + uint32_t field_len; get_var_field_info( &field_len, @@ -2566,7 +2566,7 @@ int ha_tokudb::unpack_row( error = unpack_blobs( record, var_field_data_ptr, - row->size - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data), + row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data), true ); if (error) { @@ -2579,7 +2579,7 @@ exit: return error; } -u_int32_t ha_tokudb::place_key_into_mysql_buff( +uint32_t ha_tokudb::place_key_into_mysql_buff( KEY* key_info, uchar * record, uchar* data @@ -2626,7 +2626,7 @@ u_int32_t ha_tokudb::place_key_into_mysql_buff( // unpacking a key of // void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) { - u_int32_t bytes_read; + uint32_t bytes_read; uchar *pos = (uchar *) key->data + 1; bytes_read = place_key_into_mysql_buff( &table->key_info[index], @@ -2645,7 +2645,7 @@ void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) { } } -u_int32_t ha_tokudb::place_key_into_dbt_buff( +uint32_t ha_tokudb::place_key_into_dbt_buff( KEY* key_info, uchar * buff, const uchar * record, @@ -2721,7 +2721,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( int key_length ) { - u_int32_t size = 0; + uint32_t size = 0; uchar* tmp_buff = buff; my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); @@ -3065,12 +3065,12 @@ ha_rows ha_tokudb::estimate_rows_upper_bound() { int ha_tokudb::cmp_ref(const uchar * ref1, const 
uchar * ref2) { int ret_val = 0; ret_val = tokudb_compare_two_keys( - ref1 + sizeof(u_int32_t), - *(u_int32_t *)ref1, - ref2 + sizeof(u_int32_t), - *(u_int32_t *)ref2, + ref1 + sizeof(uint32_t), + *(uint32_t *)ref1, + ref2 + sizeof(uint32_t), + *(uint32_t *)ref2, (uchar *)share->file->descriptor->dbt.data + 4, - *(u_int32_t *)share->file->descriptor->dbt.data - 4, + *(uint32_t *)share->file->descriptor->dbt.data - 4, false ); return ret_val; @@ -3173,7 +3173,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) { if (!thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS) && !hidden_primary_key) { mult_put_flags[primary_key] = DB_NOOVERWRITE; } - u_int32_t loader_flags = (get_load_save_space(thd)) ? + uint32_t loader_flags = (get_load_save_space(thd)) ? LOADER_USE_PUTS : 0; int error = db_env->create_loader( @@ -3307,7 +3307,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in DBC* tmp_cursor1 = NULL; DBC* tmp_cursor2 = NULL; DBT key1, key2, val, packed_key1, packed_key2; - u_int64_t cnt = 0; + uint64_t cnt = 0; char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound. THD* thd = ha_thd(); memset(&key1, 0, sizeof(key1)); @@ -3594,9 +3594,9 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) { tmp_pk_val.size = pk_val->size; for (uint keynr = 0; keynr < table_share->keys; keynr++) { - u_int32_t tmp_num_bytes = 0; + uint32_t tmp_num_bytes = 0; uchar* row_desc = NULL; - u_int32_t desc_size = 0; + uint32_t desc_size = 0; if (keynr == primary_key) { continue; @@ -3608,8 +3608,8 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) { // TEST // row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data; - row_desc += (*(u_int32_t *)row_desc); - desc_size = (*(u_int32_t *)row_desc) - 4; + row_desc += (*(uint32_t *)row_desc); + desc_size = (*(uint32_t *)row_desc) - 4; row_desc += 4; tmp_num_bytes = pack_key_from_desc( key_buff3, @@ -3632,9 +3632,9 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) { tmp_buff = (uchar *)my_malloc(alloced_rec_buff_length,MYF(MY_WME)); assert(tmp_buff); row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data; - row_desc += (*(u_int32_t *)row_desc); - row_desc += (*(u_int32_t *)row_desc); - desc_size = (*(u_int32_t *)row_desc) - 4; + row_desc += (*(uint32_t *)row_desc); + row_desc += (*(uint32_t *)row_desc); + desc_size = (*(uint32_t *)row_desc) - 4; row_desc += 4; tmp_num_bytes = pack_clustering_val_from_desc( tmp_buff, @@ -3667,10 +3667,10 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) { void ha_tokudb::set_main_dict_put_flags( THD* thd, bool opt_eligible, - u_int32_t* put_flags + uint32_t* put_flags ) { - u_int32_t old_prelock_flags = 0; + uint32_t old_prelock_flags = 0; uint curr_num_DBs = table->s->keys + test(hidden_primary_key); bool in_hot_index = share->num_DBs > curr_num_DBs; bool using_ignore_flag_opt = do_ignore_flag_optimization( @@ -3713,7 +3713,7 @@ void ha_tokudb::set_main_dict_put_flags( int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) { int error = 0; - u_int32_t put_flags = mult_put_flags[primary_key]; + uint32_t put_flags = mult_put_flags[primary_key]; THD *thd = ha_thd(); uint curr_num_DBs = table->s->keys + test(hidden_primary_key); @@ -4234,8 +4234,8 @@ cleanup: // and var_cols_for_query // void ha_tokudb::set_query_columns(uint keynr) { - u_int32_t curr_fixed_col_index = 0; - u_int32_t curr_var_col_index = 0; + uint32_t 
curr_fixed_col_index = 0; + uint32_t curr_var_col_index = 0; read_key = false; read_blobs = false; // @@ -4699,7 +4699,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_ DBT row; DBT lookup_key; int error = 0; - u_int32_t flags = 0; + uint32_t flags = 0; THD* thd = ha_thd(); tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; struct smart_dbt_info info; @@ -4801,7 +4801,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { memset((void *) &curr_key, 0, sizeof(curr_key)); // get key info - u_int32_t key_size = *(u_int32_t *)curr_pos; + uint32_t key_size = *(uint32_t *)curr_pos; curr_pos += sizeof(key_size); uchar* curr_key_buff = curr_pos; curr_pos += key_size; @@ -4821,7 +4821,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { DBT curr_val; memset((void *) &curr_val, 0, sizeof(curr_val)); uchar* curr_val_buff = NULL; - u_int32_t val_size = 0; + uint32_t val_size = 0; // in this case, we don't have a val, we are simply extracting the pk if (!need_val) { curr_val.data = curr_val_buff; @@ -4834,7 +4834,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { // need to extract a val and place it into buf if (unpack_entire_row) { // get val info - val_size = *(u_int32_t *)curr_pos; + val_size = *(uint32_t *)curr_pos; curr_pos += sizeof(val_size); curr_val_buff = curr_pos; curr_pos += val_size; @@ -4853,7 +4853,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { curr_pos += table_share->null_bytes; // now the fixed sized rows - for (u_int32_t i = 0; i < num_fixed_cols_for_query; i++) { + for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) { uint field_index = fixed_cols_for_query[i]; Field* field = table->field[field_index]; unpack_fixed_field( @@ -4864,10 +4864,10 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { curr_pos += share->kc_info.field_lengths[field_index]; } // now the variable sized rows - for (u_int32_t i = 0; i < num_var_cols_for_query; i++) { + for (uint32_t i = 0; i < num_var_cols_for_query; i++) { uint field_index = var_cols_for_query[i]; Field* field = table->field[field_index]; - u_int32_t field_len = *(u_int32_t *)curr_pos; + uint32_t field_len = *(uint32_t *)curr_pos; curr_pos += sizeof(field_len); unpack_var_field( buf + field_offset(field, table), @@ -4879,7 +4879,7 @@ int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) { } // now the blobs if (read_blobs) { - u_int32_t blob_size = *(u_int32_t *)curr_pos; + uint32_t blob_size = *(uint32_t *)curr_pos; curr_pos += sizeof(blob_size); error = unpack_blobs( buf, @@ -4921,24 +4921,24 @@ int ha_tokudb::fill_range_query_buf( // // first put the value into range_query_buf // - u_int32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff; - u_int32_t size_needed; - u_int32_t user_defined_size = get_tokudb_read_buf_size(thd); + uint32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff; + uint32_t size_needed; + uint32_t user_defined_size = get_tokudb_read_buf_size(thd); uchar* curr_pos = NULL; if (need_val) { if (unpack_entire_row) { - size_needed = 2*sizeof(u_int32_t) + key->size + row->size; + size_needed = 2*sizeof(uint32_t) + key->size + row->size; } else { // this is an upper bound - size_needed = sizeof(u_int32_t) + // size of key length + size_needed = sizeof(uint32_t) + // size of key length key->size + row->size + //key and row - 
num_var_cols_for_query*(sizeof(u_int32_t)) + //lengths of varchars stored - sizeof(u_int32_t); //length of blobs + num_var_cols_for_query*(sizeof(uint32_t)) + //lengths of varchars stored + sizeof(uint32_t); //length of blobs } } else { - size_needed = sizeof(u_int32_t) + key->size; + size_needed = sizeof(uint32_t) + key->size; } if (size_remaining < size_needed) { range_query_buff = (uchar *)my_realloc( @@ -4958,14 +4958,14 @@ int ha_tokudb::fill_range_query_buf( // curr_pos = range_query_buff + bytes_used_in_range_query_buff; - *(u_int32_t *)curr_pos = key->size; - curr_pos += sizeof(u_int32_t); + *(uint32_t *)curr_pos = key->size; + curr_pos += sizeof(uint32_t); memcpy(curr_pos, key->data, key->size); curr_pos += key->size; if (need_val) { if (unpack_entire_row) { - *(u_int32_t *)curr_pos = row->size; - curr_pos += sizeof(u_int32_t); + *(uint32_t *)curr_pos = row->size; + curr_pos += sizeof(uint32_t); memcpy(curr_pos, row->data, row->size); curr_pos += row->size; } @@ -4987,7 +4987,7 @@ int ha_tokudb::fill_range_query_buf( // // first the fixed fields // - for (u_int32_t i = 0; i < num_fixed_cols_for_query; i++) { + for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) { uint field_index = fixed_cols_for_query[i]; memcpy( curr_pos, @@ -5000,11 +5000,11 @@ int ha_tokudb::fill_range_query_buf( // // now the var fields // - for (u_int32_t i = 0; i < num_var_cols_for_query; i++) { + for (uint32_t i = 0; i < num_var_cols_for_query; i++) { uint field_index = var_cols_for_query[i]; - u_int32_t var_field_index = share->kc_info.cp_info[active_index][field_index].col_pack_val; - u_int32_t data_start_offset; - u_int32_t field_len; + uint32_t var_field_index = share->kc_info.cp_info[active_index][field_index].col_pack_val; + uint32_t data_start_offset; + uint32_t field_len; get_var_field_info( &field_len, @@ -5020,8 +5020,8 @@ int ha_tokudb::fill_range_query_buf( } if (read_blobs) { - u_int32_t blob_offset = 0; - u_int32_t data_size = 0; + uint32_t blob_offset = 0; + uint32_t data_size = 0; // // now the blobs // @@ -5031,7 +5031,7 @@ int ha_tokudb::fill_range_query_buf( var_field_data_ptr, share->kc_info.num_offset_bytes ); - data_size = row->size - blob_offset - (u_int32_t)(var_field_data_ptr - (const uchar *)row->data); + data_size = row->size - blob_offset - (uint32_t)(var_field_data_ptr - (const uchar *)row->data); memcpy(curr_pos, &data_size, sizeof(data_size)); curr_pos += sizeof(data_size); memcpy(curr_pos, var_field_data_ptr + blob_offset, data_size); @@ -5111,7 +5111,7 @@ cleanup: int ha_tokudb::get_next(uchar* buf, int direction) { int error = 0; - u_int32_t flags = SET_PRELOCK_FLAG(0); + uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; bool need_val; @@ -5244,7 +5244,7 @@ int ha_tokudb::index_first(uchar * buf) { invalidate_bulk_fetch(); int error = 0; struct smart_dbt_info info; - u_int32_t flags = SET_PRELOCK_FLAG(0); + uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; HANDLE_INVALID_CURSOR(); @@ -5287,7 +5287,7 @@ int ha_tokudb::index_last(uchar * buf) { invalidate_bulk_fetch(); int error = 0; struct smart_dbt_info info; - u_int32_t flags = SET_PRELOCK_FLAG(0); + uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; HANDLE_INVALID_CURSOR(); @@ -5430,8 +5430,8 @@ DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) { 
TOKUDB_DBUG_ENTER("ha_tokudb::get_pos"); /* We don't need to set app_data here */ memset((void *) to, 0, sizeof(*to)); - to->data = pos + sizeof(u_int32_t); - to->size = *(u_int32_t *)pos; + to->data = pos + sizeof(uint32_t); + to->size = *(uint32_t *)pos; DBUG_DUMP("key", (const uchar *) to->data, to->size); DBUG_RETURN(to); } @@ -5613,20 +5613,20 @@ void ha_tokudb::position(const uchar * record) { TOKUDB_DBUG_ENTER("ha_tokudb::position"); DBT key; if (hidden_primary_key) { - DBUG_ASSERT(ref_length == (TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(u_int32_t))); - memcpy_fixed(ref + sizeof(u_int32_t), current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH); - *(u_int32_t *)ref = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH; + DBUG_ASSERT(ref_length == (TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t))); + memcpy_fixed(ref + sizeof(uint32_t), current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH); + *(uint32_t *)ref = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH; } else { bool has_null; // // save the data // - create_dbt_key_from_table(&key, primary_key, ref + sizeof(u_int32_t), record, &has_null); + create_dbt_key_from_table(&key, primary_key, ref + sizeof(uint32_t), record, &has_null); // // save the size of data in the first four bytes of ref // - memcpy(ref, &key.size, sizeof(u_int32_t)); + memcpy(ref, &key.size, sizeof(uint32_t)); } DBUG_VOID_RETURN; } @@ -5652,7 +5652,7 @@ int ha_tokudb::info(uint flag) { } stats.deleted = 0; if (!(flag & HA_STATUS_NO_LOCK)) { - u_int64_t num_rows = 0; + uint64_t num_rows = 0; TOKU_DB_FRAGMENTATION_S frag_info; memset(&frag_info, 0, sizeof frag_info); @@ -5698,14 +5698,14 @@ int ha_tokudb::info(uint flag) { // in this case, we have a hidden primary key, do not // want to report space taken up by the hidden primary key to the user // - u_int64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata; + uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata; stats.data_file_length = (hpk_space > stats.data_file_length) ? 0 : stats.data_file_length - hpk_space; } else { // // one infinity byte per key needs to be subtracted // - u_int64_t inf_byte_space = dict_stats.bt_ndata; + uint64_t inf_byte_space = dict_stats.bt_ndata; stats.data_file_length = (inf_byte_space > stats.data_file_length) ? 
0 : stats.data_file_length - inf_byte_space; } @@ -5875,7 +5875,7 @@ int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) { TOKUDB_TRACE("just created master:%p\n", trx->all); } trx->sp_level = trx->all; - trans_register_ha(thd, TRUE, tokudb_hton); + trans_register_ha(thd, true, tokudb_hton); } DBUG_PRINT("trans", ("starting transaction stmt")); if (trx->stmt) { @@ -5883,7 +5883,7 @@ int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) { TOKUDB_TRACE("warning:stmt=%p\n", trx->stmt); } } - u_int32_t txn_begin_flags; + uint32_t txn_begin_flags; if (trx->all == NULL) { txn_begin_flags = toku_iso_to_txn_flag(toku_iso_level); if (txn_begin_flags == 0 && is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT) { @@ -5903,7 +5903,7 @@ int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) { TOKUDB_TRACE("just created stmt:%p:%p\n", trx->sp_level, trx->stmt); } reset_stmt_progress(&trx->stmt_progress); - trans_register_ha(thd, FALSE, tokudb_hton); + trans_register_ha(thd, false, tokudb_hton); cleanup: return error; } @@ -6062,13 +6062,13 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) { share->rows_from_locked_table = added_rows - deleted_rows; } transaction = trx->sub_sp_level; - trans_register_ha(thd, FALSE, tokudb_hton); + trans_register_ha(thd, false, tokudb_hton); cleanup: TOKUDB_DBUG_RETURN(error); } -u_int32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd) { +uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd) { uint sql_command = thd_sql_command(thd); bool in_lock_tables = thd_in_lock_tables(thd); @@ -6241,7 +6241,7 @@ static int create_sub_table( TOKUDB_DBUG_ENTER("create_sub_table"); int error; DB *file = NULL; - u_int32_t create_flags; + uint32_t create_flags; error = db_create(&file, db_env, 0); @@ -6392,22 +6392,22 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) { } } -static u_int32_t get_max_desc_size(KEY_AND_COL_INFO* kc_info, TABLE* form) { - u_int32_t max_row_desc_buff_size; +static uint32_t get_max_desc_size(KEY_AND_COL_INFO* kc_info, TABLE* form) { + uint32_t max_row_desc_buff_size; max_row_desc_buff_size = 2*(form->s->fields * 6)+10; // upper bound of key comparison descriptor max_row_desc_buff_size += get_max_secondary_key_pack_desc_size(kc_info); // upper bound for sec. key part max_row_desc_buff_size += get_max_clustering_val_pack_desc_size(form->s); // upper bound for clustering val part return max_row_desc_buff_size; } -static u_int32_t create_secondary_key_descriptor( +static uint32_t create_secondary_key_descriptor( uchar* buf, KEY* key_info, KEY* prim_key, uint hpk, TABLE* form, uint primary_key, - u_int32_t keynr, + uint32_t keynr, KEY_AND_COL_INFO* kc_info ) { @@ -6453,7 +6453,7 @@ int ha_tokudb::create_secondary_dictionary( KEY* key_info, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_hot_index, enum row_type row_type ) @@ -6464,7 +6464,7 @@ int ha_tokudb::create_secondary_dictionary( char* newname = NULL; KEY* prim_key = NULL; char dict_name[MAX_DICT_NAME_LEN]; - u_int32_t max_row_desc_buff_size; + uint32_t max_row_desc_buff_size; uint hpk= (form->s->primary_key >= MAX_KEY) ? 
TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0; uint32_t block_size; uint32_t read_block_size; @@ -6518,7 +6518,7 @@ cleanup: } -static u_int32_t create_main_key_descriptor( +static uint32_t create_main_key_descriptor( uchar* buf, KEY* prim_key, uint hpk, @@ -6561,7 +6561,7 @@ int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn uchar* row_desc_buff = NULL; char* newname = NULL; KEY* prim_key = NULL; - u_int32_t max_row_desc_buff_size; + uint32_t max_row_desc_buff_size; uint hpk= (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0; uint32_t block_size; uint32_t read_block_size; @@ -7115,9 +7115,9 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* DBT key; ha_rows ret_val = HA_TOKUDB_RANGE_COUNT; DB *kfile = share->key_file[keynr]; - u_int64_t less, equal, greater; - u_int64_t total_rows_estimate = HA_TOKUDB_RANGE_COUNT; - u_int64_t start_rows, end_rows, rows; + uint64_t less, equal, greater; + uint64_t total_rows_estimate = HA_TOKUDB_RANGE_COUNT; + uint64_t start_rows, end_rows, rows; int is_exact; int error; uchar inf_byte; @@ -7364,11 +7364,11 @@ int ha_tokudb::tokudb_add_index( DB_INDEXER* indexer = NULL; bool loader_use_puts = get_load_save_space(thd); bool use_hot_index = (lock.type == TL_WRITE_ALLOW_WRITE); - u_int32_t loader_flags = loader_use_puts ? LOADER_USE_PUTS : 0; - u_int32_t indexer_flags = 0; - u_int32_t mult_db_flags[MAX_KEY + 1] = {0}; - u_int32_t mult_put_flags[MAX_KEY + 1]; - u_int32_t mult_dbt_flags[MAX_KEY + 1]; + uint32_t loader_flags = loader_use_puts ? LOADER_USE_PUTS : 0; + uint32_t indexer_flags = 0; + uint32_t mult_db_flags[MAX_KEY + 1] = {0}; + uint32_t mult_put_flags[MAX_KEY + 1]; + uint32_t mult_dbt_flags[MAX_KEY + 1]; bool creating_hot_index = false; struct loader_context lc; memset(&lc, 0, sizeof lc); @@ -7380,7 +7380,7 @@ int ha_tokudb::tokudb_add_index( *modified_DBs = false; invalidate_bulk_fetch(); unpack_entire_row = true; // for bulk fetching rows - for (u_int32_t i = 0; i < MAX_KEY+1; i++) { + for (uint32_t i = 0; i < MAX_KEY+1; i++) { mult_put_flags[i] = 0; mult_dbt_flags[i] = DB_DBT_REALLOC; } @@ -7526,7 +7526,7 @@ int ha_tokudb::tokudb_add_index( // you need the val if you have a clustering index and key_read is not 0; bf_info.direction = 1; bf_info.thd = ha_thd(); - bf_info.need_val = TRUE; + bf_info.need_val = true; error = db_env->create_loader( db_env, @@ -7596,14 +7596,14 @@ int ha_tokudb::tokudb_add_index( // at this point, we know the range query buffer has at least one key/val pair uchar* curr_pos = range_query_buff+curr_range_query_buff_offset; - u_int32_t key_size = *(u_int32_t *)curr_pos; + uint32_t key_size = *(uint32_t *)curr_pos; curr_pos += sizeof(key_size); uchar* curr_key_buff = curr_pos; curr_pos += key_size; curr_pk_key.data = curr_key_buff; curr_pk_key.size = key_size; - u_int32_t val_size = *(u_int32_t *)curr_pos; + uint32_t val_size = *(uint32_t *)curr_pos; curr_pos += sizeof(val_size); uchar* curr_val_buff = curr_pos; curr_pos += val_size; diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index 1e7951d79d3..c69d99e2fc5 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -81,7 +81,7 @@ typedef struct st_tokudb_share { bool has_unique_keys; bool replace_into_fast; rw_lock_t num_DBs_lock; - u_int32_t num_DBs; + uint32_t num_DBs; } TOKUDB_SHARE; #define HA_TOKU_ORIG_VERSION 4 @@ -149,12 +149,12 @@ private: // uchar *rec_update_buff; ulong alloced_update_rec_buff_length; - u_int32_t max_key_length; + uint32_t 
max_key_length; uchar* range_query_buff; // range query buffer - u_int32_t size_range_query_buff; // size of the allocated range query buffer - u_int32_t bytes_used_in_range_query_buff; // number of bytes used in the range query buffer - u_int32_t curr_range_query_buff_offset; // current offset into the range query buffer for queries to read + uint32_t size_range_query_buff; // size of the allocated range query buffer + uint32_t bytes_used_in_range_query_buff; // number of bytes used in the range query buffer + uint32_t curr_range_query_buff_offset; // current offset into the range query buffer for queries to read uint64_t bulk_fetch_iteration; uint64_t rows_fetched_using_bulk_fetch; bool doing_bulk_fetch; @@ -185,9 +185,9 @@ private: // ranges of prelocked area, used to know how much to bulk fetch // uchar *prelocked_left_range; - u_int32_t prelocked_left_range_size; + uint32_t prelocked_left_range_size; uchar *prelocked_right_range; - u_int32_t prelocked_right_range_size; + uint32_t prelocked_right_range_size; // @@ -195,9 +195,9 @@ private: // DBT mult_key_dbt[2*(MAX_KEY + 1)]; DBT mult_rec_dbt[MAX_KEY + 1]; - u_int32_t mult_put_flags[MAX_KEY + 1]; - u_int32_t mult_del_flags[MAX_KEY + 1]; - u_int32_t mult_dbt_flags[MAX_KEY + 1]; + uint32_t mult_put_flags[MAX_KEY + 1]; + uint32_t mult_del_flags[MAX_KEY + 1]; + uint32_t mult_dbt_flags[MAX_KEY + 1]; // @@ -207,7 +207,7 @@ private: // query // uchar* blob_buff; - u_int32_t num_blob_bytes; + uint32_t num_blob_bytes; bool unpack_entire_row; @@ -215,10 +215,10 @@ private: // buffers (and their sizes) that will hold the indexes // of fields that need to be read for a query // - u_int32_t* fixed_cols_for_query; - u_int32_t num_fixed_cols_for_query; - u_int32_t* var_cols_for_query; - u_int32_t num_var_cols_for_query; + uint32_t* fixed_cols_for_query; + uint32_t num_fixed_cols_for_query; + uint32_t* var_cols_for_query; + uint32_t num_var_cols_for_query; bool read_blobs; bool read_key; @@ -235,7 +235,7 @@ private: // instance of cursor being used for init_xxx and rnd_xxx functions // DBC *cursor; - u_int32_t cursor_flags; // flags for cursor + uint32_t cursor_flags; // flags for cursor // // flags that are returned in table_flags() // @@ -265,7 +265,7 @@ private: // // For instances where we successfully prelock a range or a table, - // we set this to TRUE so that successive cursor calls can know + // we set this to true so that successive cursor calls can know // know to limit the locking overhead in a call to the fractal tree // bool range_lock_grabbed; @@ -291,7 +291,7 @@ private: int loader_error; bool num_DBs_locked_in_bulk; - u_int32_t lock_count; + uint32_t lock_count; bool fix_rec_buff_for_blob(ulong length); bool fix_rec_update_buff_for_blob(ulong length); @@ -314,9 +314,9 @@ private: const uchar* record, uint index ); - u_int32_t place_key_into_mysql_buff(KEY* key_info, uchar * record, uchar* data); + uint32_t place_key_into_mysql_buff(KEY* key_info, uchar * record, uchar* data); void unpack_key(uchar * record, DBT const *key, uint index); - u_int32_t place_key_into_dbt_buff(KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length); + uint32_t place_key_into_dbt_buff(KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length); DBT* create_dbt_key_from_key(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, bool dont_pack_pk, int key_length = MAX_KEY_LENGTH); DBT *create_dbt_key_from_table(DBT * key, uint keynr, uchar * buff, const uchar * record, bool* has_null, int 
key_length = MAX_KEY_LENGTH); DBT* create_dbt_key_for_lookup(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length = MAX_KEY_LENGTH); @@ -328,7 +328,7 @@ private: int open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn); int open_secondary_dictionary(DB** ptr, KEY* key_info, const char* name, bool is_read_only, DB_TXN* txn); int acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt); - int estimate_num_rows(DB* db, u_int64_t* num_rows, DB_TXN* txn); + int estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn); bool has_auto_increment_flag(uint* index); int write_frm_data(DB* db, DB_TXN* txn, const char* frm_name); @@ -365,7 +365,7 @@ private: KEY* key_info, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_hot_index, enum row_type row_type ); @@ -374,17 +374,17 @@ private: int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info); int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn); int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd); - void set_main_dict_put_flags(THD* thd, bool opt_eligible, u_int32_t* put_flags); + void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags); int insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn); int insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd); void test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val); - u_int32_t fill_row_mutator( + uint32_t fill_row_mutator( uchar* buf, - u_int32_t* dropped_columns, - u_int32_t num_dropped_columns, + uint32_t* dropped_columns, + uint32_t num_dropped_columns, TABLE* altered_table, KEY_AND_COL_INFO* altered_kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_add ); @@ -490,7 +490,7 @@ public: ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key); - u_int32_t get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd); + uint32_t get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd); THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type); int get_status(DB_TXN* trans); @@ -609,7 +609,7 @@ public: int unpack_blobs( uchar* record, const uchar* from_tokudb_blob, - u_int32_t num_blob_bytes, + uint32_t num_blob_bytes, bool check_bitmap ); int unpack_row( diff --git a/storage/tokudb/ha_tokudb_alter_51.cc b/storage/tokudb/ha_tokudb_alter_51.cc index bde4a3eaf88..dda3e47a7ae 100644 --- a/storage/tokudb/ha_tokudb_alter_51.cc +++ b/storage/tokudb/ha_tokudb_alter_51.cc @@ -306,8 +306,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table, goto cleanup; } if (has_added_columns && !has_non_added_changes) { - u_int32_t added_columns[altered_table->s->fields]; - u_int32_t num_added_columns = 0; + uint32_t added_columns[altered_table->s->fields]; + uint32_t num_added_columns = 0; int r = find_changed_columns( added_columns, &num_added_columns, @@ -319,8 +319,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table, goto cleanup; } if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) { - for (u_int32_t i = 0; i < num_added_columns; i++) { - u_int32_t curr_added_index = added_columns[i]; + for (uint32_t i = 0; i < num_added_columns; i++) { + uint32_t curr_added_index = added_columns[i]; Field* curr_added_field = altered_table->field[curr_added_index]; printf( "Added column: index %d, name %s\n", @@ -331,8 +331,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table, } } if 
(has_dropped_columns && !has_non_dropped_changes) { - u_int32_t dropped_columns[table->s->fields]; - u_int32_t num_dropped_columns = 0; + uint32_t dropped_columns[table->s->fields]; + uint32_t num_dropped_columns = 0; int r = find_changed_columns( dropped_columns, &num_dropped_columns, @@ -344,8 +344,8 @@ ha_tokudb::check_if_supported_alter(TABLE *altered_table, goto cleanup; } if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) { - for (u_int32_t i = 0; i < num_dropped_columns; i++) { - u_int32_t curr_dropped_index = dropped_columns[i]; + for (uint32_t i = 0; i < num_dropped_columns; i++) { + uint32_t curr_dropped_index = dropped_columns[i]; Field* curr_dropped_field = table->field[curr_dropped_index]; printf( "Dropped column: index %d, name %s\n", @@ -420,7 +420,7 @@ ha_tokudb::alter_table_phase2( bool has_row_format_changes = alter_flags->is_set(HA_ALTER_ROW_FORMAT); KEY_AND_COL_INFO altered_kc_info; memset(&altered_kc_info, 0, sizeof(altered_kc_info)); - u_int32_t max_new_desc_size = 0; + uint32_t max_new_desc_size = 0; uchar* row_desc_buff = NULL; uchar* column_extra = NULL; bool dropping_indexes = alter_info->index_drop_count > 0 && !tables_have_same_keys(table,altered_table,false, false); @@ -504,11 +504,11 @@ ha_tokudb::alter_table_phase2( if (has_dropped_columns || has_added_columns) { DBT column_dbt; memset(&column_dbt, 0, sizeof(DBT)); - u_int32_t max_column_extra_size; - u_int32_t num_column_extra; - u_int32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases - u_int32_t num_columns = 0; - u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); + uint32_t max_column_extra_size; + uint32_t num_column_extra; + uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases + uint32_t num_columns = 0; + uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); memset(columns, 0, sizeof(columns)); if (has_added_columns && has_dropped_columns) { @@ -554,7 +554,7 @@ ha_tokudb::alter_table_phase2( column_extra = (uchar *)my_malloc(max_column_extra_size, MYF(MY_WME)); if (column_extra == NULL) { error = ENOMEM; goto cleanup; } - for (u_int32_t i = 0; i < curr_num_DBs; i++) { + for (uint32_t i = 0; i < curr_num_DBs; i++) { DBT row_descriptor; memset(&row_descriptor, 0, sizeof(row_descriptor)); KEY* prim_key = (hidden_primary_key) ? NULL : &altered_table->s->key_info[primary_key]; @@ -624,8 +624,8 @@ ha_tokudb::alter_table_phase2( method = row_type_to_compression_method(create_info->row_type); // Set the new type. 
- u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); - for (u_int32_t i = 0; i < curr_num_DBs; ++i) { + uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); + for (uint32_t i = 0; i < curr_num_DBs; ++i) { DB *db = share->key_file[i]; error = db->change_compression_method(db, method); if (error) { diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc index 3949dd17f62..29986bc3726 100644 --- a/storage/tokudb/ha_tokudb_alter_56.cc +++ b/storage/tokudb/ha_tokudb_alter_56.cc @@ -158,13 +158,13 @@ ha_tokudb::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_ } else // add column if (only_flags(handler_flags, Alter_inplace_info::ADD_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER)) { - u_int32_t added_columns[altered_table->s->fields]; - u_int32_t num_added_columns = 0; + uint32_t added_columns[altered_table->s->fields]; + uint32_t num_added_columns = 0; int r = find_changed_columns(added_columns, &num_added_columns, table, altered_table); if (r == 0) { if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) { - for (u_int32_t i = 0; i < num_added_columns; i++) { - u_int32_t curr_added_index = added_columns[i]; + for (uint32_t i = 0; i < num_added_columns; i++) { + uint32_t curr_added_index = added_columns[i]; Field* curr_added_field = altered_table->field[curr_added_index]; printf("Added column: index %d, name %s\n", curr_added_index, curr_added_field->field_name); } @@ -174,13 +174,13 @@ ha_tokudb::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_ } else // drop column if (only_flags(handler_flags, Alter_inplace_info::DROP_COLUMN + Alter_inplace_info::ALTER_COLUMN_ORDER)) { - u_int32_t dropped_columns[table->s->fields]; - u_int32_t num_dropped_columns = 0; + uint32_t dropped_columns[table->s->fields]; + uint32_t num_dropped_columns = 0; int r = find_changed_columns(dropped_columns, &num_dropped_columns, altered_table, table); if (r == 0) { if (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE_INFO) { - for (u_int32_t i = 0; i < num_dropped_columns; i++) { - u_int32_t curr_dropped_index = dropped_columns[i]; + for (uint32_t i = 0; i < num_dropped_columns; i++) { + uint32_t curr_dropped_index = dropped_columns[i]; Field* curr_dropped_field = table->field[curr_dropped_index]; printf("Dropped column: index %d, name %s\n", curr_dropped_index, curr_dropped_field->field_name); } @@ -248,8 +248,8 @@ ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alte assert(error == 0); ctx->compression_changed = true; // Set the new type. 
- u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); - for (u_int32_t i = 0; i < curr_num_DBs; i++) { + uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); + for (uint32_t i = 0; i < curr_num_DBs; i++) { db = share->key_file[i]; error = db->change_compression_method(db, method); if (error) @@ -315,13 +315,13 @@ ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_in int error; uchar *column_extra = NULL; uchar *row_desc_buff = NULL; - u_int32_t max_new_desc_size = 0; - u_int32_t max_column_extra_size; - u_int32_t num_column_extra; - u_int32_t num_columns = 0; - u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); + uint32_t max_new_desc_size = 0; + uint32_t max_column_extra_size; + uint32_t num_column_extra; + uint32_t num_columns = 0; + uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); - u_int32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases + uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases memset(columns, 0, sizeof(columns)); KEY_AND_COL_INFO altered_kc_info; @@ -369,7 +369,7 @@ ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_in column_extra = (uchar *)my_malloc(max_column_extra_size, MYF(MY_WME)); if (column_extra == NULL) { error = ENOMEM; goto cleanup; } - for (u_int32_t i = 0; i < curr_num_DBs; i++) { + for (uint32_t i = 0; i < curr_num_DBs; i++) { DBT row_descriptor; memset(&row_descriptor, 0, sizeof(row_descriptor)); KEY* prim_key = (hidden_primary_key) ? NULL : &altered_table->s->key_info[primary_key]; @@ -495,8 +495,8 @@ ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_info * restore_drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count); } if (ctx->compression_changed) { - u_int32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); - for (u_int32_t i = 0; i < curr_num_DBs; i++) { + uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key); + for (uint32_t i = 0; i < curr_num_DBs; i++) { DB *db = share->key_file[i]; int error = db->change_compression_method(db, ctx->orig_compression_method); assert(error == 0); diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc index bc6753adde1..4f5a49c2f90 100644 --- a/storage/tokudb/ha_tokudb_alter_common.cc +++ b/storage/tokudb/ha_tokudb_alter_common.cc @@ -19,7 +19,7 @@ tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print_error, bool retval = false; goto cleanup; } - for (u_int32_t i=0; i < table->s->keys; i++) { + for (uint32_t i=0; i < table->s->keys; i++) { KEY* curr_orig_key = &table->key_info[i]; KEY* curr_altered_key = &altered_table->key_info[i]; if (strcmp(curr_orig_key->name, curr_altered_key->name)) { @@ -70,7 +70,7 @@ tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print_error, bool // // now verify that each field in the key is the same // - for (u_int32_t j = 0; j < curr_orig_key->key_parts; j++) { + for (uint32_t j = 0; j < curr_orig_key->key_parts; j++) { KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j]; KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j]; Field* curr_orig_field = curr_orig_part->field; @@ -115,9 +115,9 @@ cleanup: // to evaluate whether a field is NULL or not. This value is a power of 2, from // 2^0 to 2^7. 
We return the position of the bit within the byte, which is // lg null_bit -static inline u_int32_t -get_null_bit_position(u_int32_t null_bit) { - u_int32_t retval = 0; +static inline uint32_t +get_null_bit_position(uint32_t null_bit) { + uint32_t retval = 0; switch(null_bit) { case (1): retval = 0; @@ -150,24 +150,24 @@ get_null_bit_position(u_int32_t null_bit) { } // returns the index of the null bit of field. -static inline u_int32_t +static inline uint32_t get_overall_null_bit_position(TABLE* table, Field* field) { - u_int32_t offset = get_null_offset(table, field); - u_int32_t null_bit = field->null_bit; + uint32_t offset = get_null_offset(table, field); + uint32_t null_bit = field->null_bit; return offset*8 + get_null_bit_position(null_bit); } // not static since 51 uses this and 56 does not bool are_null_bits_in_order(TABLE* table) { - u_int32_t curr_null_pos = 0; + uint32_t curr_null_pos = 0; bool first = true; bool retval = true; for (uint i = 0; i < table->s->fields; i++) { Field* curr_field = table->field[i]; bool nullable = (curr_field->null_bit != 0); if (nullable) { - u_int32_t pos = get_overall_null_bit_position( + uint32_t pos = get_overall_null_bit_position( table, curr_field ); @@ -182,9 +182,9 @@ are_null_bits_in_order(TABLE* table) { return retval; } -static u_int32_t +static uint32_t get_first_null_bit_pos(TABLE* table) { - u_int32_t table_pos = 0; + uint32_t table_pos = 0; for (uint i = 0; i < table->s->fields; i++) { Field* curr_field = table->field[i]; bool nullable = (curr_field->null_bit != 0); @@ -201,12 +201,12 @@ get_first_null_bit_pos(TABLE* table) { #if 0 static bool -is_column_default_null(TABLE* src_table, u_int32_t field_index) { +is_column_default_null(TABLE* src_table, uint32_t field_index) { Field* curr_field = src_table->field[field_index]; bool is_null_default = false; bool nullable = curr_field->null_bit != 0; if (nullable) { - u_int32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field); + uint32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field); is_null_default = is_overall_null_position_set( src_table->s->default_values, null_bit_position @@ -216,14 +216,14 @@ is_column_default_null(TABLE* src_table, u_int32_t field_index) { } #endif -static u_int32_t +static uint32_t fill_static_row_mutator( uchar* buf, TABLE* orig_table, TABLE* altered_table, KEY_AND_COL_INFO* orig_kc_info, KEY_AND_COL_INFO* altered_kc_info, - u_int32_t keynr + uint32_t keynr ) { // @@ -255,7 +255,7 @@ fill_static_row_mutator( // // size of fixed fields // - u_int32_t fixed_field_size = orig_kc_info->mcp_info[keynr].fixed_field_size; + uint32_t fixed_field_size = orig_kc_info->mcp_info[keynr].fixed_field_size; memcpy(pos, &fixed_field_size, sizeof(fixed_field_size)); pos += sizeof(fixed_field_size); fixed_field_size = altered_kc_info->mcp_info[keynr].fixed_field_size; @@ -265,17 +265,17 @@ fill_static_row_mutator( // // length of offsets // - u_int32_t len_of_offsets = orig_kc_info->mcp_info[keynr].len_of_offsets; + uint32_t len_of_offsets = orig_kc_info->mcp_info[keynr].len_of_offsets; memcpy(pos, &len_of_offsets, sizeof(len_of_offsets)); pos += sizeof(len_of_offsets); len_of_offsets = altered_kc_info->mcp_info[keynr].len_of_offsets; memcpy(pos, &len_of_offsets, sizeof(len_of_offsets)); pos += sizeof(len_of_offsets); - u_int32_t orig_start_null_pos = get_first_null_bit_pos(orig_table); + uint32_t orig_start_null_pos = get_first_null_bit_pos(orig_table); memcpy(pos, &orig_start_null_pos, sizeof(orig_start_null_pos)); pos += 
sizeof(orig_start_null_pos); - u_int32_t altered_start_null_pos = get_first_null_bit_pos(altered_table); + uint32_t altered_start_null_pos = get_first_null_bit_pos(altered_table); memcpy(pos, &altered_start_null_pos, sizeof(altered_start_null_pos)); pos += sizeof(altered_start_null_pos); @@ -283,25 +283,25 @@ fill_static_row_mutator( return pos - buf; } -static u_int32_t +static uint32_t fill_dynamic_row_mutator( uchar* buf, - u_int32_t* columns, - u_int32_t num_columns, + uint32_t* columns, + uint32_t num_columns, TABLE* src_table, KEY_AND_COL_INFO* src_kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_add, bool* out_has_blobs ) { uchar* pos = buf; bool has_blobs = false; - u_int32_t cols = num_columns; + uint32_t cols = num_columns; memcpy(pos, &cols, sizeof(cols)); pos += sizeof(cols); - for (u_int32_t i = 0; i < num_columns; i++) { - u_int32_t curr_index = columns[i]; + for (uint32_t i = 0; i < num_columns; i++) { + uint32_t curr_index = columns[i]; Field* curr_field = src_table->field[curr_index]; pos[0] = is_add ? COL_ADD : COL_DROP; @@ -319,7 +319,7 @@ fill_dynamic_row_mutator( pos[0] = 1; pos++; // write position of null byte that is to be removed - u_int32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field); + uint32_t null_bit_position = get_overall_null_bit_position(src_table, curr_field); memcpy(pos, &null_bit_position, sizeof(null_bit_position)); pos += sizeof(null_bit_position); // @@ -340,11 +340,11 @@ fill_dynamic_row_mutator( pos[0] = COL_FIXED; pos++; //store the offset - u_int32_t fixed_field_offset = src_kc_info->cp_info[keynr][curr_index].col_pack_val; + uint32_t fixed_field_offset = src_kc_info->cp_info[keynr][curr_index].col_pack_val; memcpy(pos, &fixed_field_offset, sizeof(fixed_field_offset)); pos += sizeof(fixed_field_offset); //store the number of bytes - u_int32_t num_bytes = src_kc_info->field_lengths[curr_index]; + uint32_t num_bytes = src_kc_info->field_lengths[curr_index]; memcpy(pos, &num_bytes, sizeof(num_bytes)); pos += sizeof(num_bytes); if (is_add && !is_null_default) { @@ -361,13 +361,13 @@ fill_dynamic_row_mutator( pos[0] = COL_VAR; pos++; //store the index of the variable column - u_int32_t var_field_index = src_kc_info->cp_info[keynr][curr_index].col_pack_val; + uint32_t var_field_index = src_kc_info->cp_info[keynr][curr_index].col_pack_val; memcpy(pos, &var_field_index, sizeof(var_field_index)); pos += sizeof(var_field_index); if (is_add && !is_null_default) { uint curr_field_offset = field_offset(curr_field, src_table); - u_int32_t len_bytes = src_kc_info->length_bytes[curr_index]; - u_int32_t data_length = get_var_data_length( + uint32_t len_bytes = src_kc_info->length_bytes[curr_index]; + uint32_t data_length = get_var_data_length( src_table->s->default_values + curr_field_offset, len_bytes ); @@ -391,7 +391,7 @@ fill_dynamic_row_mutator( return pos-buf; } -static u_int32_t +static uint32_t fill_static_blob_row_mutator( uchar* buf, TABLE* src_table, @@ -403,10 +403,10 @@ fill_static_blob_row_mutator( memcpy(pos, &src_kc_info->num_blobs, sizeof(src_kc_info->num_blobs)); pos += sizeof(src_kc_info->num_blobs); // copy length bytes for each blob - for (u_int32_t i = 0; i < src_kc_info->num_blobs; i++) { - u_int32_t curr_field_index = src_kc_info->blob_fields[i]; + for (uint32_t i = 0; i < src_kc_info->num_blobs; i++) { + uint32_t curr_field_index = src_kc_info->blob_fields[i]; Field* field = src_table->field[curr_field_index]; - u_int32_t len_bytes = field->row_pack_length(); + uint32_t len_bytes = 
field->row_pack_length(); assert(len_bytes <= 4); pos[0] = len_bytes; pos++; @@ -415,27 +415,27 @@ fill_static_blob_row_mutator( return pos-buf; } -static u_int32_t +static uint32_t fill_dynamic_blob_row_mutator( uchar* buf, - u_int32_t* columns, - u_int32_t num_columns, + uint32_t* columns, + uint32_t num_columns, TABLE* src_table, KEY_AND_COL_INFO* src_kc_info, bool is_add ) { uchar* pos = buf; - for (u_int32_t i = 0; i < num_columns; i++) { - u_int32_t curr_field_index = columns[i]; + for (uint32_t i = 0; i < num_columns; i++) { + uint32_t curr_field_index = columns[i]; Field* curr_field = src_table->field[curr_field_index]; if (src_kc_info->field_lengths[curr_field_index] == 0 && src_kc_info->length_bytes[curr_field_index]== 0 ) { // find out which blob it is - u_int32_t blob_index = src_kc_info->num_blobs; - for (u_int32_t j = 0; j < src_kc_info->num_blobs; j++) { + uint32_t blob_index = src_kc_info->num_blobs; + for (uint32_t j = 0; j < src_kc_info->num_blobs; j++) { if (curr_field_index == src_kc_info->blob_fields[j]) { blob_index = j; break; @@ -448,7 +448,7 @@ fill_dynamic_blob_row_mutator( memcpy(pos, &blob_index, sizeof(blob_index)); pos += sizeof(blob_index); if (is_add) { - u_int32_t len_bytes = curr_field->row_pack_length(); + uint32_t len_bytes = curr_field->row_pack_length(); assert(len_bytes <= 4); pos[0] = len_bytes; pos++; @@ -471,14 +471,14 @@ fill_dynamic_blob_row_mutator( // TODO: carefully review to make sure that the right information is used // TODO: namely, when do we get stuff from share->kc_info and when we get // TODO: it from altered_kc_info, and when is keynr associated with the right thing -u_int32_t +uint32_t ha_tokudb::fill_row_mutator( uchar* buf, - u_int32_t* columns, - u_int32_t num_columns, + uint32_t* columns, + uint32_t num_columns, TABLE* altered_table, KEY_AND_COL_INFO* altered_kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_add ) { @@ -626,15 +626,15 @@ cleanup: static int find_changed_columns( - u_int32_t* changed_columns, - u_int32_t* num_changed_columns, + uint32_t* changed_columns, + uint32_t* num_changed_columns, TABLE* smaller_table, TABLE* bigger_table ) { int retval; uint curr_new_col_index = 0; - u_int32_t curr_num_changed_columns=0; + uint32_t curr_num_changed_columns=0; assert(bigger_table->s->fields > smaller_table->s->fields); for (uint i = 0; i < smaller_table->s->fields; i++, curr_new_col_index++) { if (curr_new_col_index >= bigger_table->s->fields) { diff --git a/storage/tokudb/ha_tokudb_update_fun.cc b/storage/tokudb/ha_tokudb_update_fun.cc index bd15f54860a..349090691ce 100644 --- a/storage/tokudb/ha_tokudb_update_fun.cc +++ b/storage/tokudb/ha_tokudb_update_fun.cc @@ -54,8 +54,8 @@ So, upperbound is num_blobs(1+4+1+4) = num_columns*10 // checks whether the bit at index pos in data is set or not // static inline bool -is_overall_null_position_set(uchar* data, u_int32_t pos) { - u_int32_t offset = pos/8; +is_overall_null_position_set(uchar* data, uint32_t pos) { + uint32_t offset = pos/8; uchar remainder = pos%8; uchar null_bit = 1<size + extra->size + new_len_of_offsets + new_fixed_field_size; new_val_data = (uchar *)my_malloc( @@ -283,10 +283,10 @@ tokudb_update_fun( new_null_bytes = new_val_data; - memcpy(&curr_old_null_pos, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); - memcpy(&curr_new_null_pos, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); + memcpy(&curr_old_null_pos, extra_pos, sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); + memcpy(&curr_new_null_pos, extra_pos, 
sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); memcpy(&num_columns, extra_pos, sizeof(num_columns)); extra_pos += sizeof(num_columns); @@ -294,7 +294,7 @@ tokudb_update_fun( // // now go through and apply the change into new_val_data // - for (u_int32_t i = 0; i < num_columns; i++) { + for (uint32_t i = 0; i < num_columns; i++) { uchar op_type = extra_pos[0]; bool is_null_default = false; extra_pos++; @@ -303,10 +303,10 @@ tokudb_update_fun( bool nullable = (extra_pos[0] != 0); extra_pos++; if (nullable) { - u_int32_t null_bit_position; - memcpy(&null_bit_position, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); - u_int32_t num_bits; + uint32_t null_bit_position; + memcpy(&null_bit_position, extra_pos, sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); + uint32_t num_bits; if (op_type == COL_DROP) { assert(curr_old_null_pos <= null_bit_position); num_bits = null_bit_position - curr_old_null_pos; @@ -342,13 +342,13 @@ tokudb_update_fun( uchar col_type = extra_pos[0]; extra_pos++; if (col_type == COL_FIXED) { - u_int32_t col_offset; - u_int32_t col_size; - u_int32_t num_bytes_to_copy; - memcpy(&col_offset, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); - memcpy(&col_size, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); + uint32_t col_offset; + uint32_t col_size; + uint32_t num_bytes_to_copy; + memcpy(&col_offset, extra_pos, sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); + memcpy(&col_size, extra_pos, sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); if (op_type == COL_DROP) { num_bytes_to_copy = col_offset - curr_old_fixed_offset; @@ -386,9 +386,9 @@ tokudb_update_fun( } else if (col_type == COL_VAR) { - u_int32_t var_col_index; - memcpy(&var_col_index, extra_pos, sizeof(u_int32_t)); - extra_pos += sizeof(u_int32_t); + uint32_t var_col_index; + memcpy(&var_col_index, extra_pos, sizeof(uint32_t)); + extra_pos += sizeof(uint32_t); if (op_type == COL_DROP) { num_var_fields_to_copy = var_col_index - curr_old_num_var_field; } @@ -428,7 +428,7 @@ tokudb_update_fun( curr_new_var_field_offset_ptr += new_num_offset_bytes; } else { - u_int32_t data_length; + uint32_t data_length; memcpy(&data_length, extra_pos, sizeof(data_length)); extra_pos += sizeof(data_length); curr_new_var_field_data_ptr = write_var_field( @@ -514,9 +514,9 @@ tokudb_update_fun( // else, there is blob information to process else { uchar* len_bytes = NULL; - u_int32_t curr_old_blob = 0; - u_int32_t curr_new_blob = 0; - u_int32_t num_old_blobs = 0; + uint32_t curr_old_blob = 0; + uint32_t curr_new_blob = 0; + uint32_t num_old_blobs = 0; uchar* curr_old_blob_ptr = start_blob_ptr; memcpy(&num_old_blobs, extra_pos, sizeof(num_old_blobs)); extra_pos += sizeof(num_old_blobs); @@ -526,8 +526,8 @@ tokudb_update_fun( while ((extra_pos - extra_pos_start) < extra->size) { uchar op_type = extra_pos[0]; extra_pos++; - u_int32_t num_blobs_to_copy = 0; - u_int32_t blob_index; + uint32_t num_blobs_to_copy = 0; + uint32_t blob_index; memcpy(&blob_index, extra_pos, sizeof(blob_index)); extra_pos += sizeof(blob_index); assert (op_type == COL_DROP || op_type == COL_ADD); @@ -537,8 +537,8 @@ tokudb_update_fun( else { num_blobs_to_copy = blob_index - curr_new_blob; } - for (u_int32_t i = 0; i < num_blobs_to_copy; i++) { - u_int32_t num_bytes_written = copy_toku_blob( + for (uint32_t i = 0; i < num_blobs_to_copy; i++) { + uint32_t num_bytes_written = copy_toku_blob( curr_new_var_field_data_ptr, curr_old_blob_ptr, len_bytes[curr_old_blob + i], @@ -551,7 +551,7 @@ tokudb_update_fun( 
curr_new_blob += num_blobs_to_copy; if (op_type == COL_DROP) { // skip over blob in row - u_int32_t num_bytes = copy_toku_blob( + uint32_t num_bytes = copy_toku_blob( NULL, curr_old_blob_ptr, len_bytes[curr_old_blob], @@ -562,9 +562,9 @@ tokudb_update_fun( } else { // copy new data - u_int32_t new_len_bytes = extra_pos[0]; + uint32_t new_len_bytes = extra_pos[0]; extra_pos++; - u_int32_t num_bytes = copy_toku_blob( + uint32_t num_bytes = copy_toku_blob( curr_new_var_field_data_ptr, extra_pos, new_len_bytes, diff --git a/storage/tokudb/hatoku_cmp.cc b/storage/tokudb/hatoku_cmp.cc index 777c987e852..06fe2fb7226 100644 --- a/storage/tokudb/hatoku_cmp.cc +++ b/storage/tokudb/hatoku_cmp.cc @@ -5,15 +5,15 @@ #endif void get_var_field_info( - u_int32_t* field_len, // output: length of field - u_int32_t* start_offset, // output, length of offset where data starts - u_int32_t var_field_index, //input, index of var field we want info on + uint32_t* field_len, // output: length of field + uint32_t* start_offset, // output, length of offset where data starts + uint32_t var_field_index, //input, index of var field we want info on const uchar* var_field_offset_ptr, //input, pointer to where offset information for all var fields begins - u_int32_t num_offset_bytes //input, number of bytes used to store offsets starting at var_field_offset_ptr + uint32_t num_offset_bytes //input, number of bytes used to store offsets starting at var_field_offset_ptr ) { - u_int32_t data_start_offset = 0; - u_int32_t data_end_offset = 0; + uint32_t data_start_offset = 0; + uint32_t data_end_offset = 0; switch (num_offset_bytes) { case (1): data_end_offset = (var_field_offset_ptr + var_field_index)[0]; @@ -49,13 +49,13 @@ void get_var_field_info( } void get_blob_field_info( - u_int32_t* start_offset, - u_int32_t len_of_offsets, + uint32_t* start_offset, + uint32_t len_of_offsets, const uchar* var_field_data_ptr, - u_int32_t num_offset_bytes + uint32_t num_offset_bytes ) { - u_int32_t data_end_offset; + uint32_t data_end_offset; // // need to set var_field_data_ptr to point to beginning of blobs, which // is at the end of the var stuff (if they exist), if var stuff does not exist @@ -157,7 +157,7 @@ exit: } -static inline CHARSET_INFO* get_charset_from_num (u_int32_t charset_number) { +static inline CHARSET_INFO* get_charset_from_num (uint32_t charset_number) { // // patternmatched off of InnoDB, due to MySQL bug 42649 // @@ -177,10 +177,10 @@ static inline CHARSET_INFO* get_charset_from_num (u_int32_t charset_number) { // // used to read the length of a variable sized field in a tokudb key (buf). // -static inline u_int32_t get_length_from_var_tokudata (uchar* buf, u_int32_t length_bytes) { - u_int32_t length = (u_int32_t)(buf[0]); +static inline uint32_t get_length_from_var_tokudata (uchar* buf, uint32_t length_bytes) { + uint32_t length = (uint32_t)(buf[0]); if (length_bytes == 2) { - u_int32_t rest_of_length = (u_int32_t)buf[1]; + uint32_t rest_of_length = (uint32_t)buf[1]; length += rest_of_length<<8; } return length; @@ -190,7 +190,7 @@ static inline u_int32_t get_length_from_var_tokudata (uchar* buf, u_int32_t leng // used to deduce the number of bytes used to store the length of a varstring/varbinary // in a key field stored in tokudb // -static inline u_int32_t get_length_bytes_from_max(u_int32_t max_num_bytes) { +static inline uint32_t get_length_bytes_from_max(uint32_t max_num_bytes) { return (max_num_bytes > 255) ? 
2 : 1; } @@ -199,7 +199,7 @@ static inline u_int32_t get_length_bytes_from_max(u_int32_t max_num_bytes) { // // assuming MySQL in little endian, and we are storing in little endian // -static inline uchar* pack_toku_int (uchar* to_tokudb, uchar* from_mysql, u_int32_t num_bytes) { +static inline uchar* pack_toku_int (uchar* to_tokudb, uchar* from_mysql, uint32_t num_bytes) { switch (num_bytes) { case (1): memcpy(to_tokudb, from_mysql, 1); @@ -225,7 +225,7 @@ static inline uchar* pack_toku_int (uchar* to_tokudb, uchar* from_mysql, u_int32 // // assuming MySQL in little endian, and we are unpacking to little endian // -static inline uchar* unpack_toku_int(uchar* to_mysql, uchar* from_tokudb, u_int32_t num_bytes) { +static inline uchar* unpack_toku_int(uchar* to_mysql, uchar* from_tokudb, uint32_t num_bytes) { switch (num_bytes) { case (1): memcpy(to_mysql, from_tokudb, 1); @@ -248,14 +248,14 @@ static inline uchar* unpack_toku_int(uchar* to_mysql, uchar* from_tokudb, u_int3 return from_tokudb+num_bytes; } -static inline int cmp_toku_int (uchar* a_buf, uchar* b_buf, bool is_unsigned, u_int32_t num_bytes) { +static inline int cmp_toku_int (uchar* a_buf, uchar* b_buf, bool is_unsigned, uint32_t num_bytes) { int ret_val = 0; // // case for unsigned integers // if (is_unsigned) { - u_int32_t a_num, b_num = 0; - u_int64_t a_big_num, b_big_num = 0; + uint32_t a_num, b_num = 0; + uint64_t a_big_num, b_big_num = 0; switch (num_bytes) { case (1): a_num = *a_buf; @@ -419,12 +419,12 @@ exit: } -static inline uchar* pack_toku_binary(uchar* to_tokudb, uchar* from_mysql, u_int32_t num_bytes) { +static inline uchar* pack_toku_binary(uchar* to_tokudb, uchar* from_mysql, uint32_t num_bytes) { memcpy(to_tokudb, from_mysql, num_bytes); return to_tokudb + num_bytes; } -static inline uchar* unpack_toku_binary(uchar* to_mysql, uchar* from_tokudb, u_int32_t num_bytes) { +static inline uchar* unpack_toku_binary(uchar* to_mysql, uchar* from_tokudb, uint32_t num_bytes) { memcpy(to_mysql, from_tokudb, num_bytes); return from_tokudb + num_bytes; } @@ -432,13 +432,13 @@ static inline uchar* unpack_toku_binary(uchar* to_mysql, uchar* from_tokudb, u_i static inline int cmp_toku_binary( uchar* a_buf, - u_int32_t a_num_bytes, + uint32_t a_num_bytes, uchar* b_buf, - u_int32_t b_num_bytes + uint32_t b_num_bytes ) { int ret_val = 0; - u_int32_t num_bytes_to_cmp = (a_num_bytes < b_num_bytes) ? a_num_bytes : b_num_bytes; + uint32_t num_bytes_to_cmp = (a_num_bytes < b_num_bytes) ? 
a_num_bytes : b_num_bytes; ret_val = memcmp(a_buf, b_buf, num_bytes_to_cmp); if ((ret_val != 0) || (a_num_bytes == b_num_bytes)) { goto exit; @@ -461,12 +461,12 @@ exit: uchar* pack_toku_varbinary_from_desc( uchar* to_tokudb, const uchar* from_desc, - u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb - u_int32_t field_length //length of field + uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb + uint32_t field_length //length of field ) { - u_int32_t length_bytes_in_tokudb = get_length_bytes_from_max(key_part_length); - u_int32_t length = field_length; + uint32_t length_bytes_in_tokudb = get_length_bytes_from_max(key_part_length); + uint32_t length = field_length; set_if_smaller(length, key_part_length); // @@ -486,18 +486,18 @@ uchar* pack_toku_varbinary_from_desc( static inline uchar* pack_toku_varbinary( uchar* to_tokudb, uchar* from_mysql, - u_int32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql - u_int32_t max_num_bytes + uint32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql + uint32_t max_num_bytes ) { - u_int32_t length = 0; - u_int32_t length_bytes_in_tokudb; + uint32_t length = 0; + uint32_t length_bytes_in_tokudb; switch (length_bytes_in_mysql) { case (0): length = max_num_bytes; break; case (1): - length = (u_int32_t)(*from_mysql); + length = (uint32_t)(*from_mysql); break; case (2): length = uint2korr(from_mysql); @@ -533,11 +533,11 @@ static inline uchar* pack_toku_varbinary( static inline uchar* unpack_toku_varbinary( uchar* to_mysql, uchar* from_tokudb, - u_int32_t length_bytes_in_tokudb, // number of bytes used to encode length in from_tokudb - u_int32_t length_bytes_in_mysql // number of bytes used to encode length in to_mysql + uint32_t length_bytes_in_tokudb, // number of bytes used to encode length in from_tokudb + uint32_t length_bytes_in_mysql // number of bytes used to encode length in to_mysql ) { - u_int32_t length = get_length_from_var_tokudata(from_tokudb, length_bytes_in_tokudb); + uint32_t length = get_length_from_var_tokudata(from_tokudb, length_bytes_in_tokudb); // // copy the length into the mysql buffer @@ -570,14 +570,14 @@ static inline uchar* unpack_toku_varbinary( static inline int cmp_toku_varbinary( uchar* a_buf, uchar* b_buf, - u_int32_t length_bytes, //number of bytes used to encode length in a_buf and b_buf - u_int32_t* a_bytes_read, - u_int32_t* b_bytes_read + uint32_t length_bytes, //number of bytes used to encode length in a_buf and b_buf + uint32_t* a_bytes_read, + uint32_t* b_bytes_read ) { int ret_val = 0; - u_int32_t a_len = get_length_from_var_tokudata(a_buf, length_bytes); - u_int32_t b_len = get_length_from_var_tokudata(b_buf, length_bytes); + uint32_t a_len = get_length_from_var_tokudata(a_buf, length_bytes); + uint32_t b_len = get_length_from_var_tokudata(b_buf, length_bytes); ret_val = cmp_toku_binary( a_buf + length_bytes, a_len, @@ -592,9 +592,9 @@ static inline int cmp_toku_varbinary( static inline uchar* pack_toku_blob( uchar* to_tokudb, uchar* from_mysql, - u_int32_t length_bytes_in_tokudb, //number of bytes to use to encode the length in to_tokudb - u_int32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql - u_int32_t max_num_bytes, + uint32_t length_bytes_in_tokudb, //number of bytes to use to encode the length in to_tokudb + uint32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql + uint32_t max_num_bytes, #if 
MYSQL_VERSION_ID >= 50600 const CHARSET_INFO* charset #else @@ -602,8 +602,8 @@ static inline uchar* pack_toku_blob( #endif ) { - u_int32_t length = 0; - u_int32_t local_char_length = 0; + uint32_t length = 0; + uint32_t local_char_length = 0; uchar* blob_buf = NULL; switch (length_bytes_in_mysql) { @@ -611,7 +611,7 @@ static inline uchar* pack_toku_blob( length = max_num_bytes; break; case (1): - length = (u_int32_t)(*from_mysql); + length = (uint32_t)(*from_mysql); break; case (2): length = uint2korr(from_mysql); @@ -659,11 +659,11 @@ static inline uchar* pack_toku_blob( static inline uchar* unpack_toku_blob( uchar* to_mysql, uchar* from_tokudb, - u_int32_t length_bytes_in_tokudb, // number of bytes used to encode length in from_tokudb - u_int32_t length_bytes_in_mysql // number of bytes used to encode length in to_mysql + uint32_t length_bytes_in_tokudb, // number of bytes used to encode length in from_tokudb + uint32_t length_bytes_in_mysql // number of bytes used to encode length in to_mysql ) { - u_int32_t length = get_length_from_var_tokudata(from_tokudb, length_bytes_in_tokudb); + uint32_t length = get_length_from_var_tokudata(from_tokudb, length_bytes_in_tokudb); uchar* blob_pos = NULL; // // copy the length into the mysql buffer @@ -701,15 +701,15 @@ static inline uchar* unpack_toku_blob( uchar* pack_toku_varstring_from_desc( uchar* to_tokudb, const uchar* from_desc, - u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb - u_int32_t field_length, - u_int32_t charset_num//length of field + uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb + uint32_t field_length, + uint32_t charset_num//length of field ) { CHARSET_INFO* charset = NULL; - u_int32_t length_bytes_in_tokudb = get_length_bytes_from_max(key_part_length); - u_int32_t length = field_length; - u_int32_t local_char_length = 0; + uint32_t length_bytes_in_tokudb = get_length_bytes_from_max(key_part_length); + uint32_t length = field_length; + uint32_t local_char_length = 0; set_if_smaller(length, key_part_length); charset = get_charset_from_num(charset_num); @@ -748,9 +748,9 @@ uchar* pack_toku_varstring_from_desc( static inline uchar* pack_toku_varstring( uchar* to_tokudb, uchar* from_mysql, - u_int32_t length_bytes_in_tokudb, //number of bytes to use to encode the length in to_tokudb - u_int32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql - u_int32_t max_num_bytes, + uint32_t length_bytes_in_tokudb, //number of bytes to use to encode the length in to_tokudb + uint32_t length_bytes_in_mysql, //number of bytes used to encode the length in from_mysql + uint32_t max_num_bytes, #if MYSQL_VERSION_ID >= 50600 const CHARSET_INFO *charset #else @@ -758,15 +758,15 @@ static inline uchar* pack_toku_varstring( #endif ) { - u_int32_t length = 0; - u_int32_t local_char_length = 0; + uint32_t length = 0; + uint32_t local_char_length = 0; switch (length_bytes_in_mysql) { case (0): length = max_num_bytes; break; case (1): - length = (u_int32_t)(*from_mysql); + length = (uint32_t)(*from_mysql); break; case (2): length = uint2korr(from_mysql); @@ -810,10 +810,10 @@ static inline uchar* pack_toku_varstring( static inline int cmp_toku_string( uchar* a_buf, - u_int32_t a_num_bytes, + uint32_t a_num_bytes, uchar* b_buf, - u_int32_t b_num_bytes, - u_int32_t charset_number + uint32_t b_num_bytes, + uint32_t charset_number ) { int ret_val = 0; @@ -835,15 +835,15 @@ static inline int cmp_toku_string( static inline int cmp_toku_varstring( uchar* 
a_buf, uchar* b_buf, - u_int32_t length_bytes, //number of bytes used to encode length in a_buf and b_buf - u_int32_t charset_num, - u_int32_t* a_bytes_read, - u_int32_t* b_bytes_read + uint32_t length_bytes, //number of bytes used to encode length in a_buf and b_buf + uint32_t charset_num, + uint32_t* a_bytes_read, + uint32_t* b_bytes_read ) { int ret_val = 0; - u_int32_t a_len = get_length_from_var_tokudata(a_buf, length_bytes); - u_int32_t b_len = get_length_from_var_tokudata(b_buf, length_bytes); + uint32_t a_len = get_length_from_var_tokudata(a_buf, length_bytes); + uint32_t b_len = get_length_from_var_tokudata(b_buf, length_bytes); ret_val = cmp_toku_string( a_buf + length_bytes, a_len, @@ -858,9 +858,9 @@ static inline int cmp_toku_varstring( static inline int tokudb_compare_two_hidden_keys( const void* new_key_data, - const u_int32_t new_key_size, + const uint32_t new_key_size, const void* saved_key_data, - const u_int32_t saved_key_size + const uint32_t saved_key_size ) { assert( (new_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) && (saved_key_size >= TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) ); ulonglong a = hpk_char_to_num((uchar *) new_key_data); @@ -878,7 +878,7 @@ static inline int tokudb_compare_two_hidden_keys( // created in create_toku_key_descriptor_for_key. The first // byte points to the TOKU_TYPE. // -u_int32_t skip_field_in_descriptor(uchar* row_desc) { +uint32_t skip_field_in_descriptor(uchar* row_desc) { uchar* row_desc_pos = row_desc; TOKU_TYPE toku_type = (TOKU_TYPE)row_desc_pos[0]; row_desc_pos++; @@ -899,13 +899,13 @@ u_int32_t skip_field_in_descriptor(uchar* row_desc) { case (toku_type_varstring): case (toku_type_blob): row_desc_pos++; - row_desc_pos += sizeof(u_int32_t); + row_desc_pos += sizeof(uint32_t); break; default: assert(false); break; } - return (u_int32_t)(row_desc_pos - row_desc); + return (uint32_t)(row_desc_pos - row_desc); } // @@ -915,8 +915,8 @@ u_int32_t skip_field_in_descriptor(uchar* row_desc) { // int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) { uchar* pos = buf; - u_int32_t num_bytes_in_field = 0; - u_int32_t charset_num = 0; + uint32_t num_bytes_in_field = 0; + uint32_t charset_num = 0; for (uint i = 0; i < key->key_parts; i++){ Field* field = key->key_part[i].field; // @@ -1031,8 +1031,8 @@ int create_toku_key_descriptor( // ends. 
// uchar* pos = buf + 4; - u_int32_t num_bytes = 0; - u_int32_t offset = 0; + uint32_t num_bytes = 0; + uint32_t offset = 0; if (is_first_hpk) { @@ -1093,16 +1093,16 @@ static inline int compare_toku_field( uchar* a_buf, uchar* b_buf, uchar* row_desc, - u_int32_t* a_bytes_read, - u_int32_t* b_bytes_read, - u_int32_t* row_desc_bytes_read + uint32_t* a_bytes_read, + uint32_t* b_bytes_read, + uint32_t* row_desc_bytes_read ) { int ret_val = 0; uchar* row_desc_pos = row_desc; - u_int32_t num_bytes = 0; - u_int32_t length_bytes = 0; - u_int32_t charset_num = 0; + uint32_t num_bytes = 0; + uint32_t length_bytes = 0; + uint32_t charset_num = 0; bool is_unsigned = false; TOKU_TYPE toku_type = (TOKU_TYPE)row_desc_pos[0]; @@ -1168,8 +1168,8 @@ static inline int compare_toku_field( // // not sure we want to read charset_num like this // - charset_num = *(u_int32_t *)row_desc_pos; - row_desc_pos += sizeof(u_int32_t); + charset_num = *(uint32_t *)row_desc_pos; + row_desc_pos += sizeof(uint32_t); ret_val = cmp_toku_varstring( a_buf, b_buf, @@ -1196,11 +1196,11 @@ uchar* pack_toku_key_field( uchar* to_tokudb, uchar* from_mysql, Field* field, - u_int32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff + uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff ) { uchar* new_pos = NULL; - u_int32_t num_bytes = 0; + uint32_t num_bytes = 0; TOKU_TYPE toku_type = mysql_to_toku_type(field); switch(toku_type) { case (toku_type_int): @@ -1289,7 +1289,7 @@ uchar* pack_key_toku_key_field( uchar* to_tokudb, uchar* from_mysql, Field* field, - u_int32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff + uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff ) { uchar* new_pos = NULL; @@ -1335,12 +1335,12 @@ uchar* unpack_toku_key_field( uchar* to_mysql, uchar* from_tokudb, Field* field, - u_int32_t key_part_length + uint32_t key_part_length ) { uchar* new_pos = NULL; - u_int32_t num_bytes = 0; - u_int32_t num_bytes_copied; + uint32_t num_bytes = 0; + uint32_t num_bytes_copied; TOKU_TYPE toku_type = mysql_to_toku_type(field); switch(toku_type) { case (toku_type_int): @@ -1410,11 +1410,11 @@ exit: int tokudb_compare_two_keys( const void* new_key_data, - const u_int32_t new_key_size, + const uint32_t new_key_size, const void* saved_key_data, - const u_int32_t saved_key_size, + const uint32_t saved_key_size, const void* row_desc, - const u_int32_t row_desc_size, + const uint32_t row_desc_size, bool cmp_prefix ) { @@ -1426,8 +1426,8 @@ int tokudb_compare_two_keys( uchar *new_key_ptr = (uchar *)new_key_data; uchar *saved_key_ptr = (uchar *)saved_key_data; - u_int32_t new_key_bytes_left = new_key_size; - u_int32_t saved_key_bytes_left = saved_key_size; + uint32_t new_key_bytes_left = new_key_size; + uint32_t saved_key_bytes_left = saved_key_size; // // if the keys have an infinity byte, set it @@ -1440,14 +1440,14 @@ int tokudb_compare_two_keys( } row_desc_ptr++; - while ( (u_int32_t)(new_key_ptr - (uchar *)new_key_data) < new_key_size && - (u_int32_t)(saved_key_ptr - (uchar *)saved_key_data) < saved_key_size && - (u_int32_t)(row_desc_ptr - (uchar *)row_desc) < row_desc_size + while ( (uint32_t)(new_key_ptr - (uchar *)new_key_data) < new_key_size && + (uint32_t)(saved_key_ptr - (uchar *)saved_key_data) < saved_key_size && + (uint32_t)(row_desc_ptr - (uchar *)row_desc) < row_desc_size ) { - u_int32_t new_key_field_length; - u_int32_t saved_key_field_length; - u_int32_t 
row_desc_field_length; + uint32_t new_key_field_length; + uint32_t saved_key_field_length; + uint32_t row_desc_field_length; // // if there is a null byte at this point in the key // @@ -1493,12 +1493,12 @@ int tokudb_compare_two_keys( goto exit; } - assert((u_int32_t)(new_key_ptr - (uchar *)new_key_data) <= new_key_size); - assert((u_int32_t)(saved_key_ptr - (uchar *)saved_key_data) <= saved_key_size); - assert((u_int32_t)(row_desc_ptr - (uchar *)row_desc) <= row_desc_size); + assert((uint32_t)(new_key_ptr - (uchar *)new_key_data) <= new_key_size); + assert((uint32_t)(saved_key_ptr - (uchar *)saved_key_data) <= saved_key_size); + assert((uint32_t)(row_desc_ptr - (uchar *)row_desc) <= row_desc_size); } - new_key_bytes_left = new_key_size - ((u_int32_t)(new_key_ptr - (uchar *)new_key_data)); - saved_key_bytes_left = saved_key_size - ((u_int32_t)(saved_key_ptr - (uchar *)saved_key_data)); + new_key_bytes_left = new_key_size - ((uint32_t)(new_key_ptr - (uchar *)new_key_data)); + saved_key_bytes_left = saved_key_size - ((uint32_t)(saved_key_ptr - (uchar *)saved_key_data)); if (cmp_prefix) { ret_val = 0; } @@ -1545,7 +1545,7 @@ int tokudb_cmp_dbt_key(DB* file, const DBT *keya, const DBT *keyb) { keyb->data, keyb->size, (uchar *)file->cmp_descriptor->dbt.data + 4, - (*(u_int32_t *)file->cmp_descriptor->dbt.data) - 4, + (*(uint32_t *)file->cmp_descriptor->dbt.data) - 4, false ); } @@ -1560,14 +1560,14 @@ int tokudb_prefix_cmp_dbt_key(DB *file, const DBT *keya, const DBT *keyb) { keyb->data, keyb->size, (uchar *)file->cmp_descriptor->dbt.data + 4, - *(u_int32_t *)file->cmp_descriptor->dbt.data - 4, + *(uint32_t *)file->cmp_descriptor->dbt.data - 4, true ); return cmp; } -u_int32_t create_toku_main_key_pack_descriptor ( +uint32_t create_toku_main_key_pack_descriptor ( uchar* buf ) { @@ -1576,7 +1576,7 @@ u_int32_t create_toku_main_key_pack_descriptor ( // ends. 
// uchar* pos = buf + 4; - u_int32_t offset = 0; + uint32_t offset = 0; // // one byte states if this is the main dictionary // @@ -1611,18 +1611,18 @@ exit: #define COPY_OFFSET_TO_BUF memcpy ( \ pos, \ &kc_info->cp_info[pk_index][field_index].col_pack_val, \ - sizeof(u_int32_t) \ + sizeof(uint32_t) \ ); \ - pos += sizeof(u_int32_t); + pos += sizeof(uint32_t); -u_int32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { +uint32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { uchar* pos = buf; uint16 field_index = key_part->field->field_index; Field* field = table_share->field[field_index]; TOKU_TYPE toku_type = mysql_to_toku_type(field); - u_int32_t key_part_length = key_part->length; - u_int32_t field_length; + uint32_t key_part_length = key_part->length; + uint32_t field_length; uchar len_bytes = 0; switch(toku_type) { @@ -1661,7 +1661,7 @@ u_int32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* return pos - buf; } -u_int32_t pack_desc_pk_offset_info( +uint32_t pack_desc_pk_offset_info( uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, @@ -1673,10 +1673,10 @@ u_int32_t pack_desc_pk_offset_info( uchar* pos = buf; uint16 field_index = key_part->field->field_index; bool found_col_in_pk = false; - u_int32_t index_in_pk; + uint32_t index_in_pk; bool is_constant_offset = true; - u_int32_t offset = 0; + uint32_t offset = 0; for (uint i = 0; i < prim_key->key_parts; i++) { KEY_PART_INFO curr = prim_key->key_part[i]; uint16 curr_field_index = curr.field->field_index; @@ -1710,7 +1710,7 @@ u_int32_t pack_desc_pk_offset_info( return pos - buf; } -u_int32_t pack_desc_offset_info(uchar* buf, KEY_AND_COL_INFO* kc_info, uint pk_index, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { +uint32_t pack_desc_offset_info(uchar* buf, KEY_AND_COL_INFO* kc_info, uint pk_index, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { uchar* pos = buf; uint16 field_index = key_part->field->field_index; Field* field = table_share->field[field_index]; @@ -1740,12 +1740,12 @@ u_int32_t pack_desc_offset_info(uchar* buf, KEY_AND_COL_INFO* kc_info, uint pk_i case (toku_type_blob): pos[0] = COL_BLOB_FIELD; pos++; - for (u_int32_t i = 0; i < kc_info->num_blobs; i++) { - u_int32_t blob_index = kc_info->blob_fields[i]; + for (uint32_t i = 0; i < kc_info->num_blobs; i++) { + uint32_t blob_index = kc_info->blob_fields[i]; if (blob_index == field_index) { - u_int32_t val = i; - memcpy(pos, &val, sizeof(u_int32_t)); - pos += sizeof(u_int32_t); + uint32_t val = i; + memcpy(pos, &val, sizeof(uint32_t)); + pos += sizeof(uint32_t); found_index = true; break; } @@ -1759,13 +1759,13 @@ u_int32_t pack_desc_offset_info(uchar* buf, KEY_AND_COL_INFO* kc_info, uint pk_i return pos - buf; } -u_int32_t pack_desc_key_length_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { +uint32_t pack_desc_key_length_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { uchar* pos = buf; uint16 field_index = key_part->field->field_index; Field* field = table_share->field[field_index]; TOKU_TYPE toku_type = mysql_to_toku_type(field); - u_int32_t key_part_length = key_part->length; - u_int32_t field_length; + uint32_t key_part_length = key_part->length; + uint32_t field_length; switch(toku_type) { case (toku_type_int): @@ -1794,12 +1794,12 @@ u_int32_t pack_desc_key_length_info(uchar* buf, KEY_AND_COL_INFO* 
kc_info, TABLE return pos - buf; } -u_int32_t pack_desc_char_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { +uint32_t pack_desc_char_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) { uchar* pos = buf; uint16 field_index = key_part->field->field_index; Field* field = table_share->field[field_index]; TOKU_TYPE toku_type = mysql_to_toku_type(field); - u_int32_t charset_num = 0; + uint32_t charset_num = 0; switch(toku_type) { case (toku_type_int): @@ -1831,7 +1831,7 @@ u_int32_t pack_desc_char_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE return pos - buf; } -u_int32_t pack_some_row_info ( +uint32_t pack_some_row_info ( uchar* buf, uint pk_index, TABLE_SHARE* table_share, @@ -1839,7 +1839,7 @@ u_int32_t pack_some_row_info ( ) { uchar* pos = buf; - u_int32_t num_null_bytes = 0; + uint32_t num_null_bytes = 0; // // four bytes stating number of null bytes // @@ -1860,17 +1860,17 @@ u_int32_t pack_some_row_info ( return pos - buf; } -u_int32_t get_max_clustering_val_pack_desc_size( +uint32_t get_max_clustering_val_pack_desc_size( TABLE_SHARE* table_share ) { - u_int32_t ret_val = 0; + uint32_t ret_val = 0; // // the fixed stuff: // first the things in pack_some_row_info // second another mcp_info // third a byte that states if blobs exist - ret_val += sizeof(u_int32_t) + sizeof(MULTI_COL_PACK_INFO) + 1; + ret_val += sizeof(uint32_t) + sizeof(MULTI_COL_PACK_INFO) + 1; ret_val += sizeof(MULTI_COL_PACK_INFO); ret_val++; // @@ -1878,7 +1878,7 @@ u_int32_t get_max_clustering_val_pack_desc_size( // an upper bound is, for each field, byte stating if it is fixed or var, followed // by 8 bytes for endpoints // - ret_val += (table_share->fields)*(1 + 2*sizeof(u_int32_t)); + ret_val += (table_share->fields)*(1 + 2*sizeof(uint32_t)); // // four bytes storing the length of this portion // @@ -1887,19 +1887,19 @@ u_int32_t get_max_clustering_val_pack_desc_size( return ret_val; } -u_int32_t create_toku_clustering_val_pack_descriptor ( +uint32_t create_toku_clustering_val_pack_descriptor ( uchar* buf, uint pk_index, TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_clustering ) { uchar* pos = buf + 4; - u_int32_t offset = 0; + uint32_t offset = 0; bool start_range_set = false; - u_int32_t last_col = 0; + uint32_t last_col = 0; // // do not need to write anything if the key is not clustering // @@ -1957,7 +1957,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( // need to set the end range // start_range_set = false; - u_int32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val + kc_info->field_lengths[last_col]; + uint32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val + kc_info->field_lengths[last_col]; memcpy(pos, &end_offset, sizeof(end_offset)); pos += sizeof(end_offset); } @@ -1966,7 +1966,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( pos[0] = CK_FIX_RANGE; pos++; start_range_set = true; - u_int32_t start_offset = kc_info->cp_info[pk_index][i].col_pack_val; + uint32_t start_offset = kc_info->cp_info[pk_index][i].col_pack_val; memcpy(pos, &start_offset , sizeof(start_offset)); pos += sizeof(start_offset); } @@ -1981,7 +1981,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( // need to set the end range // start_range_set = false; - u_int32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val+ kc_info->field_lengths[last_col]; + uint32_t end_offset = 
kc_info->cp_info[pk_index][last_col].col_pack_val+ kc_info->field_lengths[last_col]; memcpy(pos, &end_offset, sizeof(end_offset)); pos += sizeof(end_offset); } @@ -2004,7 +2004,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( // need to set the end range // start_range_set = false; - u_int32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val; + uint32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val; memcpy(pos, &end_offset, sizeof(end_offset)); pos += sizeof(end_offset); } @@ -2014,7 +2014,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( pos++; start_range_set = true; - u_int32_t start_offset = kc_info->cp_info[pk_index][i].col_pack_val; + uint32_t start_offset = kc_info->cp_info[pk_index][i].col_pack_val; memcpy(pos, &start_offset , sizeof(start_offset)); pos += sizeof(start_offset); } @@ -2026,7 +2026,7 @@ u_int32_t create_toku_clustering_val_pack_descriptor ( } if (start_range_set) { start_range_set = false; - u_int32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val; + uint32_t end_offset = kc_info->cp_info[pk_index][last_col].col_pack_val; memcpy(pos, &end_offset, sizeof(end_offset)); pos += sizeof(end_offset); } @@ -2041,10 +2041,10 @@ exit: return pos - buf; } -u_int32_t pack_clustering_val_from_desc( +uint32_t pack_clustering_val_from_desc( uchar* buf, void* row_desc, - u_int32_t row_desc_size, + uint32_t row_desc_size, const DBT* pk_val ) { @@ -2057,8 +2057,8 @@ u_int32_t pack_clustering_val_from_desc( uchar* var_dest_data_ptr = NULL; uchar* orig_var_dest_data_ptr = NULL; uchar* desc_pos = (uchar *)row_desc; - u_int32_t num_null_bytes = 0; - u_int32_t num_offset_bytes; + uint32_t num_null_bytes = 0; + uint32_t num_offset_bytes; MULTI_COL_PACK_INFO src_mcp_info, dest_mcp_info; uchar has_blobs; @@ -2094,8 +2094,8 @@ u_int32_t pack_clustering_val_from_desc( // copy the null bytes // memcpy(buf, null_bytes_src_ptr, num_null_bytes); - while ( (u_int32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { - u_int32_t start, end, length; + while ( (uint32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { + uint32_t start, end, length; uchar curr = desc_pos[0]; desc_pos++; @@ -2114,11 +2114,11 @@ u_int32_t pack_clustering_val_from_desc( fixed_dest_ptr += length; } else if (curr == CK_VAR_RANGE) { - u_int32_t start_data_size; - u_int32_t start_data_offset; - u_int32_t end_data_size; - u_int32_t end_data_offset; - u_int32_t offset_diffs; + uint32_t start_data_size; + uint32_t start_data_offset; + uint32_t end_data_size; + uint32_t end_data_offset; + uint32_t offset_diffs; get_var_field_info( &start_data_size, @@ -2148,16 +2148,16 @@ u_int32_t pack_clustering_val_from_desc( // // put in offset info // - offset_diffs = (end_data_offset + end_data_size) - (u_int32_t)(var_dest_data_ptr - orig_var_dest_data_ptr); - for (u_int32_t i = start; i <= end; i++) { + offset_diffs = (end_data_offset + end_data_size) - (uint32_t)(var_dest_data_ptr - orig_var_dest_data_ptr); + for (uint32_t i = start; i <= end; i++) { if ( num_offset_bytes == 1 ) { assert(offset_diffs < 256); var_dest_offset_ptr[0] = var_src_offset_ptr[i] - (uchar)offset_diffs; var_dest_offset_ptr++; } else if ( num_offset_bytes == 2 ) { - u_int32_t tmp = uint2korr(var_src_offset_ptr + 2*i); - u_int32_t new_offset = tmp - offset_diffs; + uint32_t tmp = uint2korr(var_src_offset_ptr + 2*i); + uint32_t new_offset = tmp - offset_diffs; assert(new_offset < 1<<16); int2store(var_dest_offset_ptr,new_offset); var_dest_offset_ptr += 2; @@ -2177,8 +2177,8 @@ u_int32_t 
pack_clustering_val_from_desc( // so, we put the blobs at var_dest_data_ptr // if (has_blobs) { - u_int32_t num_blob_bytes; - u_int32_t start_offset; + uint32_t num_blob_bytes; + uint32_t start_offset; uchar* src_blob_ptr = NULL; get_blob_field_info( &start_offset, @@ -2195,11 +2195,11 @@ u_int32_t pack_clustering_val_from_desc( } -u_int32_t get_max_secondary_key_pack_desc_size( +uint32_t get_max_secondary_key_pack_desc_size( KEY_AND_COL_INFO* kc_info ) { - u_int32_t ret_val = 0; + uint32_t ret_val = 0; // // the fixed stuff: // byte that states if main dictionary @@ -2207,7 +2207,7 @@ u_int32_t get_max_secondary_key_pack_desc_size( // the things in pack_some_row_info ret_val++; ret_val++; - ret_val += sizeof(u_int32_t) + sizeof(MULTI_COL_PACK_INFO) + 1; + ret_val += sizeof(uint32_t) + sizeof(MULTI_COL_PACK_INFO) + 1; // // now variable sized stuff // @@ -2226,7 +2226,7 @@ u_int32_t get_max_secondary_key_pack_desc_size( // null bit, then null byte, // then 1 byte stating what it is, then 4 for offset, 4 for key length, // 1 for if charset exists, and 4 for charset - ret_val += MAX_REF_PARTS*(1 + sizeof(u_int32_t) + 1 + 3*sizeof(u_int32_t) + 1); + ret_val += MAX_REF_PARTS*(1 + sizeof(uint32_t) + 1 + 3*sizeof(uint32_t) + 1); // // four bytes storing the length of this portion // @@ -2234,7 +2234,7 @@ u_int32_t get_max_secondary_key_pack_desc_size( return ret_val; } -u_int32_t create_toku_secondary_key_pack_descriptor ( +uint32_t create_toku_secondary_key_pack_descriptor ( uchar* buf, bool has_hpk, uint pk_index, @@ -2251,7 +2251,7 @@ u_int32_t create_toku_secondary_key_pack_descriptor ( // uchar* pk_info = NULL; uchar* pos = buf + 4; - u_int32_t offset = 0; + uint32_t offset = 0; // // first byte states that it is NOT main dictionary @@ -2281,8 +2281,8 @@ u_int32_t create_toku_secondary_key_pack_descriptor ( // store blob information // memcpy(pos, &kc_info->num_blobs, sizeof(kc_info->num_blobs)); - pos += sizeof(u_int32_t); - for (u_int32_t i = 0; i < kc_info->num_blobs; i++) { + pos += sizeof(uint32_t); + for (uint32_t i = 0; i < kc_info->num_blobs; i++) { // // store length bytes for each blob // @@ -2354,9 +2354,9 @@ u_int32_t create_toku_secondary_key_pack_descriptor ( } if (field->null_bit) { - u_int32_t null_offset = get_null_offset(table,table->field[field_index]); - memcpy(pos, &null_offset, sizeof(u_int32_t)); - pos += sizeof(u_int32_t); + uint32_t null_offset = get_null_offset(table,table->field[field_index]); + memcpy(pos, &null_offset, sizeof(uint32_t)); + pos += sizeof(uint32_t); } if (is_col_in_pk) { pos += pack_desc_pk_offset_info( @@ -2400,7 +2400,7 @@ u_int32_t create_toku_secondary_key_pack_descriptor ( return pos - buf; } -u_int32_t skip_key_in_desc( +uint32_t skip_key_in_desc( uchar* row_desc ) { @@ -2414,12 +2414,12 @@ u_int32_t skip_key_in_desc( // // skip the offset information // - pos += sizeof(u_int32_t); + pos += sizeof(uint32_t); // // skip the key_part_length info // - pos += sizeof(u_int32_t); + pos += sizeof(uint32_t); col_bin_or_char = pos[0]; pos++; if (col_bin_or_char == COL_HAS_NO_CHARSET) { @@ -2432,22 +2432,22 @@ u_int32_t skip_key_in_desc( exit: - return (u_int32_t)(pos-row_desc); + return (uint32_t)(pos-row_desc); } -u_int32_t max_key_size_from_desc( +uint32_t max_key_size_from_desc( void* row_desc, - u_int32_t row_desc_size + uint32_t row_desc_size ) { uchar* desc_pos = (uchar *)row_desc; - u_int32_t num_blobs; - u_int32_t num_pk_columns; + uint32_t num_blobs; + uint32_t num_pk_columns; // // start at 1 for the infinity byte // - u_int32_t max_size = 
1; + uint32_t max_size = 1; // skip byte that states if main dictionary bool is_main_dictionary = desc_pos[0]; @@ -2458,7 +2458,7 @@ u_int32_t max_key_size_from_desc( desc_pos++; // skip num_null_bytes - desc_pos += sizeof(u_int32_t); + desc_pos += sizeof(uint32_t); // skip mcp_info desc_pos += sizeof(MULTI_COL_PACK_INFO); @@ -2476,9 +2476,9 @@ u_int32_t max_key_size_from_desc( desc_pos++; desc_pos += 2*num_pk_columns; - while ( (u_int32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { + while ( (uint32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { uchar has_charset; - u_int32_t key_length = 0; + uint32_t key_length = 0; uchar null_bit = desc_pos[0]; desc_pos++; @@ -2488,7 +2488,7 @@ u_int32_t max_key_size_from_desc( // column is NULLable, skip null_offset, and add a null byte // max_size++; - desc_pos += sizeof(u_int32_t); + desc_pos += sizeof(uint32_t); } // // skip over byte that states if fix or var @@ -2496,7 +2496,7 @@ u_int32_t max_key_size_from_desc( desc_pos++; // skip over offset - desc_pos += sizeof(u_int32_t); + desc_pos += sizeof(uint32_t); // // get the key length and add it to return value @@ -2509,7 +2509,7 @@ u_int32_t max_key_size_from_desc( has_charset = desc_pos[0]; desc_pos++; - u_int32_t charset_num; + uint32_t charset_num; if (has_charset == COL_HAS_CHARSET) { // skip over charsent num desc_pos += sizeof(charset_num); @@ -2521,18 +2521,18 @@ u_int32_t max_key_size_from_desc( return max_size; } -u_int32_t pack_key_from_desc( +uint32_t pack_key_from_desc( uchar* buf, void* row_desc, - u_int32_t row_desc_size, + uint32_t row_desc_size, const DBT* pk_key, const DBT* pk_val ) { MULTI_COL_PACK_INFO mcp_info; - u_int32_t num_null_bytes; - u_int32_t num_blobs; - u_int32_t num_pk_columns; + uint32_t num_null_bytes; + uint32_t num_blobs; + uint32_t num_pk_columns; uchar* blob_lengths = NULL; uchar* pk_info = NULL; uchar* pk_data_ptr = NULL; @@ -2540,7 +2540,7 @@ u_int32_t pack_key_from_desc( uchar* fixed_field_ptr = NULL; uchar* var_field_offset_ptr = NULL; const uchar* var_field_data_ptr = NULL; - u_int32_t num_offset_bytes; + uint32_t num_offset_bytes; uchar* packed_key_pos = buf; uchar* desc_pos = (uchar *)row_desc; @@ -2594,11 +2594,11 @@ u_int32_t pack_key_from_desc( fixed_field_ptr = null_bytes_ptr + num_null_bytes; var_field_offset_ptr = fixed_field_ptr + mcp_info.fixed_field_size; var_field_data_ptr = var_field_offset_ptr + mcp_info.len_of_offsets; - while ( (u_int32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { + while ( (uint32_t)(desc_pos - (uchar *)row_desc) < row_desc_size) { uchar col_fix_val; uchar has_charset; - u_int32_t col_pack_val = 0; - u_int32_t key_length = 0; + uint32_t col_pack_val = 0; + uint32_t key_length = 0; uchar null_bit = desc_pos[0]; desc_pos++; @@ -2607,7 +2607,7 @@ u_int32_t pack_key_from_desc( // // column is NULLable, need to check the null bytes to see if it is NULL // - u_int32_t null_offset = 0; + uint32_t null_offset = 0; bool is_field_null; memcpy(&null_offset, desc_pos, sizeof(null_offset)); desc_pos += sizeof(null_offset); @@ -2639,7 +2639,7 @@ u_int32_t pack_key_from_desc( has_charset = desc_pos[0]; desc_pos++; - u_int32_t charset_num = 0; + uint32_t charset_num = 0; if (has_charset == COL_HAS_CHARSET) { memcpy(&charset_num, desc_pos, sizeof(charset_num)); desc_pos += sizeof(charset_num); @@ -2656,9 +2656,9 @@ u_int32_t pack_key_from_desc( packed_key_pos += key_length; } else if (col_fix_val == COL_VAR_FIELD && has_charset == COL_HAS_NO_CHARSET) { - u_int32_t data_start_offset = 0; + uint32_t data_start_offset = 0; - 
u_int32_t data_size = 0; + uint32_t data_size = 0; get_var_field_info( &data_size, &data_start_offset, @@ -2680,8 +2680,8 @@ u_int32_t pack_key_from_desc( } else { const uchar* data_start = NULL; - u_int32_t data_start_offset = 0; - u_int32_t data_size = 0; + uint32_t data_start_offset = 0; + uint32_t data_size = 0; if (col_fix_val == COL_FIX_FIELD) { data_start_offset = col_pack_val; @@ -2699,11 +2699,11 @@ u_int32_t pack_key_from_desc( data_start = var_field_data_ptr + data_start_offset; } else if (col_fix_val == COL_BLOB_FIELD) { - u_int32_t blob_index = col_pack_val; - u_int32_t blob_offset; + uint32_t blob_index = col_pack_val; + uint32_t blob_offset; const uchar* blob_ptr = NULL; - u_int32_t field_len; - u_int32_t field_len_bytes = blob_lengths[blob_index]; + uint32_t field_len; + uint32_t field_len_bytes = blob_lengths[blob_index]; get_blob_field_info( &blob_offset, mcp_info.len_of_offsets, @@ -2715,7 +2715,7 @@ u_int32_t pack_key_from_desc( // // skip over other blobs to get to the one we want to make a key out of // - for (u_int32_t i = 0; i < blob_index; i++) { + for (uint32_t i = 0; i < blob_index; i++) { blob_ptr = unpack_toku_field_blob( NULL, blob_ptr, @@ -2758,17 +2758,17 @@ u_int32_t pack_key_from_desc( } else if (col_fix_val == COL_VAR_PK_OFFSET) { uchar* tmp_pk_data_ptr = pk_data_ptr; - u_int32_t index_in_pk = col_pack_val; + uint32_t index_in_pk = col_pack_val; // // skip along in pk to the right column // - for (u_int32_t i = 0; i < index_in_pk; i++) { + for (uint32_t i = 0; i < index_in_pk; i++) { if (pk_info[2*i] == COL_FIX_FIELD) { tmp_pk_data_ptr += pk_info[2*i + 1]; } else if (pk_info[2*i] == COL_VAR_FIELD) { - u_int32_t len_bytes = pk_info[2*i + 1]; - u_int32_t len; + uint32_t len_bytes = pk_info[2*i + 1]; + uint32_t len; if (len_bytes == 1) { len = tmp_pk_data_ptr[0]; tmp_pk_data_ptr++; @@ -2789,15 +2789,15 @@ u_int32_t pack_key_from_desc( // // at this point, tmp_pk_data_ptr is pointing at the column // - u_int32_t is_fix_field = pk_info[2*index_in_pk]; + uint32_t is_fix_field = pk_info[2*index_in_pk]; if (is_fix_field == COL_FIX_FIELD) { memcpy(packed_key_pos, tmp_pk_data_ptr, key_length); packed_key_pos += key_length; } else if (is_fix_field == COL_VAR_FIELD) { const uchar* data_start = NULL; - u_int32_t data_size = 0; - u_int32_t len_bytes = pk_info[2*index_in_pk + 1]; + uint32_t data_size = 0; + uint32_t len_bytes = pk_info[2*index_in_pk + 1]; if (len_bytes == 1) { data_size = tmp_pk_data_ptr[0]; tmp_pk_data_ptr++; @@ -2842,7 +2842,7 @@ u_int32_t pack_key_from_desc( } } - assert( (u_int32_t)(desc_pos - (uchar *)row_desc) == row_desc_size); + assert( (uint32_t)(desc_pos - (uchar *)row_desc) == row_desc_size); // // now append the primary key to the end of the key @@ -2856,7 +2856,7 @@ u_int32_t pack_key_from_desc( packed_key_pos += (pk_key->size - 1); } - return (u_int32_t)(packed_key_pos - buf); // + return (uint32_t)(packed_key_pos - buf); // } bool fields_have_same_name( diff --git a/storage/tokudb/hatoku_cmp.h b/storage/tokudb/hatoku_cmp.h index a1481caab8b..b8e618b2bbb 100644 --- a/storage/tokudb/hatoku_cmp.h +++ b/storage/tokudb/hatoku_cmp.h @@ -47,7 +47,7 @@ // used for queries typedef struct st_col_pack_info { - u_int32_t col_pack_val; //offset if fixed, pack_index if var + uint32_t col_pack_val; //offset if fixed, pack_index if var } COL_PACK_INFO; // @@ -62,8 +62,8 @@ typedef struct st_col_pack_info { // To figure out where the blobs start, find the last offset listed (if offsets exist) // typedef struct st_multi_col_pack_info { - u_int32_t 
fixed_field_size; //where the fixed length stuff ends and the offsets for var stuff begins - u_int32_t len_of_offsets; //length of the offset bytes in a packed row + uint32_t fixed_field_size; //where the fixed length stuff ends and the offsets for var stuff begins + uint32_t len_of_offsets; //length of the offset bytes in a packed row } MULTI_COL_PACK_INFO; @@ -91,10 +91,10 @@ typedef struct st_key_and_col_info { // length_bytes[i] is 0 // 'i' shows up in blob_fields // - u_int16_t* field_lengths; //stores the field lengths of fixed size fields (1<<16 - 1 max), + uint16_t* field_lengths; //stores the field lengths of fixed size fields (1<<16 - 1 max), uchar* length_bytes; // stores the length of lengths of varchars and varbinaries - u_int32_t* blob_fields; // list of indexes of blob fields, - u_int32_t num_blobs; // number of blobs in the table + uint32_t* blob_fields; // list of indexes of blob fields, + uint32_t num_blobs; // number of blobs in the table // // val packing info for all dictionaries. i'th one represents info for i'th dictionary // @@ -105,33 +105,33 @@ typedef struct st_key_and_col_info { // The number of var fields in a val for dictionary i can be evaluated by // mcp_info[i].len_of_offsets/num_offset_bytes. // - u_int32_t num_offset_bytes; //number of bytes needed to encode the offset + uint32_t num_offset_bytes; //number of bytes needed to encode the offset } KEY_AND_COL_INFO; void get_var_field_info( - u_int32_t* field_len, - u_int32_t* start_offset, - u_int32_t var_field_index, + uint32_t* field_len, + uint32_t* start_offset, + uint32_t var_field_index, const uchar* var_field_offset_ptr, - u_int32_t num_offset_bytes + uint32_t num_offset_bytes ); void get_blob_field_info( - u_int32_t* start_offset, - u_int32_t len_of_offsets, + uint32_t* start_offset, + uint32_t len_of_offsets, const uchar* var_field_data_ptr, - u_int32_t num_offset_bytes + uint32_t num_offset_bytes ); -static inline u_int32_t get_blob_field_len( +static inline uint32_t get_blob_field_len( const uchar* from_tokudb, - u_int32_t len_bytes + uint32_t len_bytes ) { - u_int32_t length = 0; + uint32_t length = 0; switch (len_bytes) { case (1): - length = (u_int32_t)(*from_tokudb); + length = (uint32_t)(*from_tokudb); break; case (2): length = uint2korr(from_tokudb); @@ -152,11 +152,11 @@ static inline u_int32_t get_blob_field_len( static inline const uchar* unpack_toku_field_blob( uchar *to_mysql, const uchar* from_tokudb, - u_int32_t len_bytes, + uint32_t len_bytes, bool skip ) { - u_int32_t length = 0; + uint32_t length = 0; const uchar* data_ptr = NULL; if (!skip) { memcpy(to_mysql, from_tokudb, len_bytes); @@ -194,16 +194,16 @@ TOKU_TYPE mysql_to_toku_type (Field* field); uchar* pack_toku_varbinary_from_desc( uchar* to_tokudb, const uchar* from_desc, - u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb - u_int32_t field_length //length of field + uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb + uint32_t field_length //length of field ); uchar* pack_toku_varstring_from_desc( uchar* to_tokudb, const uchar* from_desc, - u_int32_t key_part_length, //number of bytes to use to encode the length in to_tokudb - u_int32_t field_length, - u_int32_t charset_num//length of field + uint32_t key_part_length, //number of bytes to use to encode the length in to_tokudb + uint32_t field_length, + uint32_t charset_num//length of field ); @@ -211,21 +211,21 @@ uchar* pack_toku_key_field( uchar* to_tokudb, uchar* from_mysql, Field* field, - u_int32_t 
key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff + uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff ); uchar* pack_key_toku_key_field( uchar* to_tokudb, uchar* from_mysql, Field* field, - u_int32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff + uint32_t key_part_length //I really hope this is temporary as I phase out the pack_cmp stuff ); uchar* unpack_toku_key_field( uchar* to_mysql, uchar* from_tokudb, Field* field, - u_int32_t key_part_length + uint32_t key_part_length ); @@ -263,11 +263,11 @@ static inline ulonglong hpk_char_to_num(uchar* val) { int tokudb_compare_two_keys( const void* new_key_data, - const u_int32_t new_key_size, + const uint32_t new_key_size, const void* saved_key_data, - const u_int32_t saved_key_size, + const uint32_t saved_key_size, const void* row_desc, - const u_int32_t row_desc_size, + const uint32_t row_desc_size, bool cmp_prefix ); @@ -286,43 +286,43 @@ int create_toku_key_descriptor( ); -u_int32_t create_toku_main_key_pack_descriptor ( +uint32_t create_toku_main_key_pack_descriptor ( uchar* buf ); -u_int32_t get_max_clustering_val_pack_desc_size( +uint32_t get_max_clustering_val_pack_desc_size( TABLE_SHARE* table_share ); -u_int32_t create_toku_clustering_val_pack_descriptor ( +uint32_t create_toku_clustering_val_pack_descriptor ( uchar* buf, uint pk_index, TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, - u_int32_t keynr, + uint32_t keynr, bool is_clustering ); static inline bool is_key_clustering( void* row_desc, - u_int32_t row_desc_size + uint32_t row_desc_size ) { return (row_desc_size > 0); } -u_int32_t pack_clustering_val_from_desc( +uint32_t pack_clustering_val_from_desc( uchar* buf, void* row_desc, - u_int32_t row_desc_size, + uint32_t row_desc_size, const DBT* pk_val ); -u_int32_t get_max_secondary_key_pack_desc_size( +uint32_t get_max_secondary_key_pack_desc_size( KEY_AND_COL_INFO* kc_info ); -u_int32_t create_toku_secondary_key_pack_descriptor ( +uint32_t create_toku_secondary_key_pack_descriptor ( uchar* buf, bool has_hpk, uint pk_index, @@ -335,23 +335,23 @@ u_int32_t create_toku_secondary_key_pack_descriptor ( static inline bool is_key_pk( void* row_desc, - u_int32_t row_desc_size + uint32_t row_desc_size ) { uchar* buf = (uchar *)row_desc; return buf[0]; } -u_int32_t max_key_size_from_desc( +uint32_t max_key_size_from_desc( void* row_desc, - u_int32_t row_desc_size + uint32_t row_desc_size ); -u_int32_t pack_key_from_desc( +uint32_t pack_key_from_desc( uchar* buf, void* row_desc, - u_int32_t row_desc_size, + uint32_t row_desc_size, const DBT* pk_key, const DBT* pk_val ); diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h index 69cc935cc96..007f7f4949a 100644 --- a/storage/tokudb/hatoku_defines.h +++ b/storage/tokudb/hatoku_defines.h @@ -209,7 +209,7 @@ static inline void make_name(char *newname, const char *tablename, const char *d nn += sprintf(nn, "-%s", dictname); } -static inline void commit_txn(DB_TXN* txn, u_int32_t flags) { +static inline void commit_txn(DB_TXN* txn, uint32_t flags) { int r; r = txn->commit(txn, flags); if (r != 0) { diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index 0470db2dc85..475db9c5e07 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -46,7 +46,7 @@ static MYSQL_THDVAR_BOOL(commit_sync, "sync on txn commit", /* check */ NULL, /* update */ NULL, - /* default*/ TRUE + /* default*/ true ); static 
MYSQL_THDVAR_UINT(pk_insert_mode, @@ -64,49 +64,49 @@ static MYSQL_THDVAR_BOOL(load_save_space, "if on, intial loads are slower but take less space", NULL, NULL, - FALSE + false ); static MYSQL_THDVAR_BOOL(disable_slow_alter, 0, "if on, alter tables that require copy are disabled", NULL, NULL, - FALSE + false ); static MYSQL_THDVAR_BOOL(disable_hot_alter, 0, "if on, hot alter table is disabled", NULL, NULL, - FALSE + false ); static MYSQL_THDVAR_BOOL(create_index_online, 0, "if on, create index done online", NULL, NULL, - TRUE + true ); static MYSQL_THDVAR_BOOL(disable_prefetching, 0, "if on, prefetching disabled", NULL, NULL, - FALSE + false ); static MYSQL_THDVAR_BOOL(prelock_empty, 0, "Tokudb Prelock Empty Table", NULL, NULL, - TRUE + true ); static MYSQL_THDVAR_BOOL(log_client_errors, 0, "Tokudb Log Client Errors", NULL, NULL, - FALSE + false ); static MYSQL_THDVAR_UINT(block_size, 0, @@ -150,7 +150,7 @@ tokudb_checkpoint_lock_update( const void* save) { my_bool* val = (my_bool *) var_ptr; - *val= *(my_bool *) save ? TRUE : FALSE; + *val= *(my_bool *) save ? true : false; if (*val) { tokudb_checkpoint_lock(thd); } @@ -164,7 +164,7 @@ static MYSQL_THDVAR_BOOL(checkpoint_lock, "Tokudb Checkpoint Lock", NULL, tokudb_checkpoint_lock_update, - FALSE + false ); static const char *tokudb_row_format_names[] = { @@ -255,16 +255,16 @@ void toku_hton_assert_fail(const char* expr_as_string, const char * fun, const c -//my_bool tokudb_shared_data = FALSE; -static u_int32_t tokudb_init_flags = +//my_bool tokudb_shared_data = false; +static uint32_t tokudb_init_flags = DB_CREATE | DB_THREAD | DB_PRIVATE | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOG | DB_RECOVER; -static u_int32_t tokudb_env_flags = 0; -// static u_int32_t tokudb_lock_type = DB_LOCK_DEFAULT; +static uint32_t tokudb_env_flags = 0; +// static uint32_t tokudb_lock_type = DB_LOCK_DEFAULT; // static ulong tokudb_log_buffer_size = 0; // static ulong tokudb_log_file_size = 0; static ulonglong tokudb_cache_size = 0; @@ -276,9 +276,9 @@ static char *tokudb_log_dir; // static ulong tokudb_region_size = 0; // static ulong tokudb_cache_parts = 1; const char *tokudb_hton_name = "TokuDB"; -static u_int32_t tokudb_checkpointing_period; -u_int32_t tokudb_write_status_frequency; -u_int32_t tokudb_read_status_frequency; +static uint32_t tokudb_checkpointing_period; +uint32_t tokudb_write_status_frequency; +uint32_t tokudb_read_status_frequency; #ifdef TOKUDB_VERSION char *tokudb_version = (char*) TOKUDB_VERSION; #else @@ -444,7 +444,7 @@ static int tokudb_init_func(void *p) { } if (tokudb_cache_size) { DBUG_PRINT("info", ("tokudb_cache_size: %lld\n", tokudb_cache_size)); - r = db_env->set_cachesize(db_env, (u_int32_t)(tokudb_cache_size >> 30), (u_int32_t)(tokudb_cache_size % (1024L * 1024L * 1024L)), 1); + r = db_env->set_cachesize(db_env, (uint32_t)(tokudb_cache_size >> 30), (uint32_t)(tokudb_cache_size % (1024L * 1024L * 1024L)), 1); if (r) { DBUG_PRINT("info", ("set_cachesize %d\n", r)); goto error; @@ -462,7 +462,7 @@ static int tokudb_init_func(void *p) { } } - u_int32_t gbytes, bytes; int parts; + uint32_t gbytes, bytes; int parts; r = db_env->get_cachesize(db_env, &gbytes, &bytes, &parts); if (r == 0) if (tokudb_debug & TOKUDB_DEBUG_INIT) @@ -551,7 +551,7 @@ static int tokudb_init_func(void *p) { //3938: succeeded, set the init status flag and unlock tokudb_hton_initialized = 1; rw_unlock(&tokudb_hton_initialized_lock); - DBUG_RETURN(FALSE); + DBUG_RETURN(false); error: if (metadata_db) { @@ -567,7 +567,7 @@ error: // 3938: failed 
to initialized, drop the flag and lock tokudb_hton_initialized = 0; rw_unlock(&tokudb_hton_initialized_lock); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } static int tokudb_done_func(void *p) { @@ -637,7 +637,7 @@ bool tokudb_flush_logs(handlerton * hton) { TOKUDB_DBUG_ENTER("tokudb_flush_logs"); int error; bool result = 0; - u_int32_t curr_tokudb_checkpointing_period = 0; + uint32_t curr_tokudb_checkpointing_period = 0; // // get the current checkpointing period @@ -780,7 +780,7 @@ void txn_progress_func(TOKU_TXN_PROGRESS progress, void* extra) { } -static void commit_txn_with_progress(DB_TXN* txn, u_int32_t flags, THD* thd) { +static void commit_txn_with_progress(DB_TXN* txn, uint32_t flags, THD* thd) { int r; struct txn_progress_info info; info.thd = thd; @@ -805,7 +805,7 @@ static void abort_txn_with_progress(DB_TXN* txn, THD* thd) { static int tokudb_commit(handlerton * hton, THD * thd, bool all) { TOKUDB_DBUG_ENTER("tokudb_commit"); DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt")); - u_int32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC; + uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC; tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); DB_TXN **txn = all ? &trx->all : &trx->stmt; if (*txn) { @@ -1059,7 +1059,7 @@ cleanup: TOKUDB_DBUG_RETURN(error); } -static int store_dbname_tablename_size(TABLE *table, char *name, u_int64_t size, THD *thd) { +static int store_dbname_tablename_size(TABLE *table, char *name, uint64_t size, THD *thd) { char *tp = strrchr(name, '/'); assert(tp); char *tablename = tp + 1; @@ -1181,7 +1181,7 @@ static int tokudb_get_user_data_size(TABLE *table, THD *thd, bool exact) { if (!error) { char* name = (char *)curr_key.data; char* newname; - u_int64_t curr_num_bytes = 0; + uint64_t curr_num_bytes = 0; DB_BTREE_STAT64 dict_stats; error = db_create(&curr_db, db_env, 0); @@ -1241,14 +1241,14 @@ static int tokudb_get_user_data_size(TABLE *table, THD *thd, bool exact) { // in this case, we have a hidden primary key, do not // want to report space taken up by the hidden primary key to the user // - u_int64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata; + uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata; curr_num_bytes = (hpk_space > curr_num_bytes) ? 0 : curr_num_bytes - hpk_space; } else { // // one infinity byte per key needs to be subtracted // - u_int64_t inf_byte_space = dict_stats.bt_ndata; + uint64_t inf_byte_space = dict_stats.bt_ndata; curr_num_bytes = (inf_byte_space > curr_num_bytes) ? 0 : curr_num_bytes - inf_byte_space; } @@ -1456,7 +1456,7 @@ static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * sta default: break; } - return FALSE; + return false; } static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) { diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h index ca61bc72005..101e2ac8f25 100644 --- a/storage/tokudb/hatoku_hton.h +++ b/storage/tokudb/hatoku_hton.h @@ -37,7 +37,7 @@ srv_row_format_t get_row_format(THD *thd); extern HASH tokudb_open_tables; extern pthread_mutex_t tokudb_mutex; extern pthread_mutex_t tokudb_meta_mutex; -extern u_int32_t tokudb_write_status_frequency; -extern u_int32_t tokudb_read_status_frequency; +extern uint32_t tokudb_write_status_frequency; +extern uint32_t tokudb_read_status_frequency; #endif //#ifdef _HATOKU_HTON
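
The hunks above mechanically replace the BSD-style u_int8_t/u_int16_t/u_int32_t/u_int64_t aliases with the standard <stdint.h> uint*_t types (and TRUE/FALSE with true/false). As a rough standalone sanity check, not part of the patch itself, the replacement types can be verified at compile time to match the legacy aliases in width and signedness; the file name, the build flags, and the assumption that the platform's <sys/types.h> still declares the old u_int*_t names are all illustrative:

// type_check.cc (illustrative only): confirms that the <cstdint> types used
// as replacements have the same size and signedness as the legacy BSD-style
// aliases from <sys/types.h>. Build with e.g. `g++ -std=c++11 -c type_check.cc`;
// any mismatch fails the build with the message given in the static_assert.
#include <cstdint>      // uint8_t .. uint64_t (C99/C++11 standard types)
#include <sys/types.h>  // u_int8_t .. u_int64_t (legacy BSD aliases, assumed present)
#include <type_traits>

#define CHECK_SAME_LAYOUT(legacy, standard)                          \
    static_assert(sizeof(legacy) == sizeof(standard),                \
                  #legacy " and " #standard " differ in size");      \
    static_assert(std::is_unsigned<legacy>::value &&                 \
                  std::is_unsigned<standard>::value,                 \
                  #legacy " / " #standard " must both be unsigned")

CHECK_SAME_LAYOUT(u_int8_t,  uint8_t);
CHECK_SAME_LAYOUT(u_int16_t, uint16_t);
CHECK_SAME_LAYOUT(u_int32_t, uint32_t);
CHECK_SAME_LAYOUT(u_int64_t, uint64_t);

On glibc both families are typedefs of the same underlying integer types, so the rename is a spelling change only; on a platform where that assumption does not hold, the checks above would stop the build rather than let the substitution silently change a field's width.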