
Merge branch 'merge-tokudb-5.6' into 10.0

Oleksandr Byelkin
2018-09-06 21:04:56 +02:00
192 changed files with 3936 additions and 194538 deletions

View File

@@ -1,4 +1,4 @@
SET(TOKUDB_VERSION 5.6.39-83.1)
SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
@@ -132,7 +132,11 @@ ELSEIF (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/ft-index/")
MESSAGE(WARNING "Found ft-index sources, ft-index is deprecated and replaced with PerconaFT.")
SET(TOKU_FT_DIR_NAME "ft-index")
ELSE ()
MESSAGE(FATAL_ERROR "Could not find PerconaFT sources.")
MESSAGE(FATAL_ERROR "Could not find PerconaFT sources.")
ENDIF ()
IF (WITH_VALGRIND)
SET(USE_VALGRIND "ON")
ENDIF ()
ADD_SUBDIRECTORY(${TOKU_FT_DIR_NAME})

View File

@@ -17,7 +17,8 @@ ENDIF()
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
add_definitions( -DMYSQL_TOKUDB_ENGINE=1)
add_definitions(-DMYSQL_TOKUDB_ENGINE=1)
add_definitions(-DMYSQL_VERSION_ID=${MYSQL_VERSION_ID})
# Extended PFS instrumentation:
# -DTOKU_PFS_MUTEX_EXTENDED_CACHETABLEMMUTEX=1
if (WITH_PERFSCHEMA_STORAGE_ENGINE)

View File

@@ -142,6 +142,9 @@ set_ldflags_if_supported(
-Wno-error=strict-overflow
)
# new flag sets in MySQL 8.0 seem to explicitly disable this
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
## set extra debugging flags and preprocessor definitions
set(CMAKE_C_FLAGS_DEBUG "-g3 -O0 ${CMAKE_C_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_DEBUG "-g3 -O0 ${CMAKE_CXX_FLAGS_DEBUG}")

View File

@@ -1290,7 +1290,6 @@ int toku_cachetable_get_and_pin (
CACHEKEY key,
uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
@@ -1311,7 +1310,6 @@ int toku_cachetable_get_and_pin (
key,
fullhash,
value,
sizep,
write_callback,
fetch_callback,
pf_req_callback,
@@ -1559,7 +1557,6 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
CACHEKEY key,
uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
@@ -1743,7 +1740,6 @@ beginning:
}
got_value:
*value = p->value_data;
if (sizep) *sizep = p->attr.size;
return 0;
}
@@ -1856,6 +1852,22 @@ int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE cachefile, CACHEKEY key,
return r;
}
int toku_cachetable_get_attr (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, PAIR_ATTR *attr) {
CACHETABLE ct = cachefile->cachetable;
int r;
ct->list.pair_lock_by_fullhash(fullhash);
PAIR p = ct->list.find_pair(cachefile, key, fullhash);
if (p) {
// Assumes pair lock and full hash lock are the same mutex
*attr = p->attr;
r = 0;
} else {
r = -1;
}
ct->list.pair_unlock_by_fullhash(fullhash);
return r;
}
//
// internal function to unpin a PAIR.
// As of Clayface, this may be called in two ways:
@@ -1997,7 +2009,6 @@ int toku_cachetable_get_and_pin_nonblocking(
CACHEKEY key,
uint32_t fullhash,
void**value,
long* UU(sizep),
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,

View File

@@ -352,7 +352,6 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
CACHEKEY key,
uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
@@ -374,7 +373,6 @@ int toku_cachetable_get_and_pin (
CACHEKEY key,
uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
@@ -408,7 +406,6 @@ int toku_cachetable_get_and_pin_nonblocking (
CACHEKEY key,
uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
@@ -428,6 +425,11 @@ int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, uint32_t /*fullhash*
int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, pair_lock_type, void**);
// Effect: Like maybe get and pin, but may pin a clean pair.
int toku_cachetable_get_attr(CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, PAIR_ATTR *);
// Effect: get the attributes for cachekey
// Returns: 0 if success, non-zero if cachekey is not cached
// Notes: this function exists for tests
int toku_cachetable_unpin(CACHEFILE, PAIR, enum cachetable_dirty dirty, PAIR_ATTR size);
// Effect: Unpin a memory object
// Modifies: If the memory object is in the cachetable, then OR the dirty flag,
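
A minimal sketch (not part of the commit) of the calling pattern the updated tests in this diff follow: the sizep out-parameter is gone from toku_cachetable_get_and_pin(), so a caller that still needs the pair's size asks for it afterwards with the new toku_cachetable_get_attr() helper. Names such as f1, key, fullhash, wc, def_fetch and the def_pf_* callbacks are the test fixtures visible elsewhere in this diff; expected_size is a hypothetical placeholder.

    void *value;
    int r = toku_cachetable_get_and_pin(f1, key, fullhash, &value,
                                        wc, def_fetch,
                                        def_pf_req_callback, def_pf_callback,
                                        true, NULL);
    assert(r == 0);
    // size is no longer returned by get_and_pin; query it separately
    PAIR_ATTR attr;
    r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
    assert(r == 0);
    assert(attr.size == expected_size);  // expected_size: placeholder for the test's known value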

View File

@@ -178,7 +178,6 @@ toku_pin_ftnode_for_query(
blocknum,
fullhash,
&node_v,
NULL,
get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
@@ -209,7 +208,6 @@ toku_pin_ftnode_for_query(
blocknum,
fullhash,
&node_v,
NULL,
get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
@@ -289,7 +287,6 @@ toku_pin_ftnode_with_dep_nodes(
blocknum,
fullhash,
&node_v,
NULL,
get_write_callbacks_for_node(ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,

View File

@@ -130,7 +130,6 @@ int toku_testsetup_get_sersize(FT_HANDLE ft_handle, BLOCKNUM diskoff) // Return
ft_handle->ft->cf, diskoff,
toku_cachetable_hash(ft_handle->ft->cf, diskoff),
&node_v,
NULL,
get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
@@ -158,7 +157,6 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const
blocknum,
toku_cachetable_hash(ft_handle->ft->cf, blocknum),
&node_v,
NULL,
get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
@@ -236,7 +234,6 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, en
blocknum,
toku_cachetable_hash(ft_handle->ft->cf, blocknum),
&node_v,
NULL,
get_write_callbacks_for_node(ft_handle->ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,

View File

@@ -44,6 +44,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
#ifndef TOKU_MYSQL_WITH_PFS
#include <my_global.h>
#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;

View File

@@ -156,6 +156,8 @@ void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) {
assert(!node->dirty);
BASEMENTNODE bn = BLB(node, childnum);
toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
toku_ft_adjust_logical_row_count(ft, -BLB_LRD(node, childnum));
BLB_LRD(node, childnum) = 0;
destroy_basement_node(bn);
set_BNULL(node, childnum);
BP_STATE(node, childnum) = PT_ON_DISK;

View File

@@ -49,7 +49,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/serialize/block_allocator.h"
#include "ft/serialize/rbtree_mhs.h"
#if TOKU_DEBUG_PARANOID
#if defined(TOKU_DEBUG_PARANOID) && TOKU_DEBUG_PARANOID
#define VALIDATE() Validate()
#else
#define VALIDATE()

View File

@@ -42,13 +42,11 @@ CACHEFILE f1;
static void *pin_nonblocking(void *arg) {
void* v1;
long s1;
int r = toku_cachetable_get_and_pin_nonblocking(
f1,
make_blocknum(1),
toku_cachetable_hash(f1, make_blocknum(1)),
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_EXPENSIVE,
NULL,
@@ -70,12 +68,10 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
r = toku_cachetable_get_and_pin(f1,
make_blocknum(1),
toku_cachetable_hash(f1, make_blocknum(1)),
&v1,
&s1,
def_write_callback(NULL),
def_fetch,
def_pf_req_callback,

View File

@@ -42,13 +42,11 @@ CACHEFILE f1;
static void *pin_nonblocking(void *arg) {
void* v1;
long s1;
int r = toku_cachetable_get_and_pin_nonblocking(
f1,
make_blocknum(1),
toku_cachetable_hash(f1, make_blocknum(1)),
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_EXPENSIVE,
NULL,
@@ -92,12 +90,10 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
r = toku_cachetable_get_and_pin(f1,
make_blocknum(1),
toku_cachetable_hash(f1, make_blocknum(1)),
&v1,
&s1,
def_write_callback(nullptr),
def_fetch,
def_pf_req_callback,

View File

@@ -88,7 +88,6 @@ flush (CACHEFILE f __attribute__((__unused__)),
static void *f2_pin(void *arg) {
int r;
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
//
// these booleans for pe_callback just ensure that the
@@ -98,7 +97,7 @@ static void *f2_pin(void *arg) {
// This is just to ensure that the bug is being exercised
//
check_pe_callback = true;
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0);
ct->ev.signal_eviction_thread();
usleep(1*1024*1024);
@@ -141,13 +140,12 @@ cachetable_test (void) {
assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.pe_callback = pe_callback;
wc.flush_callback = flush;
// pin and unpin a node 20 times, just to get clock count up
for (int i = 0; i < 20; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert(r == 0);

View File

@@ -131,13 +131,11 @@ static void *repin_one(void *UU(arg)) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
struct unlockers unlockers = {true, unpin_two, NULL, NULL};
void* v1;
long s1;
int r = toku_cachetable_get_and_pin_nonblocking(
f1,
make_blocknum(1),
1,
&v1,
&s1,
wc,
def_fetch,
def_pf_req_callback,
@@ -164,13 +162,12 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
// bring pairs 1 and 2 into memory, then unpin
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch_one, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch_one, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, &s1, wc, fetch_two, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, fetch_two, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
toku_pthread_t tid1;

View File

@@ -125,13 +125,11 @@ static void *repin_one(void *UU(arg)) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
struct unlockers unlockers = {true, unpin_four, NULL, NULL};
void* v1;
long s1;
int r = toku_cachetable_get_and_pin_nonblocking(
f1,
make_blocknum(1),
1,
&v1,
&s1,
wc,
def_fetch,
def_pf_req_callback,
@@ -149,13 +147,11 @@ static void *repin_two(void *UU(arg)) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
struct unlockers unlockers = {true, unpin_three, NULL, NULL};
void* v1;
long s1;
int r = toku_cachetable_get_and_pin_nonblocking(
f1,
make_blocknum(2),
2,
&v1,
&s1,
wc,
def_fetch,
def_pf_req_callback,
@@ -181,20 +177,19 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
// bring pairs 1 and 2 into memory, then unpin
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
// now pin pairs 3 and 4
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v1, &s1, wc, fetch_three, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v1, wc, fetch_three, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v1, &s1, wc, fetch_four, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v1, wc, fetch_four, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
toku_pthread_t tid1;

View File

@@ -73,12 +73,11 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));

View File

@@ -99,13 +99,15 @@ do_update (void *UU(ignore))
CACHEKEY key = make_blocknum(i);
uint32_t hi = toku_cachetable_hash(cf, key);
void *vv;
long size;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, &size, wc, fetch_die, def_pf_req_callback, def_pf_callback, true, 0);
int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, wc, fetch_die, def_pf_req_callback, def_pf_callback, true, 0);
//printf("g");
assert(r==0);
assert(size==sizeof(int));
PAIR_ATTR attr;
r = toku_cachetable_get_attr(cf, key, hi, &attr);
assert(r==0);
assert(attr.size==sizeof(int));
int *CAST_FROM_VOIDP(v, vv);
assert(*v==42);
*v = 43;

View File

@@ -110,13 +110,11 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1;
long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &dirty_val);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &dirty_val);
wc.write_extraargs = NULL;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
//
// Here is the test, we have two pairs, v1 is dirty, v2 is clean, but both are currently pinned

View File

@@ -103,13 +103,10 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8;
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);

View File

@@ -103,13 +103,10 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8;
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);

View File

@@ -107,15 +107,12 @@ run_test (void) {
assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == 0);
void* vs[n_pairs];
//void* v2;
long ss[n_pairs];
//long s2;
PAIR_ATTR expect = { .size = 0, .nonleaf_size = 0, .leaf_size = 0, .rollback_size = 0, .cache_pressure_size = 0 };
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.write_extraargs = &expect;
for (int i = 0; i < n_pairs; ++i) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i], &ss[i],
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
wc,
def_fetch,
def_pf_req_callback,
@@ -139,8 +136,7 @@ run_test (void) {
assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == (uint64_t) expect.cache_pressure_size);
void *big_v;
long big_s;
r = toku_cachetable_get_and_pin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, &big_v, &big_s,
r = toku_cachetable_get_and_pin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, &big_v,
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -71,13 +71,10 @@ run_test (void) {
assert(r==0);
void* vs[8];
//void* v2;
long ss[8];
//long s2;
for (int i = 0; i < 8; ++i) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = everything_pinned_cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i], &ss[i],
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -69,13 +69,10 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* vs[8];
//void* v2;
long ss[8];
//long s2;
for (int i = 0; i < 8; ++i) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = everything_pinned_cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i], &ss[i],
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -76,12 +76,9 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* vs[5];
//void* v2;
long ss[5];
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = my_cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &vs[0], &ss[0],
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &vs[0],
wc,
def_fetch,
def_pf_req_callback,
@@ -92,7 +89,7 @@ run_test (void) {
attr.cache_pressure_size = 100;
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, attr);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 1, &vs[1], &ss[1],
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 1, &vs[1],
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -77,12 +77,9 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* vs[5];
//void* v2;
long ss[5];
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = my_cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(100), 100, &vs[4], &ss[4],
r = toku_cachetable_get_and_pin(f1, make_blocknum(100), 100, &vs[4],
wc,
def_fetch,
def_pf_req_callback,
@@ -94,7 +91,7 @@ run_test (void) {
r = toku_test_cachetable_unpin(f1, make_blocknum(100), 100, CACHETABLE_CLEAN, attr);
for (int i = 0; i < 4; ++i) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i], &ss[i],
r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -99,25 +99,24 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
flush_may_occur = false;
check_flush = true;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
for (int i = 0; i < 100000; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 8; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 4; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 2; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
}
flush_may_occur = true;

View File

@@ -142,34 +142,33 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
flush_may_occur = false;
for (int i = 0; i < 100000; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 8; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 4; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 2; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
}
flush_may_occur = false;

View File

@@ -159,14 +159,13 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
flush_may_occur = false;
for (int i = 0; i < 100000; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 8; i++) {
@@ -174,7 +173,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 4; i++) {
@@ -182,7 +181,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 2; i++) {
@@ -190,7 +189,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
}
flush_may_occur = false;

View File

@@ -137,7 +137,6 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
flush_may_occur = false;
check_flush = true;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
@@ -145,19 +144,19 @@ cachetable_test (void) {
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
for (int i = 0; i < 100000; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 8; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 4; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 2; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
}
flush_may_occur = true;

View File

@@ -101,11 +101,10 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
@@ -124,7 +123,7 @@ cachetable_test (void) {
usleep(1 * 1024 * 1024);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
assert(clone_flush_started && !clone_flush_completed);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));

View File

@@ -95,12 +95,11 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
@@ -109,13 +108,13 @@ cachetable_test (void) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
pf_called = false;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
assert(!pf_called);
toku_cachetable_pf_pinned_pair(v1, true_pf_callback, NULL, f1, make_blocknum(1), 1);

View File

@@ -100,12 +100,11 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
@@ -114,13 +113,13 @@ cachetable_test (void) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
pf_called = false;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_pf_req_callback, true_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_pf_req_callback, true_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);

View File

@@ -82,23 +82,22 @@ cachetable_test (enum cachetable_dirty dirty, bool cloneable) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
// test that having a pin that passes false for may_modify_value does not stall behind checkpoint
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
assert(r == 0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r == 0);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r == 0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));

View File

@@ -93,12 +93,11 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
@@ -108,7 +107,7 @@ cachetable_test (void) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
assert_zero(r);

View File

@@ -113,9 +113,7 @@ static void cachetable_eviction_full_test (void) {
uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
void* value1;
long size1;
void* value2;
long size2;
//
// let's pin a node multiple times
// and really bring up its clock count
@@ -129,7 +127,6 @@ static void cachetable_eviction_full_test (void) {
key,
fullhash,
&value1,
&size1,
wc,
fetch,
def_pf_req_callback,
@@ -150,7 +147,6 @@ static void cachetable_eviction_full_test (void) {
make_blocknum(1),
1,
&value2,
&size2,
wc,
fetch,
def_pf_req_callback,

View File

@@ -126,9 +126,7 @@ static void cachetable_eviction_full_test (void) {
uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
void* value1;
long size1;
void* value2;
long size2;
//
// let's pin a node multiple times
// and really bring up its clock count
@@ -143,7 +141,6 @@ static void cachetable_eviction_full_test (void) {
key,
fullhash,
&value1,
&size1,
wc,
fetch,
def_pf_req_callback,
@@ -165,7 +162,6 @@ static void cachetable_eviction_full_test (void) {
make_blocknum(1),
1,
&value2,
&size2,
wc,
fetch,
def_pf_req_callback,

View File

@@ -83,7 +83,6 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
// let's get and pin this node a bunch of times to drive up the clock count
for (int i = 0; i < 20; i++) {
void* value;
long size;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(
@@ -91,7 +90,6 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
key,
fullhash,
&value,
&size,
wc,
def_fetch,
def_pf_req_callback,
@@ -109,14 +107,12 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
// def_fetch another block, causing an eviction of the first block we made above
do_sleep = true;
void* value2;
long size2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(
f1,
make_blocknum(1),
1,
&value2,
&size2,
wc,
def_fetch,
def_pf_req_callback,
@@ -131,14 +127,16 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
toku_cachetable_verify(ct);
void *v = 0;
long size = 0;
// now verify that the block we are trying to evict is gone
wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r == TOKUDB_TRY_AGAIN);
r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0 && v == 0 && size == 8);
r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0 && v == 0);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
assert(r == 0 && attr.size == 8);
do_sleep = false;
struct timeval tend;

View File

@@ -93,13 +93,11 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
wc.pe_callback = pe_callback;
for (int i = 0; i < 20; i++) {
void* value;
long size;
r = toku_cachetable_get_and_pin(
f1,
key,
fullhash,
&value,
&size,
wc,
def_fetch,
def_pf_req_callback,
@@ -116,13 +114,11 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
// fetch another block, causing an eviction of the first block we made above
void* value2;
long size2;
r = toku_cachetable_get_and_pin(
f1,
make_blocknum(1),
1,
&value2,
&size2,
wc,
def_fetch,
def_pf_req_callback,
@@ -139,14 +135,12 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
toku_cachetable_verify(ct);
void *v = 0;
long size = 0;
// now verify that the block we are trying to evict may be pinned
r = toku_cachetable_get_and_pin_nonblocking(
f1,
key,
fullhash,
&v,
&size,
wc,
def_fetch,
def_pf_req_callback,
@@ -161,7 +155,6 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
key,
fullhash,
&v,
&size,
wc,
def_fetch,
def_pf_req_callback,
@@ -169,7 +162,10 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
true,
NULL
);
assert(r == 0 && v == 0 && size == 1);
assert(r == 0 && v == 0);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
assert(r == 0 && attr.size == 1);
struct timeval tend;
gettimeofday(&tend, NULL);

View File

@@ -70,9 +70,8 @@ cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
// at this point, we should have 8 bytes of data in a cachetable that supports 7
@@ -82,11 +81,11 @@ cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) {
if (test_type == pin_in_memory) {
old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
if (nonblocking) {
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert_zero(r);
}
else {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
}
new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
@@ -97,13 +96,13 @@ cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) {
else if (test_type == pin_fetch) {
old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
if (nonblocking) {
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(2), 2, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r == TOKUDB_TRY_AGAIN);
new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
assert(new_num_ev_runs > old_num_ev_runs);
}
else {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
assert(new_num_ev_runs > old_num_ev_runs);
@@ -114,13 +113,13 @@ cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) {
else if (test_type == pin_partial_fetch) {
old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
if (nonblocking) {
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r == TOKUDB_TRY_AGAIN);
new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
assert(new_num_ev_runs > old_num_ev_runs);
}
else {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, pf_req_callback, pf_callback, true, NULL);
assert_zero(r);
new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
assert(new_num_ev_runs > old_num_ev_runs);

View File

@@ -73,11 +73,10 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
for (int i = 0; i < 10; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(i), i, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(i), i, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8;
r = toku_test_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, attr);

View File

@@ -93,12 +93,14 @@ cachetable_getandpin_test (int n) {
for (i=1; i<=n; i++) {
uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
void *v; long size;
void *v;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(i), hi, &v, &size, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
r = toku_cachetable_get_and_pin(f1, make_blocknum(i), hi, &v, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
assert(size == i);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f1, make_blocknum(i), hi, &attr);
assert(r == 0 && attr.size == i);
r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(i));
assert(r == 0);

View File

@@ -69,10 +69,9 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
toku_cachefile_close(&f1, false, ZERO_LSN);

View File

@@ -121,11 +121,8 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, pf_req_callback, pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, pf_req_callback, pf_callback, true, NULL);
assert(&fetch_val == v1);
//
// verify that a prefetch of this node will fail
@@ -148,16 +145,19 @@ cachetable_test (void) {
//
// now get and pin node again, and make sure that partial fetch and fetch are not called
//
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
//
// now make sure that if we say a partial fetch is required, that we get a partial fetch
// and that read_extraargs properly passed down
//
pf_req_called = false;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, true_pf_req_callback, true_pf_callback, true, &fetch_val);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, true_pf_req_callback, true_pf_callback, true, &fetch_val);
assert(pf_req_called);
assert(s1 == sizeof(fetch_val)+1);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f1, make_blocknum(1), 1, &attr);
assert(r == 0);
assert(attr.size == sizeof(fetch_val)+1);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
// close and reopen cachefile so we can do some simple prefetch tests
@@ -185,7 +185,7 @@ cachetable_test (void) {
//
// now verify we can pin it, and NO fetch callback should get called
//
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
assert(&fetch_val == v1);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
@@ -205,7 +205,7 @@ cachetable_test (void) {
&doing_prefetch
);
assert(doing_prefetch);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
assert(&fetch_val == v1);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));

View File

@@ -171,7 +171,6 @@ static void *move_numbers(void *arg) {
*/
void* v1;
long s1;
CACHEKEY less_key;
less_key.b = less;
uint32_t less_fullhash = less;
@@ -184,7 +183,6 @@ static void *move_numbers(void *arg) {
less_key,
less,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -205,7 +203,6 @@ static void *move_numbers(void *arg) {
make_blocknum(greater),
greater,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -238,7 +235,6 @@ static void *move_numbers(void *arg) {
make_blocknum(third),
third,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -264,7 +260,6 @@ static void *read_random_numbers(void *arg) {
while(run_test) {
int rand_key1 = random() % NUM_ELEMENTS;
void* v1;
long s1;
int r1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
@@ -274,7 +269,6 @@ static void *read_random_numbers(void *arg) {
make_blocknum(rand_key1),
rand_key1,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_READ,
NULL,

View File

@@ -57,18 +57,16 @@ run_test (void) {
void* v1;
void* v2;
long s1;
long s2;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
for (int i = 0; i < 20; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
}
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
@@ -79,7 +77,6 @@ run_test (void) {
make_blocknum(1),
1,
&v1,
&s1,
def_write_callback(NULL),
def_fetch,
def_pf_req_callback,

View File

@@ -107,13 +107,11 @@ static void cachetable_prefetch_full_test (bool partial_fetch) {
if (partial_fetch) {
expect_pf = true;
void* value;
long size;
r = toku_cachetable_get_and_pin(
f1,
key,
fullhash,
&value,
&size,
wc,
fetch,
def_pf_req_callback,

View File

@@ -124,13 +124,11 @@ static void cachetable_prefetch_maybegetandpin_test (bool do_partial_fetch) {
if (do_partial_fetch) {
expect_pf = true;
void* value;
long size;
r = toku_cachetable_get_and_pin(
f1,
key,
fullhash,
&value,
&size,
wc,
fetch,
pf_req_callback,
@@ -152,12 +150,14 @@ static void cachetable_prefetch_maybegetandpin_test (bool do_partial_fetch) {
// verify that get_and_pin waits while the prefetch is in progress
void *v = 0;
long size = 0;
do_pf = false;
r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, wc, fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, true, NULL);
assert(r == 0 && v == 0 && size == 2);
r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, wc, fetch, pf_req_callback, pf_callback, true, NULL);
assert(r == 0 && v == 0);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
assert(r == 0 && attr.size == 2);
struct timeval tend;
gettimeofday(&tend, NULL);

View File

@@ -171,7 +171,6 @@ static void move_number_to_child(
child = ((random() % 2) == 0) ? (2*parent + 1) : (2*parent + 2);
void* v1;
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
@@ -189,7 +188,6 @@ static void move_number_to_child(
child_key,
child_fullhash,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -222,7 +220,6 @@ static void *move_numbers(void *arg) {
int parent = 0;
int r;
void* v1;
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
@@ -234,7 +231,6 @@ static void *move_numbers(void *arg) {
parent_key,
parent_fullhash,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -280,7 +276,6 @@ static void merge_and_split_child(
assert(child != other_child);
void* v1;
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
@@ -299,7 +294,6 @@ static void merge_and_split_child(
child_key,
child_fullhash,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -325,7 +319,6 @@ static void merge_and_split_child(
other_child_key,
other_child_fullhash,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,
@@ -387,7 +380,6 @@ static void *merge_and_split(void *arg) {
int parent = 0;
int r;
void* v1;
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
@@ -399,7 +391,6 @@ static void *merge_and_split(void *arg) {
parent_key,
parent_fullhash,
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_CHEAP,
NULL,

View File

@@ -106,11 +106,10 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
check_flush = true;
@@ -127,13 +126,13 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
gettimeofday(&tstart, NULL);
// test that having a pin that passes false for may_modify_value does not stall behind checkpoint
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
gettimeofday(&tend, NULL);
assert(tdelta_usec(&tend, &tstart) <= 2000000);
assert(!clone_called);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
gettimeofday(&tend, NULL);
// we take 5 seconds for a write

View File

@@ -94,11 +94,10 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
check_flush = false;
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
// begin checkpoint, since pair is clean, we should not
@@ -106,7 +105,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
// at this point, there should be no more dirty writes
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));

View File

@@ -113,10 +113,9 @@ simple_test(bool unlink_on_close) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
set_cf_userdata(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
toku_cachetable_verify(ct);
if (unlink_on_close) {
@@ -169,9 +168,8 @@ static void test_pair_stays_in_cache(enum cachetable_dirty dirty) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), dirty, make_pair_attr(8));
toku_cachefile_close(&f1, false, ZERO_LSN);
// now reopen the cachefile
@@ -217,28 +215,25 @@ static void test_multiple_cachefiles(bool use_same_hash) {
r = toku_cachetable_openf(&f3, ct, fname3, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
void* v2;
long s2;
void* v3;
long s3;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
for (int j = 0; j < 3; j++) {
uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f1, make_blocknum(j));
r = toku_cachetable_get_and_pin(f1, make_blocknum(j), hash, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(j), hash, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
}
for (int j = 0; j < 3; j++) {
uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f2, make_blocknum(j));
r = toku_cachetable_get_and_pin(f2, make_blocknum(j), hash, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f2, make_blocknum(j), hash, &v2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f2, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
}
for (int j = 0; j < 3; j++) {
uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f3, make_blocknum(j));
r = toku_cachetable_get_and_pin(f3, make_blocknum(j), hash, &v3, &s3, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f3, make_blocknum(j), hash, &v3, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f3, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
}
@@ -299,9 +294,8 @@ static void test_evictor(void) {
set_cf_userdata(f1);
r = toku_cachetable_openf(&f2, ct, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(8));
close_called = false;
free_called = false;
@@ -311,7 +305,7 @@ static void test_evictor(void) {
// at this point, we should f1, along with one PAIR, stale in the cachetable
// now let's pin another node, and ensure that it causes an eviction and free of f1
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(8));
// now sleep for 2 seconds, and check to see if f1 has been closed
sleep(2);

View File

@@ -57,11 +57,10 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
void* v1;
long s1;
// nothing in cachetable, so this should fail
r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
assert(r==-1);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
// maybe_get_and_pin_clean should succeed, maybe_get_and_pin should fail

View File

@@ -80,12 +80,11 @@ run_test (pair_lock_type lock_type) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, lock_type, NULL, 0, NULL, NULL);
r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, lock_type, NULL, 0, NULL, NULL);
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
reset_unlockers(&unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
// to fix #5393, we changed behavior on full fetch where if we
// requested a PL_WRITE_CHEAP, and had to grab a PL_WRITE_EXPENSIVE for
// a full fetch, we keep it as a PL_WRITE_EXPENSIVE because downgrading back
@@ -100,11 +99,11 @@ run_test (pair_lock_type lock_type) {
// now do the same test with a partial fetch required
pf_called = false;
r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_pf_req_callback, true_pf_callback, lock_type, NULL, 0, NULL, NULL);
r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_pf_req_callback, true_pf_callback, lock_type, NULL, 0, NULL, NULL);
assert(pf_called);
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
reset_unlockers(&unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
if (lock_type == PL_WRITE_EXPENSIVE) {
assert(r == TOKUDB_TRY_AGAIN); assert(!unlockers.locked);
}

View File

@@ -116,18 +116,15 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
void* v1;
void* v2;
void* v3;
long s1;
long s2;
long s3;
PAIR dependent_pairs[2];
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&val1);
wc.flush_callback = flush;
wc.write_extraargs = &val1;
dest_pair = &dependent_pairs[0];
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
dest_pair = &dependent_pairs[1];
wc.write_extraargs = &val2;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
// now we set the dirty state of these two.
enum cachetable_dirty cd[2];
@@ -152,7 +149,6 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
make_blocknum(3),
3,
&v3,
&s3,
wc, fetch, def_pf_req_callback, def_pf_callback,
PL_WRITE_EXPENSIVE,
&val3,

View File

@@ -78,15 +78,14 @@ static void reset_unlockers(UNLOCKERS unlockers) {
static void
run_case_that_should_succeed(CACHEFILE f1, pair_lock_type first_lock, pair_lock_type second_lock) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
struct unlockers unlockers = {true, unlock_dummy, NULL, NULL};
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
assert(r==0);
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
reset_unlockers(&unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
assert(r==0); assert(unlockers.locked);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
}
@@ -94,22 +93,25 @@ run_case_that_should_succeed(CACHEFILE f1, pair_lock_type first_lock, pair_lock_
static void
run_case_that_should_fail(CACHEFILE f1, pair_lock_type first_lock, pair_lock_type second_lock) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
struct unlockers unlockers = {true, unlock_dummy, NULL, NULL};
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
assert(r==0);
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
reset_unlockers(&unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
assert(r == TOKUDB_TRY_AGAIN); assert(!unlockers.locked);
}
static void
run_test (void) {
const int test_limit = 12;
// sometimes the cachetable evictor runs during the test. this sometimes causes cachetable pair locking contention,
// which results with a TOKUDB_TRY_AGAIN error occurring. unfortunately, the test does not expect this and fails.
// set cachetable size limit to a value big enough so that the cachetable evictor is not triggered during the test.
const int test_limit = 100;
int r;
CACHETABLE ct;
toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
@@ -119,14 +121,13 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
//
// test that if we are getting a PAIR for the first time that TOKUDB_TRY_AGAIN is returned
// because the PAIR was not in the cachetable.
//
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);

View File

@@ -103,34 +103,33 @@ run_test (void) {
create_dummy_functions(f1);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
//
// test that if we are getting a PAIR for the first time that TOKUDB_TRY_AGAIN is returned
// because the PAIR was not in the cachetable.
//
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
// now it should succeed
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==0);
foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
// because node is in use, should return TOKUDB_TRY_AGAIN
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(foo);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
// now make sure we get TOKUDB_TRY_AGAIN when a partial fetch is involved
// first make sure value is there
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
// now make sure that we get TOKUDB_TRY_AGAIN for the partial fetch
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_def_pf_req_callback, true_def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_def_pf_req_callback, true_def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
toku_cachetable_verify(ct);
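
The comments above spell out the contract these assertions check: the first nonblocking pin of a PAIR that is not yet in the cachetable returns TOKUDB_TRY_AGAIN, and a later call succeeds once the fetch has finished. A hedged sketch of how a caller typically drives that, using the same names as the test (the retry loop itself is illustrative, not part of this commit):

int r;
for (;;) {
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
if (r != TOKUDB_TRY_AGAIN) break;
// a real caller releases its own locks here (via the unlockers argument) before retrying
}
assert(r == 0);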

View File

@@ -95,21 +95,18 @@ run_test (void) {
create_dummy_functions(f1);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(foo);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
//now let's do a simple checkpoint test
// first dirty the PAIR
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
// now this should mark the pair for checkpoint
@@ -121,7 +118,7 @@ run_test (void) {
//
check_me = true;
flush_called = false;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(flush_called);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));

View File

@@ -130,16 +130,14 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
void* v1;
void* v2;
long s1;
long s2;
PAIR dependent_pairs[2];
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
dest_pair = &dependent_pairs[0];
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
assert(r==0);
dest_pair = &dependent_pairs[1];
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
assert(r==0);
// now we set the dirty state of these two.

View File

@@ -75,9 +75,8 @@ static int sleep_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(
static void *run_expensive_pf(void *arg) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
assert(r == TOKUDB_TRY_AGAIN);
assert(pf_called);
return arg;
@@ -85,9 +84,8 @@ static void *run_expensive_pf(void *arg) {
static void *run_expensive_fetch(void *arg) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
assert(fetch_called);
assert(r == TOKUDB_TRY_AGAIN);
return arg;
@@ -106,7 +104,6 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
toku_pthread_t fetch_tid;
@@ -118,7 +115,6 @@ run_test (void) {
make_blocknum(1),
1,
&v1,
&s1,
wc,
sleep_fetch,
def_pf_req_callback,
@@ -133,9 +129,9 @@ run_test (void) {
assert_zero(r);
// call with may_modify_node = false twice, make sure we can get it
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r==0);
@@ -151,7 +147,6 @@ run_test (void) {
make_blocknum(1),
1,
&v1,
&s1,
wc,
sleep_fetch,
def_pf_req_callback,

View File

@@ -77,11 +77,10 @@ static int sleep_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(
static void *run_expensive_pf(void *arg) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
pf_called = false;
fetch_called = false;
int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
assert_zero(r);
assert(pf_called);
return arg;
@@ -89,11 +88,10 @@ static void *run_expensive_pf(void *arg) {
static void *run_expensive_fetch(void *arg) {
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
pf_called = false;
fetch_called = false;
int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
assert_zero(r);
assert(fetch_called);
return arg;
@@ -112,7 +110,6 @@ run_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
toku_pthread_t fetch_tid;
@@ -124,7 +121,6 @@ run_test (void) {
make_blocknum(1),
1,
&v1,
&s1,
wc,
sleep_fetch,
def_pf_req_callback,
@@ -141,9 +137,9 @@ run_test (void) {
assert_zero(r);
// call with may_modify_node = false twice, make sure we can get it
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
assert_zero(r);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r==0);
@@ -159,7 +155,6 @@ run_test (void) {
make_blocknum(1),
1,
&v1,
&s1,
wc,
sleep_fetch,
def_pf_req_callback,

View File

@@ -70,11 +70,8 @@ cachetable_test (void) {
create_dummy_functions(f1);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
toku_cachetable_begin_checkpoint(cp, NULL);
r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_checkpoint, NULL);
@@ -85,7 +82,7 @@ cachetable_test (void) {
NULL
);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_no_checkpoint, NULL);
toku_cachetable_verify(ct);

View File

@@ -50,11 +50,8 @@ cachetable_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
//void* v2;
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
toku_cachetable_verify(ct);
toku_cachefile_close(&f1, false, ZERO_LSN);

View File

@@ -129,7 +129,7 @@ static void test_nested_pin (void) {
wc.flush_callback = flush_n;
toku_cachetable_put(f, make_blocknum(1), f1hash, &i0, make_pair_attr(1), wc, put_callback_nop);
r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
r = toku_cachetable_get_and_pin(f, make_blocknum(1), f1hash, &vv, NULL, wc, fetch_n, def_pf_req_callback, def_pf_callback, true, f2);
r = toku_cachetable_get_and_pin(f, make_blocknum(1), f1hash, &vv, wc, fetch_n, def_pf_req_callback, def_pf_callback, true, f2);
assert(r==0);
assert(vv==&i0);
assert(i0==0);
@@ -215,12 +215,12 @@ static void test_multi_filehandles (void) {
wc.flush_callback = null_flush;
toku_cachetable_put(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), (void*)124, make_pair_attr(test_object_size), wc, put_callback_nop);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(0)); assert(r==0);
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
assert((unsigned long)v==124);
r = toku_cachetable_get_and_pin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
r = toku_cachetable_get_and_pin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), &v, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
assert((unsigned long)v==125);
wc.write_extraargs = (void*)222;
r = toku_cachetable_get_and_pin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), &v, NULL, wc, add222_fetch, def_pf_req_callback, def_pf_callback, true, (void*)222); assert(r==0);
r = toku_cachetable_get_and_pin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), &v, wc, add222_fetch, def_pf_req_callback, def_pf_callback, true, (void*)222); assert(r==0);
assert((unsigned long)v==224);
// we support only one close for a file handle
@@ -296,7 +296,7 @@ static void test_dirty(void) {
assert(dirty == 1);
assert(pinned == 0);
r = toku_cachetable_get_and_pin(f, key, hkey, &value, NULL, wc,
r = toku_cachetable_get_and_pin(f, key, hkey, &value, wc,
test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
@@ -318,7 +318,7 @@ static void test_dirty(void) {
key = make_blocknum(2);
hkey = toku_cachetable_hash(f, key);
r = toku_cachetable_get_and_pin(f, key, hkey,
&value, NULL, wc,
&value, wc,
test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
@@ -338,7 +338,7 @@ static void test_dirty(void) {
assert(pinned == 0);
r = toku_cachetable_get_and_pin(f, key, hkey,
&value, NULL, wc,
&value, wc,
test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
@@ -429,11 +429,13 @@ static void test_size_resize(void) {
assert(r == 0);
void *current_value;
long current_size;
r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, &current_size, wc, 0, def_pf_req_callback, def_pf_callback, true, 0);
r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, wc, 0, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
assert(current_value == value);
assert(current_size == new_size);
PAIR_ATTR attr;
r = toku_cachetable_get_attr(f, key, hkey, &attr);
assert(r == 0);
assert(attr.size == new_size);
r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(new_size));
assert(r == 0);
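
With the size output gone from the pin call, this test now asks the cachetable for the pair's attributes explicitly. A condensed sketch of the resulting pattern, reusing the f, key, hkey, wc, and new_size names from the hunk above:

void *current_value;
PAIR_ATTR attr;
r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, wc, 0, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// toku_cachetable_get_attr fills attr and returns 0 while the pair is present, -1 otherwise
r = toku_cachetable_get_attr(f, key, hkey, &attr);
assert(r == 0);
assert(attr.size == new_size);
r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(new_size));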

View File

@@ -139,8 +139,8 @@ cachetable_put_evict_remove_test (int n) {
}
// get 0
void *v; long s;
r = toku_cachetable_get_and_pin(f1, make_blocknum(0), hi[0], &v, &s, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
void *v;
r = toku_cachetable_get_and_pin(f1, make_blocknum(0), hi[0], &v, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// remove 0

View File

@@ -71,10 +71,7 @@ run_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
void* v1;
//void* v2;
long s1;
//long s2;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
toku_test_cachetable_unpin(
f1,
make_blocknum(1),
@@ -90,7 +87,6 @@ run_test (void) {
make_blocknum(1),
toku_cachetable_hash(f1, make_blocknum(1)),
&v1,
&s1,
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -102,7 +102,6 @@ unpin_and_evictor_test(enum unpin_evictor_test_type test_type) {
evictor_test_helpers::disable_ev_thread(&ct->ev);
void* value2;
long size2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
// this should put in the cachetable a pair of size 8
r = toku_cachetable_get_and_pin(
@@ -110,7 +109,6 @@ unpin_and_evictor_test(enum unpin_evictor_test_type test_type) {
make_blocknum(1),
1,
&value2,
&size2,
wc,
def_fetch,
def_pf_req_callback,

View File

@@ -0,0 +1,178 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
/* The goal of this test. Make sure that inserts stay behind deletes. */
#include "test.h"
#include "cachetable/checkpoint.h"
#include "ft-flusher-internal.h"
#include "ft-flusher.h"
#include <ft-cachetable-wrappers.h>
static TOKUTXN const null_txn = 0;
enum { NODESIZE = 1024, KSIZE = NODESIZE - 100, TOKU_PSIZE = 20 };
CACHETABLE ct;
FT_HANDLE ft;
const char *fname = TOKU_TEST_FILENAME;
static int update_func(DB *UU(db), const DBT *key, const DBT *old_val,
const DBT *UU(extra),
void (*set_val)(const DBT *new_val, void *set_extra),
void *set_extra) {
DBT new_val;
assert(old_val->size > 0);
if (verbose) {
printf("applying update to %s\n", (char *)key->data);
}
toku_init_dbt(&new_val);
set_val(&new_val, set_extra);
return 0;
}
static void doit() {
BLOCKNUM node_leaf;
BLOCKNUM node_root;
BLOCKNUM node_internal;
int r;
toku_cachetable_create(&ct, 500 * 1024 * 1024, ZERO_LSN, nullptr);
unlink(fname);
r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE / 2,
TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn,
toku_builtin_compare_fun);
assert(r == 0);
ft->options.update_fun = update_func;
ft->ft->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls
char *pivots[1];
pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6;
r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
assert(r == 0);
toku_free(pivots[0]);
r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r == 0);
r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
assert(r == 0);
r = toku_testsetup_root(ft, node_root);
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"a", // key
2, // keylen
"aa", 3);
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"z", // key
2, // keylen
"zz", 3);
assert(r == 0);
char filler[400];
memset(filler, 0, sizeof(filler));
// now we insert filler data so that the rebalance
// keeps it at two nodes
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"b", // key
2, // keylen
filler, sizeof(filler));
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"y", // key
2, // keylen
filler, sizeof(filler));
assert(r == 0);
r = toku_testsetup_insert_to_nonleaf(ft, node_internal, FT_INSERT,
"a", // key
2, // keylen
"yy", 3);
assert(r == 0);
r = toku_testsetup_insert_to_nonleaf(ft, node_root, FT_INSERT,
"a", // key
2, // keylen
"zz", 3);
assert(r == 0);
// at this point of time, the logical row count will be 6. This has to be
// manually set up as the tests work under the interface of the ft_send_msg
ft->ft->in_memory_logical_rows = 6;
// now run a checkpoint to get everything clean
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// now do a lookup on one of the keys, this should bring a leaf node up to
// date
DBT k;
struct check_pair pair = {2, "a", 3, "zz", 0};
r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r == 0);
assert(ft->ft->in_memory_logical_rows == 4);
FTNODE node;
// now lock and release the leaf node to make sure it is what we expect it to
// be.
toku_pin_node_with_min_bfe(&node, node_leaf, ft);
for (int i = 0; i < 20; i++) {
toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), ft->ft,
def_pe_finalize_impl, nullptr);
}
toku_unpin_ftnode(ft->ft, node);
assert(ft->ft->in_memory_logical_rows == 6);
r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0);
toku_cachetable_close(&ct);
}
int test_main(int argc __attribute__((__unused__)),
const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
doit();
return 0;
}

View File

@@ -0,0 +1,208 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
/* The goal of this test. Make sure that inserts stay behind deletes. */
#include "test.h"
#include "cachetable/checkpoint.h"
#include "ft-flusher-internal.h"
#include "ft-flusher.h"
#include <ft-cachetable-wrappers.h>
static TOKUTXN const null_txn = 0;
enum { NODESIZE = 1024, KSIZE = NODESIZE - 100, TOKU_PSIZE = 20 };
CACHETABLE ct;
FT_HANDLE ft;
const char *fname = TOKU_TEST_FILENAME;
static int update_func(DB *UU(db), const DBT *key, const DBT *old_val,
const DBT *UU(extra),
void (*set_val)(const DBT *new_val, void *set_extra),
void *set_extra) {
DBT new_val;
assert(old_val->size > 0);
if (verbose) {
printf("applying update to %s\n", (char *)key->data);
}
toku_init_dbt(&new_val);
set_val(&new_val, set_extra);
return 0;
}
// callback functions for toku_ft_flush_some_child
static bool destroy_bn(void *UU(extra)) { return true; }
static bool recursively_flush_should_not_happen(FTNODE UU(child),
void *UU(extra)) {
assert(false);
}
static int child_to_flush(FT UU(h), FTNODE parent, void *UU(extra)) {
assert(parent->height == 1);
assert(parent->n_children == 1);
return 0;
}
static void dummy_update_status(FTNODE UU(child), int UU(dirtied),
void *UU(extra)) {}
static void doit() {
BLOCKNUM node_leaf;
BLOCKNUM node_root;
BLOCKNUM node_internal;
int r;
toku_cachetable_create(&ct, 500 * 1024 * 1024, ZERO_LSN, nullptr);
unlink(fname);
r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE / 2,
TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn,
toku_builtin_compare_fun);
assert(r == 0);
ft->options.update_fun = update_func;
ft->ft->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls
char *pivots[1];
pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6;
r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
assert(r == 0);
toku_free(pivots[0]);
r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r == 0);
r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
assert(r == 0);
r = toku_testsetup_root(ft, node_root);
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"a", // key
2, // keylen
"aa", 3);
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"z", // key
2, // keylen
"zz", 3);
assert(r == 0);
char filler[400];
memset(filler, 0, sizeof(filler));
// now we insert filler data so that the rebalance
// keeps it at two nodes
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"b", // key
2, // keylen
filler, sizeof(filler));
assert(r == 0);
r = toku_testsetup_insert_to_leaf(ft, node_leaf,
"y", // key
2, // keylen
filler, sizeof(filler));
assert(r == 0);
r = toku_testsetup_insert_to_nonleaf(ft, node_internal, FT_INSERT,
"a", // key
2, // keylen
"yy", 3);
assert(r == 0);
r = toku_testsetup_insert_to_nonleaf(ft, node_root, FT_INSERT,
"a", // key
2, // keylen
"zz", 3);
assert(r == 0);
// at this point of time, the logical row count will be 6. This has to be
// manually set up as the tests work under the interface of the ft_send_msg
ft->ft->in_memory_logical_rows = 6;
// now run a checkpoint to get everything clean
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// now do a lookup on one of the keys, this should bring a leaf node up to
// date
DBT k;
struct check_pair pair = {2, "a", 3, "zz", 0};
r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r == 0);
assert(ft->ft->in_memory_logical_rows == 4);
// now lock and release the leaf node to make sure it is what we expect it to
// be.
FTNODE node = NULL;
ftnode_fetch_extra bfe;
bfe.create_for_min_read(ft->ft);
toku_pin_ftnode_with_dep_nodes(
ft->ft, node_internal, toku_cachetable_hash(ft->ft->cf, node_internal),
&bfe, PL_WRITE_EXPENSIVE, 0, NULL, &node, true);
assert(node->height == 1);
assert(node->n_children == 1);
struct flusher_advice fa;
flusher_advice_init(&fa, child_to_flush, destroy_bn,
recursively_flush_should_not_happen, default_merge_child,
dummy_update_status, default_pick_child_after_split,
NULL);
// do the flush which forces an evict of the leaf. logical row count back to
// 6 before the flush
toku_ft_flush_some_child(ft->ft, node, &fa);
assert(ft->ft->in_memory_logical_rows == 5);
r = toku_close_ft_handle_nolsn(ft, 0);
assert(r == 0);
toku_cachetable_close(&ct);
}
int test_main(int argc __attribute__((__unused__)),
const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
doit();
return 0;
}

View File

@@ -230,8 +230,10 @@ int toku_rollback_commit(TOKUTXN txn, LSN lsn) {
//If this transaction needs an fsync (if it commits)
//save that in the parent. Since the commit really happens in the root txn.
toku_txn_lock(txn->parent);
txn->parent->force_fsync_on_commit |= txn->force_fsync_on_commit;
txn->parent->roll_info.num_rollentries += txn->roll_info.num_rollentries;
toku_txn_unlock(txn->parent);
} else {
r = apply_txn(txn, lsn, toku_commit_rollback_item);
assert(r==0);
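
The new lines fold the child's bookkeeping into the parent transaction while holding the parent's lock, so two children committing into the same parent concurrently no longer race on these fields. Restated on its own for clarity (field names exactly as in the hunk above):

toku_txn_lock(txn->parent);
txn->parent->force_fsync_on_commit |= txn->force_fsync_on_commit;
txn->parent->roll_info.num_rollentries += txn->roll_info.num_rollentries;
toku_txn_unlock(txn->parent);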

View File

@@ -276,7 +276,7 @@ void toku_get_and_pin_rollback_log(TOKUTXN txn, BLOCKNUM blocknum, ROLLBACK_LOG_
FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf));
uint32_t hash = toku_cachetable_hash(cf, blocknum);
int r = toku_cachetable_get_and_pin_with_dep_pairs(cf, blocknum, hash,
&value, NULL,
&value,
get_write_callbacks_for_rollback_log(h),
toku_rollback_fetch_callback,
toku_rollback_pf_req_callback,

View File

@@ -39,7 +39,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "malloc_utils.hpp"
#if !HAVE_BITS_FUNCTEXCEPT_H
#if !defined(HAVE_BITS_FUNCTEXCEPT_H) || !HAVE_BITS_FUNCTEXCEPT_H
namespace std {

View File

@@ -47,7 +47,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include <cassert>
#include <cstdlib>
#if HAVE_BITS_FUNCTEXCEPT_H
#if defined(HAVE_BITS_FUNCTEXCEPT_H) && HAVE_BITS_FUNCTEXCEPT_H
# include <bits/functexcept.h>

View File

@@ -182,7 +182,7 @@ toku_memory_footprint(void * p, size_t touched)
void *
toku_malloc(size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
return nullptr;
}
@@ -209,7 +209,7 @@ toku_malloc(size_t size) {
}
void *toku_malloc_aligned(size_t alignment, size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
return nullptr;
}
@@ -245,7 +245,7 @@ toku_calloc(size_t nmemb, size_t size) {
void *
toku_realloc(void *p, size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
if (p != nullptr) {
toku_free(p);
@@ -276,7 +276,7 @@ toku_realloc(void *p, size_t size) {
}
void *toku_realloc_aligned(size_t alignment, void *p, size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
if (p != nullptr) {
toku_free(p);
@@ -345,7 +345,7 @@ toku_free(void *p) {
void *
toku_xmalloc(size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
return nullptr;
}
@@ -375,7 +375,7 @@ void* toku_xmalloc_aligned(size_t alignment, size_t size)
// Fail with a resource_assert if the allocation fails (don't return an error code).
// Requires: alignment is a power of two.
{
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
return nullptr;
}
@@ -409,7 +409,7 @@ toku_xcalloc(size_t nmemb, size_t size) {
void *
toku_xrealloc(void *v, size_t size) {
#if __APPLE__
#if defined(__APPLE__)
if (size == 0) {
if (v != nullptr) {
toku_free(v);

View File

@@ -126,7 +126,7 @@ void db_env_do_backtrace(FILE *outf);
#define resource_assert_zero(a) assert_zero(a) // indicates resource must be available, otherwise unrecoverable
#define resource_assert_equals(a, b) assert_equals(a, b) // indicates resource must be available, otherwise unrecoverable
#if TOKU_DEBUG_PARANOID
#if defined(TOKU_DEBUG_PARANOID) && TOKU_DEBUG_PARANOID
#define paranoid_invariant(a) assert(a)
#define paranoid_invariant_null(a) assert_null(a)
#define paranoid_invariant_notnull(a) assert(a)
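
This file and several others in the commit switch from plain #if FOO to #if defined(FOO) && FOO. Both forms evaluate false when the macro is undefined or defined to 0, but the guarded form is explicit about it and avoids -Wundef noise for macros that a given build may simply not define. A small illustration (the #else branch is an assumed no-op fallback, not taken from the diff):

#if defined(TOKU_DEBUG_PARANOID) && TOKU_DEBUG_PARANOID
#define paranoid_invariant(a) assert(a)
#else
#define paranoid_invariant(a) ((void) 0)
#endif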

View File

@@ -40,7 +40,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
struct tokutxn;
#if defined(ENABLED_DEBUG_SYNC)
#if defined(MYSQL_TOKUDB_ENGINE) && MYSQL_TOKUDB_ENGINE && \
defined(ENABLED_DEBUG_SYNC) && ENABLED_DEBUG_SYNC
/*
the below macros are defined in my_global.h, which is included in m_string.h,

View File

@@ -1,4 +1,4 @@
#ifdef MYSQL_TOKUDB_ENGINE
#ifdef TOKU_MYSQL_WITH_PFS
#include "toku_portability.h"
#include "toku_pthread.h"
@@ -18,7 +18,7 @@ int toku_pthread_create(const toku_instr_key &key,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
#if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
@@ -362,4 +362,4 @@ void toku_instr_rwlock_unlock(toku_pthread_rwlock_t &rwlock) {
PSI_RWLOCK_CALL(unlock_rwlock)(rwlock.psi_rwlock);
}
#endif // MYSQL_TOKUDB_ENGINE
#endif // TOKU_MYSQL_WITH_PFS
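
The 5.7 check no longer relies on MYSQL_VERSION_MAJOR and MYSQL_VERSION_MINOR being visible here; it tests the single MYSQL_VERSION_ID integer instead, which packs the version as major*10000 + minor*100 + patch. A short illustration of the encoding:

/* 5.7.23 -> 50723, so the whole 5.7.x series falls in the range [50700, 50799] */
#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
/* 5.7-only path: spawn_thread takes a my_thread_handle here */
#endif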

View File

@@ -41,7 +41,7 @@ class toku_instr_probe_empty {
extern toku_instr_key toku_uninstrumented;
#ifndef MYSQL_TOKUDB_ENGINE
#ifndef TOKU_MYSQL_WITH_PFS
#include <pthread.h>
@@ -245,10 +245,10 @@ inline void toku_instr_rwlock_wrlock_wait_end(
inline void toku_instr_rwlock_unlock(UU(toku_pthread_rwlock_t &rwlock)) {}
#else // MYSQL_TOKUDB_ENGINE
#else // TOKU_MYSQL_WITH_PFS
// There can be not only mysql but also mongodb or any other PFS stuff
#include <toku_instr_mysql.h>
#endif // MYSQL_TOKUDB_ENGINE
#endif // TOKU_MYSQL_WITH_PFS
extern toku_instr_key toku_uninstrumented;

View File

@@ -69,7 +69,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include <sys/stat.h>
#include <stdio.h>
#if __FreeBSD__
#if defined(__FreeBSD__)
#include <stdarg.h>
#endif

View File

@@ -40,7 +40,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include <portability/toku_config.h>
#if defined(__linux__) && USE_VALGRIND
#if defined(__linux__) && defined(USE_VALGRIND) && USE_VALGRIND
# include <valgrind/helgrind.h>
# include <valgrind/drd.h>

View File

@@ -161,16 +161,18 @@ static void do_test(size_t ct_size, int num_keys)
r = env->txn_begin(env, nullptr, &txn, 0);
CKERR(r);
DBT key, value;
for (i = 0; i < num_keys; i++) {
int v, k = toku_htonl(i);
dbt_init(&key, &k, sizeof(int));
dbt_init(&value, &v, sizeof(int));
get_value_by_key(&key, &value);
r = db->put(db, txn, &key, &value, 0);
CKERR(r);
{
DBT key, value;
for (i = 0; i < num_keys; i++) {
int v, k = toku_htonl(i);
dbt_init(&key, &k, sizeof(int));
dbt_init(&value, &v, sizeof(int));
get_value_by_key(&key, &value);
if (0) printf("put %d\n", k);
r = db->put(db, txn, &key, &value, 0);
CKERR(r);
}
}
CKERR(r);
int expect_r = num_keys == 0 ? DB_NOTFOUND : 0;
check_last_key_matches(db, expect_r, num_keys - 1);
@@ -186,13 +188,23 @@ static void do_test(size_t ct_size, int num_keys)
r = env->txn_begin(env, nullptr, &txn, 0);
CKERR(r);
r = db->del(db, txn, &key, 0);
// Delete the last key
{
DBT key;
int k = toku_htonl(num_keys - 1);
dbt_init(&key, &k, sizeof(int));
if (0) printf("del %d\n", *(int*)key.data);
r = db->del(db, txn, &key, 0);
CKERR(r);
}
check_last_key_matches(db, 0, num_keys - 1);
r = txn->commit(txn, 0);
CKERR(r);
check_last_key_matches(db, 0, num_keys - 1);
r = txn2->commit(txn2, 0);
CKERR(r);
check_last_key_matches(db, 0, num_keys - 1);
//Run Garbage collection (NOTE does not work when everything fits in root??? WHY)

View File

@@ -185,9 +185,12 @@ toku_ydb_init(void) {
// Do not clean up resources if env is panicked, just exit ugly
void
toku_ydb_destroy(void) {
if (!ydb_layer_status.initialized)
return;
if (env_is_panicked == 0) {
toku_ft_layer_destroy();
}
ydb_layer_status.initialized = false;
}
static int
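
toku_ydb_destroy now returns early unless the layer was actually initialized and clears the flag on the way out, so shutdown is safe when init never ran or when a redundant teardown path reaches destroy twice. Illustrative only:

toku_ydb_destroy();  // tears the layer down and clears ydb_layer_status.initialized
toku_ydb_destroy();  // a second call is now a harmless early return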

View File

@@ -80,8 +80,8 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix
paranoid_invariant(numvalues > 0);
void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
paranoid_invariant_notnull(ptr);
uint8_t * CAST_FROM_VOIDP(dest, ptr);
const uint8_t * CAST_FROM_VOIDP(src, mem);
uint8_t * const dest = static_cast<uint8_t *>(ptr);
const uint8_t * const src = static_cast<const uint8_t *>(mem);
if (pad_bytes == 0) {
paranoid_invariant(aligned_memsize == mem_length);
memcpy(dest, src, aligned_memsize);
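
The macro-based declarations are replaced with explicit static_casts, and the new declarations also make the pointers themselves const so they cannot be reseated later in the routine. A standalone restatement with the same names:

void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
uint8_t *const dest = static_cast<uint8_t *>(ptr);             // destination inside the mempool
const uint8_t *const src = static_cast<const uint8_t *>(mem);  // read-only source buffer
memcpy(dest, src, aligned_memsize);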

View File

@@ -85,8 +85,9 @@ minicron_do (void *pv)
toku_cond_wait(&p->condvar, &p->mutex);
}
else if (p->period_in_ms <= 1000) {
uint32_t period_in_ms = p->period_in_ms;
toku_mutex_unlock(&p->mutex);
usleep(p->period_in_ms * 1000);
usleep(period_in_ms * 1000);
toku_mutex_lock(&p->mutex);
}
else {
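
The fix is the usual one for this kind of data race: snapshot the shared field while the mutex is still held, release the lock, and sleep on the snapshot, so the unlocked usleep no longer races with toku_minicron_change_period running in another thread. As a small sketch (names from the hunk above):

toku_mutex_lock(&p->mutex);
uint32_t period_in_ms = p->period_in_ms;  // snapshot taken under the lock
toku_mutex_unlock(&p->mutex);
usleep(period_in_ms * 1000);              // no unsynchronized read of p->period_in_ms here
toku_mutex_lock(&p->mutex);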

View File

@@ -94,7 +94,7 @@ namespace toku {
}
void destroy() {
#if TOKU_SCOPED_MALLOC_DEBUG
#if defined(TOKU_SCOPED_MALLOC_DEBUG) && TOKU_SCOPED_MALLOC_DEBUG
printf("%s %p %p\n", __FUNCTION__, this, m_stack);
#endif
if (m_stack != NULL) {

View File

@@ -0,0 +1,66 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2018, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2018, Percona and/or its affiliates. All rights reserved."
#include <toku_portability.h>
#include "test.h"
#include "util/minicron.h"
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
// The thread sanitizer detected a data race in the minicron in a test unrelated to the minicron.
// This test reproduces the data race in a much smaller test which merely runs minicron tasks
// while changing the minicron period in an unrelated thread.
static int do_nothing(void *UU(v)) {
return 0;
}
int test_main (int argc, const char *argv[]) {
default_parse_args(argc,argv);
minicron m = {};
int r = toku_minicron_setup(&m, 1, do_nothing, nullptr);
assert(r == 0);
for (int i=0; i<1000; i++)
toku_minicron_change_period(&m, 1);
r = toku_minicron_shutdown(&m);
assert(r == 0);
return 0;
}

View File

@@ -34,7 +34,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
#if TOKU_INCLUDE_EXTENDED_KEYS
#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
static inline uint get_ext_key_parts(const KEY *key) {
#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
@@ -45,9 +45,9 @@ static inline uint get_ext_key_parts(const KEY *key) {
#error
#endif
}
#endif
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
HASH TOKUDB_SHARE::_open_tables;
std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@ -152,30 +152,18 @@ static void free_key_and_col_info (KEY_AND_COL_INFO* kc_info) {
}
uchar* TOKUDB_SHARE::hash_get_key(
TOKUDB_SHARE* share,
size_t* length,
TOKUDB_UNUSED(my_bool not_used)) {
*length = share->_full_table_name.length();
return (uchar *) share->_full_table_name.c_ptr();
}
void TOKUDB_SHARE::hash_free_element(TOKUDB_SHARE* share) {
share->destroy();
delete share;
}
void TOKUDB_SHARE::static_init() {
my_hash_init(
&_open_tables,
table_alias_charset,
32,
0,
0,
(my_hash_get_key)hash_get_key,
(my_hash_free_key)hash_free_element, 0);
assert_always(_open_tables.size() == 0);
}
void TOKUDB_SHARE::static_destroy() {
my_hash_free(&_open_tables);
for (auto it = _open_tables.cbegin(); it != _open_tables.cend(); it++) {
TOKUDB_TRACE("_open_tables %s %p", it->first.c_str(), it->second);
TOKUDB_SHARE* share = it->second;
share->destroy();
delete share;
}
_open_tables.clear();
assert_always(_open_tables.size() == 0);
}
const char* TOKUDB_SHARE::get_state_string(share_state_t state) {
static const char* state_string[] = {
@@ -227,15 +215,16 @@ void TOKUDB_SHARE::destroy() {
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
TOKUDB_SHARE* TOKUDB_SHARE::get_share(const char* table_name,
TABLE_SHARE* table_share,
THR_LOCK_DATA* data,
bool create_new) {
std::string find_table_name(table_name);
mutex_t_lock(_open_tables_mutex);
int error = 0;
uint length = (uint)strlen(table_name);
TOKUDB_SHARE* share = (TOKUDB_SHARE*)my_hash_search(
&_open_tables, (uchar*)table_name, length);
auto it = _open_tables.find(find_table_name);
TOKUDB_SHARE *share = nullptr;
if (it != _open_tables.end()) {
share = it->second;
assert_always(strcmp(table_name, share->full_table_name()) == 0);
}
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_SHARE,
"existing share[%s] %s:share[%p]",
@@ -253,14 +242,7 @@ TOKUDB_SHARE* TOKUDB_SHARE::get_share(const char* table_name,
share->init(table_name);
error = my_hash_insert(&_open_tables, (uchar*)share);
if (error) {
free_key_and_col_info(&share->kc_info);
share->destroy();
tokudb::memory::free((uchar*)share);
share = NULL;
goto exit;
}
_open_tables.insert({find_table_name, share});
}
share->addref();
@@ -281,7 +263,10 @@ void TOKUDB_SHARE::drop_share(TOKUDB_SHARE* share) {
share->_use_count);
mutex_t_lock(_open_tables_mutex);
my_hash_delete(&_open_tables, (uchar*)share);
size_t n = _open_tables.erase(std::string(share->full_table_name()));
assert_always(n == 1);
share->destroy();
delete share;
mutex_t_unlock(_open_tables_mutex);
}
TOKUDB_SHARE::share_state_t TOKUDB_SHARE::addref() {
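
The open-table registry drops MySQL's my_hash in favor of a std::unordered_map keyed by the full table name, which removes the hash_get_key/hash_free_element glue and the insertion-failure handling. The three operations, condensed from the hunks above with the surrounding _open_tables_mutex locking elided:

std::unordered_map<std::string, TOKUDB_SHARE*> _open_tables;

// get_share: look up an existing share by name
auto it = _open_tables.find(std::string(table_name));
TOKUDB_SHARE *share = (it != _open_tables.end()) ? it->second : nullptr;

// get_share: register a newly created share
_open_tables.insert({std::string(table_name), share});

// drop_share: remove exactly one entry, then tear the share down
size_t n = _open_tables.erase(std::string(share->full_table_name()));
assert_always(n == 1);
share->destroy();
delete share;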
@@ -439,7 +424,7 @@ const char *ha_tokudb::table_type() const {
return tokudb_hton_name;
}
const char *ha_tokudb::index_type(uint inx) {
const char *ha_tokudb::index_type(TOKUDB_UNUSED(uint inx)) {
return "BTREE";
}
@@ -487,7 +472,9 @@ ulonglong ha_tokudb::table_flags() const {
// Returns a bit mask of capabilities of the key or its part specified by
// the arguments. The capabilities are defined in sql/handler.h.
//
ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
ulong ha_tokudb::index_flags(uint idx,
TOKUDB_UNUSED(uint part),
TOKUDB_UNUSED(bool all_parts)) const {
TOKUDB_HANDLER_DBUG_ENTER("");
assert_always(table_share);
ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
@@ -531,8 +518,10 @@ typedef struct index_read_info {
// want to actually do anything with the data, hence
// callback does nothing
//
static int smart_dbt_do_nothing (DBT const *key, DBT const *row, void *context) {
return 0;
static int smart_dbt_do_nothing(TOKUDB_UNUSED(DBT const* key),
TOKUDB_UNUSED(DBT const* row),
TOKUDB_UNUSED(void* context)) {
return 0;
}
static int
@@ -545,8 +534,9 @@ smart_dbt_callback_rowread_ptquery (DBT const *key, DBT const *row, void *conte
//
// Smart DBT callback function in case where we have a covering index
//
static int
smart_dbt_callback_keyread(DBT const *key, DBT const *row, void *context) {
static int smart_dbt_callback_keyread(DBT const* key,
DBT TOKUDB_UNUSED(const* row),
void* context) {
SMART_DBT_INFO info = (SMART_DBT_INFO)context;
info->ha->extract_hidden_primary_key(info->keynr, key);
info->ha->read_key_only(info->buf,info->keynr,key);
@@ -568,20 +558,24 @@ smart_dbt_callback_rowread(DBT const *key, DBT const *row, void *context) {
//
// Smart DBT callback function in case where we have a covering index
//
static int
smart_dbt_callback_ir_keyread(DBT const *key, DBT const *row, void *context) {
static int smart_dbt_callback_ir_keyread(DBT const* key,
TOKUDB_UNUSED(DBT const* row),
void* context) {
INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(
ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
if (ir_info->cmp) {
return 0;
}
return smart_dbt_callback_keyread(key, row, &ir_info->smart_dbt_info);
}
static int
smart_dbt_callback_lookup(DBT const *key, DBT const *row, void *context) {
static int smart_dbt_callback_lookup(DBT const* key,
TOKUDB_UNUSED(DBT const* row),
void* context) {
INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(
ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
return 0;
}
@@ -1020,16 +1014,12 @@ cleanup:
return error;
}
static inline int tokudb_generate_row(
DB *dest_db,
DB *src_db,
DBT *dest_key,
DBT *dest_val,
const DBT *src_key,
const DBT *src_val
)
{
static inline int tokudb_generate_row(DB* dest_db,
TOKUDB_UNUSED(DB* src_db),
DBT* dest_key,
DBT* dest_val,
const DBT* src_key,
const DBT* src_val) {
int error;
DB* curr_db = dest_db;
@@ -1043,7 +1033,7 @@ static inline int tokudb_generate_row(
desc_size = (*(uint32_t *)row_desc) - 4;
row_desc += 4;
if (is_key_pk(row_desc, desc_size)) {
if (is_key_pk(row_desc)) {
if (dest_key->flags == DB_DBT_REALLOC && dest_key->data != NULL) {
free(dest_key->data);
}
@@ -1106,7 +1096,7 @@ static inline int tokudb_generate_row(
desc_size = (*(uint32_t *)row_desc) - 4;
row_desc += 4;
if (dest_val != NULL) {
if (!is_key_clustering(row_desc, desc_size) || src_val->size == 0) {
if (!is_key_clustering(desc_size) || src_val->size == 0) {
dest_val->size = 0;
} else {
uchar* buff = NULL;
@@ -1245,7 +1235,9 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t
tokudb_active_index = MAX_KEY;
invalidate_icp();
trx_handler_list.data = this;
#if defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
in_rpl_write_rows = in_rpl_delete_rows = in_rpl_update_rows = false;
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
@@ -1645,7 +1637,8 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
goto exit;
}
#if WITH_PARTITION_STORAGE_ENGINE
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
// verify frm data for non-partitioned tables
if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) {
error = verify_frm_data(table->s->path.str, txn);
@@ -1661,7 +1654,8 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
error = verify_frm_data(table->s->path.str, txn);
if (error)
goto exit;
#endif
#endif // defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
error =
initialize_key_and_col_info(
@@ -1884,7 +1878,7 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
}
// lookup or create share
share = TOKUDB_SHARE::get_share(name, table_share, &lock, true);
share = TOKUDB_SHARE::get_share(name, &lock, true);
assert_always(share);
if (share->state() != TOKUDB_SHARE::OPENED) {
@@ -2082,6 +2076,7 @@ cleanup:
return error;
}
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
int ha_tokudb::write_frm_data(DB* db, DB_TXN* txn, const char* frm_name) {
TOKUDB_HANDLER_DBUG_ENTER("%p %p %s", db, txn, frm_name);
@@ -2110,7 +2105,9 @@ int ha_tokudb::remove_frm_data(DB *db, DB_TXN *txn) {
return remove_from_status(db, hatoku_frm_data, txn);
}
static int smart_dbt_callback_verify_frm (DBT const *key, DBT const *row, void *context) {
static int smart_dbt_callback_verify_frm(TOKUDB_UNUSED(DBT const* key),
DBT const* row,
void* context) {
DBT* stored_frm = (DBT *)context;
stored_frm->size = row->size;
stored_frm->data = (uchar *)tokudb::memory::malloc(row->size, MYF(MY_WME));
@@ -2170,6 +2167,7 @@ cleanup:
tokudb::memory::free(stored_frm.data);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
//
// Updates status.tokudb with a new max value used for the auto increment column
@@ -2881,11 +2879,11 @@ DBT* ha_tokudb::pack_key(
key_length,
key_length > 0 ? key_ptr[0] : 0,
inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
}
#endif
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
@@ -2930,7 +2928,7 @@ DBT* ha_tokudb::pack_key(
DBUG_RETURN(key);
}
#if TOKU_INCLUDE_EXTENDED_KEYS
#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
DBT* ha_tokudb::pack_ext_key(
DBT* key,
uint keynr,
@@ -3036,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key(
dbug_tmp_restore_column_map(table->write_set, old_map);
DBUG_RETURN(key);
}
#endif
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
//
// get max used hidden primary key value
@@ -3383,21 +3381,21 @@ int ha_tokudb::bulk_insert_poll(void* extra, float progress) {
#endif
return 0;
}
void ha_tokudb::loader_add_index_err(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void ha_tokudb::loader_add_index_err(TOKUDB_UNUSED(DB* db),
TOKUDB_UNUSED(int i),
TOKUDB_UNUSED(int err),
TOKUDB_UNUSED(DBT* key),
TOKUDB_UNUSED(DBT* val),
void* error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
}
void ha_tokudb::loader_dup(DB* db,
int i,
void ha_tokudb::loader_dup(TOKUDB_UNUSED(DB* db),
TOKUDB_UNUSED(int i),
int err,
DBT* key,
DBT* val,
TOKUDB_UNUSED(DBT* val),
void* error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
@@ -3412,7 +3410,7 @@ void ha_tokudb::loader_dup(DB* db,
// (ha_tokudb::write_row). If start_bulk_insert is called, then
// this is guaranteed to be called.
//
int ha_tokudb::end_bulk_insert(bool abort) {
int ha_tokudb::end_bulk_insert(TOKUDB_UNUSED(bool abort)) {
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
THD* thd = ha_thd();
@@ -3660,7 +3658,8 @@ cleanup:
return error;
}
static void maybe_do_unique_checks_delay(THD *thd) {
#if defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
static void maybe_do_unique_checks_delay_fn(THD *thd) {
if (thd->slave_thread) {
uint64_t delay_ms = tokudb::sysvars::rpl_unique_checks_delay(thd);
if (delay_ms)
@@ -3668,11 +3667,19 @@ static void maybe_do_unique_checks_delay(THD *thd) {
}
}
#define maybe_do_unique_checks_delay(__thd) \
(maybe_do_unique_checks_delay_fn(__thd))
#define maybe_do_unique_checks_delay_if_flags_set( \
__thd, __flags_set, __flags_check) \
{ if (((__flags_set) & DB_OPFLAGS_MASK) == \
(__flags_check)) maybe_do_unique_checks_delay_fn(__thd); }
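// the _if_flags_set variant above only injects the test delay when the put
// flags select the unique-check (DB_NOOVERWRITE) code path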
static bool need_read_only(THD *thd) {
return opt_readonly || !tokudb::sysvars::rpl_check_readonly(thd);
}
static bool do_unique_checks(THD *thd, bool do_rpl_event) {
static bool do_unique_checks_fn(THD *thd, bool do_rpl_event) {
if (do_rpl_event &&
thd->slave_thread &&
need_read_only(thd) &&
@@ -3683,6 +3690,26 @@ static bool do_unique_checks(THD *thd, bool do_rpl_event) {
}
}
#define do_unique_checks(__thd, __flags) \
(do_unique_checks_fn(__thd, __flags))
#else
#define maybe_do_unique_checks_delay(__thd) ((void)0)
#define maybe_do_unique_checks_delay_if_flags_set( \
__thd, __flags_set, __flags_check) \
((void)0)
static bool do_unique_checks_fn(THD *thd) {
return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS);
}
#define do_unique_checks(__thd, _flags) \
(do_unique_checks_fn(__thd))
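// when RFR support is compiled out the delay hooks become no-ops and unique
// checks simply follow the session's RELAXED_UNIQUE_CHECKS option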
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
int error = 0;
//
@@ -3867,7 +3894,11 @@ void ha_tokudb::set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* p
}
}
int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) {
int ha_tokudb::insert_row_to_main_dictionary(
DBT* pk_key,
DBT* pk_val,
DB_TXN* txn) {
int error = 0;
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
assert_always(curr_num_DBs == 1);
@@ -3877,8 +3908,7 @@ int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk
set_main_dict_put_flags(thd, true, &put_flags);
// for test, make unique checks have a very long duration
if ((put_flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
maybe_do_unique_checks_delay(thd);
maybe_do_unique_checks_delay_if_flags_set(thd, put_flags, DB_NOOVERWRITE);
error = share->file->put(share->file, txn, pk_key, pk_val, put_flags);
if (error) {
@@ -3897,8 +3927,7 @@ int ha_tokudb::insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN
uint32_t flags = mult_put_flags[primary_key];
// for test, make unique checks have a very long duration
if ((flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
maybe_do_unique_checks_delay(thd);
maybe_do_unique_checks_delay_if_flags_set(thd, flags, DB_NOOVERWRITE);
// the insert ignore optimization uses DB_NOOVERWRITE_NO_ERROR,
// which is not allowed with env->put_multiple.
@@ -4091,7 +4120,7 @@ int ha_tokudb::write_row(uchar * record) {
goto cleanup;
}
if (curr_num_DBs == 1) {
error = insert_row_to_main_dictionary(record, &prim_key, &row, txn);
error = insert_row_to_main_dictionary(&prim_key, &row, txn);
if (error) { goto cleanup; }
} else {
error = insert_rows_to_dictionaries_mult(&prim_key, &row, txn, thd);
@@ -4619,10 +4648,10 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
}
tokudb_active_index = keynr;
#if TOKU_CLUSTERING_IS_COVERING
#if defined(TOKU_CLUSTERING_IS_COVERING) && TOKU_CLUSTERING_IS_COVERING
if (keynr < table->s->keys && table->key_info[keynr].option_struct->clustering)
key_read = false;
#endif
#endif // defined(TOKU_CLUSTERING_IS_COVERING) && TOKU_CLUSTERING_IS_COVERING
last_cursor_error = 0;
range_lock_grabbed = false;
@@ -4709,8 +4738,7 @@ int ha_tokudb::index_end() {
TOKUDB_HANDLER_DBUG_RETURN(0);
}
int ha_tokudb::handle_cursor_error(int error, int err_to_return, uint keynr) {
int ha_tokudb::handle_cursor_error(int error, int err_to_return) {
TOKUDB_HANDLER_DBUG_ENTER("");
if (error) {
error = map_to_handler_error(error);
@@ -4847,21 +4875,37 @@ int ha_tokudb::read_full_row(uchar * buf) {
// assumes key is stored in this->last_key
//
error = share->file->getf_set(
share->file,
transaction,
cursor_flags,
&last_key,
smart_dbt_callback_rowread_ptquery,
&info
);
error = share->file->getf_set(share->file,
transaction,
cursor_flags,
&last_key,
smart_dbt_callback_rowread_ptquery,
&info);
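// debug-only hook: simulate a missing primary-key row to exercise the error path below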
DBUG_EXECUTE_IF("tokudb_fake_db_notfound_error_in_read_full_row", {
error = DB_NOTFOUND;
});
if (error) {
if (error == DB_LOCK_NOTGRANTED) {
error = HA_ERR_LOCK_WAIT_TIMEOUT;
} else if (error == DB_NOTFOUND) {
error = HA_ERR_CRASHED;
if (tokudb_active_index < share->_keys) {
sql_print_error(
"ha_tokudb::read_full_row on table %s cound not locate "
"record in PK that matches record found in key %s",
share->full_table_name(),
share->_key_descriptors[tokudb_active_index]._name);
} else {
sql_print_error(
"ha_tokudb::read_full_row on table %s cound not locate "
"record in PK that matches record found in key %d",
share->full_table_name(),
tokudb_active_index);
}
}
table->status = STATUS_NOT_FOUND;
TOKUDB_HANDLER_DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error);
}
TOKUDB_HANDLER_DBUG_RETURN(error);
@@ -4912,7 +4956,7 @@ int ha_tokudb::index_next_same(uchar* buf, const uchar* key, uint keylen) {
}
cleanup:
error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_END_OF_FILE);
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -5039,7 +5083,7 @@ int ha_tokudb::index_read(
error = HA_ERR_UNSUPPORTED;
break;
}
error = handle_cursor_error(error,HA_ERR_KEY_NOT_FOUND,tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_KEY_NOT_FOUND);
if (!error && !key_read && tokudb_active_index != primary_key && !key_is_clustering(&table->key_info[tokudb_active_index])) {
error = read_full_row(buf);
}
@@ -5546,11 +5590,7 @@ int ha_tokudb::get_next(
bulk_fetch_iteration++;
}
error =
handle_cursor_error(
error,
HA_ERR_END_OF_FILE,
tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_END_OF_FILE);
if (error) {
goto cleanup;
}
@@ -5581,11 +5621,7 @@ int ha_tokudb::get_next(
SMART_DBT_CALLBACK(do_key_read),
&info);
}
error =
handle_cursor_error(
error,
HA_ERR_END_OF_FILE,
tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_END_OF_FILE);
}
}
}
@@ -5683,7 +5719,7 @@ int ha_tokudb::index_first(uchar * buf) {
info.keynr = tokudb_active_index;
error = cursor->c_getf_first(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
error = handle_cursor_error(error,HA_ERR_END_OF_FILE,tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_END_OF_FILE);
//
// still need to get entire contents of the row if operation done on
@@ -5727,7 +5763,7 @@ int ha_tokudb::index_last(uchar * buf) {
info.keynr = tokudb_active_index;
error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK(key_read), &info);
error = handle_cursor_error(error,HA_ERR_END_OF_FILE,tokudb_active_index);
error = handle_cursor_error(error, HA_ERR_END_OF_FILE);
//
// still need to get entire contents of the row if operation done on
// secondary DB and it was NOT a covering index
@@ -5910,14 +5946,16 @@ int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
ha_statistic_increment(&SSV::ha_read_rnd_count);
tokudb_active_index = MAX_KEY;
// test rpl slave by inducing a delay before the point query
THD *thd = ha_thd();
#if defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
// test rpl slave by inducing a delay before the point query
if (thd->slave_thread && (in_rpl_delete_rows || in_rpl_update_rows)) {
DBUG_EXECUTE_IF("tokudb_crash_if_rpl_looks_up_row", DBUG_ASSERT(0););
uint64_t delay_ms = tokudb::sysvars::rpl_lookup_rows_delay(thd);
if (delay_ms)
usleep(delay_ms * 1000);
}
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
info.ha = this;
info.buf = buf;
@@ -6107,11 +6145,11 @@ void ha_tokudb::position(const uchar * record) {
int ha_tokudb::info(uint flag) {
TOKUDB_HANDLER_DBUG_ENTER("%d", flag);
int error = 0;
#if TOKU_CLUSTERING_IS_COVERING
#if defined(TOKU_CLUSTERING_IS_COVERING) && TOKU_CLUSTERING_IS_COVERING
for (uint i=0; i < table->s->keys; i++)
if (key_is_clustering(&table->key_info[i]))
table->covering_keys.set_bit(i);
#endif
#endif // defined(TOKU_CLUSTERING_IS_COVERING) && TOKU_CLUSTERING_IS_COVERING
DB_TXN* txn = NULL;
if (flag & HA_STATUS_VARIABLE) {
stats.records = share->row_count() + share->rows_from_locked_table;
@@ -6685,12 +6723,14 @@ static toku_compression_method get_compression_method(DB* file) {
return method;
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
#if defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) && \
TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type ha_tokudb::get_row_type() const {
toku_compression_method compression_method = get_compression_method(share->file);
return toku_compression_method_to_row_type(compression_method);
}
#endif
#endif // defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) &&
// TOKU_INCLUDE_ROW_TYPE_COMPRESSION
static int create_sub_table(
const char* table_name,
@@ -6815,7 +6855,8 @@ void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
create_info->auto_increment_value = stats.auto_increment_value;
}
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
#if defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) && \
TOKU_INCLUDE_ROW_TYPE_COMPRESSION
if (!(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
// show create table asks us to update this create_info, this makes it
// so we'll always show what compression type we're using
@@ -6825,7 +6866,8 @@ void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
create_info->row_type = ROW_TYPE_DEFAULT;
}
}
#endif
#endif // defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) &&
// TOKU_INCLUDE_ROW_TYPE_COMPRESSION
}
//
@@ -6888,7 +6930,7 @@ int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN
// some tracing moved out of ha_tokudb::create, because ::create was
// getting cluttered
//
void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
void ha_tokudb::trace_create_table_info(TABLE* form) {
uint i;
//
// tracing information about what type of table we are creating
@@ -7221,7 +7263,7 @@ int ha_tokudb::create(
form->s->write_frm_image();
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
@@ -7247,7 +7289,7 @@ int ha_tokudb::create(
"TokuDB: invalid ROW_FORMAT specifier.");
}
}
#endif
#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
const toku_compression_method compression_method =
row_format_to_toku_compression_method(row_format);
@@ -7302,7 +7344,7 @@ int ha_tokudb::create(
}
/* do some tracing */
trace_create_table_info(name,form);
trace_create_table_info(form);
/* Create status.tokudb and save relevant metadata */
make_name(newname, newname_len, name, "status");
@@ -7340,7 +7382,8 @@ int ha_tokudb::create(
goto cleanup;
}
#if WITH_PARTITION_STORAGE_ENGINE
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || form->part_info == NULL) {
error = write_frm_data(status_block, txn, form->s->path.str);
if (error) {
@@ -7352,7 +7395,8 @@ int ha_tokudb::create(
if (error) {
goto cleanup;
}
#endif
#endif // defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
error = allocate_key_and_col_info(form->s, &kc_info);
if (error) {
@@ -7423,7 +7467,7 @@ cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
int ha_tokudb::discard_or_import_tablespace(my_bool discard) {
int ha_tokudb::discard_or_import_tablespace(TOKUDB_UNUSED(my_bool discard)) {
/*
if (discard) {
my_errno=HA_ERR_WRONG_COMMAND;
@@ -7622,7 +7666,7 @@ cleanup:
//
int ha_tokudb::delete_table(const char *name) {
TOKUDB_HANDLER_DBUG_ENTER("%s", name);
TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(name, NULL, NULL, false);
TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(name, NULL, false);
if (share) {
share->unlock();
share->release();
@@ -7684,7 +7728,7 @@ static bool tokudb_check_db_dir_exist_from_table_name(const char *table_name) {
//
int ha_tokudb::rename_table(const char *from, const char *to) {
TOKUDB_HANDLER_DBUG_ENTER("%s %s", from, to);
TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(from, NULL, NULL, false);
TOKUDB_SHARE* share = TOKUDB_SHARE::get_share(from, NULL, false);
if (share) {
share->unlock();
share->release();
@@ -8555,13 +8599,10 @@ void ha_tokudb::restore_add_index(
// Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
// With a transaction, drops dictionaries associated with indexes in key_num
//
int ha_tokudb::drop_indexes(
TABLE* table_arg,
uint* key_num,
uint num_of_keys,
KEY* key_info,
DB_TXN* txn) {
int ha_tokudb::drop_indexes(uint* key_num,
uint num_of_keys,
KEY* key_info,
DB_TXN* txn) {
TOKUDB_HANDLER_DBUG_ENTER("");
assert_always(txn);
@@ -8619,11 +8660,7 @@ cleanup:
// Restores dropped indexes in case of error in error path of
// prepare_drop_index and alter_table_phase2
//
void ha_tokudb::restore_drop_indexes(
TABLE* table_arg,
uint* key_num,
uint num_of_keys) {
void ha_tokudb::restore_drop_indexes(uint* key_num, uint num_of_keys) {
//
// reopen closed dictionaries
//
@@ -8935,6 +8972,7 @@ void ha_tokudb::remove_from_trx_handler_list() {
trx->handlers = list_delete(trx->handlers, &trx_handler_list);
}
#if defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
void ha_tokudb::rpl_before_write_rows() {
in_rpl_write_rows = true;
}
@@ -8965,6 +9003,7 @@ bool ha_tokudb::rpl_lookup_rows() {
else
return tokudb::sysvars::rpl_lookup_rows(ha_thd());
}
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
// table admin
#include "ha_tokudb_admin.cc"

View File

@@ -79,7 +79,6 @@ public:
// doesn't exist, otherwise will return NULL if an existing is not found.
static TOKUDB_SHARE* get_share(
const char* table_name,
TABLE_SHARE* table_share,
THR_LOCK_DATA* data,
bool create_new);
@@ -274,16 +273,9 @@ public:
uint32_t num_DBs;
private:
static HASH _open_tables;
static std::unordered_map<std::string, TOKUDB_SHARE*> _open_tables;
static tokudb::thread::mutex_t _open_tables_mutex;
static uchar* hash_get_key(
TOKUDB_SHARE* share,
size_t* length,
TOKUDB_UNUSED(my_bool not_used));
static void hash_free_element(TOKUDB_SHARE* share);
//*********************************
// Spans open-close-open
mutable tokudb::thread::mutex_t _mutex;
@@ -657,11 +649,11 @@ private:
DBT *create_dbt_key_from_table(DBT * key, uint keynr, uchar * buff, const uchar * record, bool* has_null, int key_length = MAX_KEY_LENGTH);
DBT* create_dbt_key_for_lookup(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length = MAX_KEY_LENGTH);
DBT *pack_key(DBT * key, uint keynr, uchar * buff, const uchar * key_ptr, uint key_length, int8_t inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
DBT *pack_ext_key(DBT * key, uint keynr, uchar * buff, const uchar * key_ptr, uint key_length, int8_t inf_byte);
#endif
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
bool key_changed(uint keynr, const uchar * old_row, const uchar * new_row);
int handle_cursor_error(int error, int err_to_return, uint keynr);
int handle_cursor_error(int error, int err_to_return);
DBT *get_pos(DBT * to, uchar * pos);
int open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn);
@@ -670,9 +662,11 @@ private:
int estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn);
bool has_auto_increment_flag(uint* index);
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
int write_frm_data(DB* db, DB_TXN* txn, const char* frm_name);
int verify_frm_data(const char* frm_name, DB_TXN* trans);
int remove_frm_data(DB *db, DB_TXN *txn);
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
int write_to_status(DB* db, HA_METADATA_KEY curr_key_data, void* data, uint size, DB_TXN* txn);
int remove_from_status(DB* db, HA_METADATA_KEY curr_key_data, DB_TXN* txn);
@@ -706,12 +700,12 @@ private:
toku_compression_method compression_method
);
int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method);
void trace_create_table_info(const char *name, TABLE * form);
void trace_create_table_info(TABLE* form);
int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags);
int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd);
void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags);
int insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn);
int insert_row_to_main_dictionary(DBT* pk_key, DBT* pk_val, DB_TXN* txn);
int insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd);
void test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val);
uint32_t fill_row_mutator(
@@ -921,15 +915,15 @@ public:
Item* idx_cond_push(uint keyno, class Item* idx_cond);
void cancel_pushed_idx_cond();
#if TOKU_INCLUDE_ALTER_56
#if defined(TOKU_INCLUDE_ALTER_56) && TOKU_INCLUDE_ALTER_56
public:
enum_alter_inplace_result check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
bool prepare_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
bool inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
bool commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info, bool commit);
private:
int alter_table_add_index(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
int alter_table_drop_index(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
int alter_table_add_index(Alter_inplace_info* ha_alter_info);
int alter_table_drop_index(Alter_inplace_info* ha_alter_info);
int alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
int alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
int alter_table_expand_columns(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
@@ -937,18 +931,21 @@ public:
int alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
void print_alter_info(TABLE *altered_table, Alter_inplace_info *ha_alter_info);
int setup_kc_info(TABLE *altered_table, KEY_AND_COL_INFO *kc_info);
int new_row_descriptor(TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info, uint32_t idx, DBT *row_descriptor);
int new_row_descriptor(TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
uint32_t idx,
DBT* row_descriptor);
public:
#endif
#if TOKU_INCLUDE_ALTER_55
#endif // defined(TOKU_INCLUDE_ALTER_56) && TOKU_INCLUDE_ALTER_56
#if defined(TOKU_INCLUDE_ALTER_55) && TOKU_INCLUDE_ALTER_55
public:
// Returns true if the 5.6 inplace alter table interface is used.
bool try_hot_alter_table();
// Used by the partition storage engine to provide new frm data for the table.
int new_alter_table_frm_data(const uchar *frm_data, size_t frm_len);
#endif
#endif // defined(TOKU_INCLUDE_ALTER_55) && TOKU_INCLUDE_ALTER_55
private:
int tokudb_add_index(TABLE* table_arg,
@@ -962,12 +959,8 @@ public:
uint num_of_keys,
bool incremented_numDBs,
bool modified_DBs);
int drop_indexes(TABLE* table_arg,
uint* key_num,
uint num_of_keys,
KEY* key_info,
DB_TXN* txn);
void restore_drop_indexes(TABLE* table_arg, uint* key_num, uint num_of_keys);
int drop_indexes(uint* key_num, uint num_of_keys, KEY* key_info, DB_TXN* txn);
void restore_drop_indexes(uint* key_num, uint num_of_keys);
public:
// delete all rows from the table
@@ -1018,9 +1011,11 @@ public:
uchar* buf,
DBT* key_to_compare);
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
#if defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) && \
TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type get_row_type() const;
#endif
#endif // defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) &&
// TOKU_INCLUDE_ROW_TYPE_COMPRESSION
private:
int read_full_row(uchar * buf);
int __close();
@@ -1034,18 +1029,35 @@ private:
void close_dsmrr();
void reset_dsmrr();
#if TOKU_INCLUDE_WRITE_FRM_DATA
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
int write_frm_data(const uchar *frm_data, size_t frm_len);
#endif
#if TOKU_INCLUDE_UPSERT
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
private:
int fast_update(THD *thd, List<Item> &update_fields, List<Item> &update_values, Item *conds);
bool check_fast_update(THD *thd, List<Item> &update_fields, List<Item> &update_values, Item *conds);
int send_update_message(List<Item> &update_fields, List<Item> &update_values, Item *conds, DB_TXN *txn);
int upsert(THD *thd, List<Item> &update_fields, List<Item> &update_values);
bool check_upsert(THD *thd, List<Item> &update_fields, List<Item> &update_values);
int send_upsert_message(THD *thd, List<Item> &update_fields, List<Item> &update_values, DB_TXN *txn);
#endif
#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
MY_NODISCARD int fast_update(THD *thd,
List<Item> &update_fields,
List<Item> &update_values,
Item *conds);
MY_NODISCARD bool check_fast_update(THD *thd,
List<Item> &update_fields,
List<Item> &update_values,
Item *conds);
MY_NODISCARD int send_update_message(List<Item> &update_fields,
List<Item> &update_values,
Item *conds,
DB_TXN *txn);
MY_NODISCARD int upsert(THD *thd,
List<Item> &update_fields,
List<Item> &update_values);
MY_NODISCARD bool check_upsert(THD *thd,
List<Item> &update_fields,
List<Item> &update_values);
MY_NODISCARD int send_upsert_message(List<Item> &update_fields,
List<Item> &update_values,
DB_TXN *txn);
#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
public:
// mysql sometimes retires a txn before a cursor that references the txn is closed.
// for example, commit is sometimes called before index_end. the following methods
@@ -1060,6 +1072,7 @@ private:
int do_optimize(THD *thd);
int map_to_handler_error(int error);
#if defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
public:
void rpl_before_write_rows();
void rpl_after_write_rows();
@@ -1072,6 +1085,7 @@ private:
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
#endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
#endif // _HA_TOKUDB_H

View File

@@ -760,7 +760,7 @@ done:
} // namespace tokudb
int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
int ha_tokudb::analyze(THD *thd, TOKUDB_UNUSED(HA_CHECK_OPT *check_opt)) {
TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
int result = HA_ADMIN_OK;
tokudb::sysvars::analyze_mode_t mode = tokudb::sysvars::analyze_mode(thd);
@@ -985,7 +985,8 @@ cleanup:
TOKUDB_HANDLER_DBUG_RETURN(error);
}
int ha_tokudb::optimize(THD* thd, HA_CHECK_OPT* check_opt) {
int ha_tokudb::optimize(TOKUDB_UNUSED(THD* thd),
TOKUDB_UNUSED(HA_CHECK_OPT* check_opt)) {
TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name());
int error;
#if TOKU_OPTIMIZE_WITH_RECREATE
@@ -1000,7 +1001,8 @@ struct check_context {
THD* thd;
};
static int ha_tokudb_check_progress(void* extra, float progress) {
static int ha_tokudb_check_progress(void* extra,
TOKUDB_UNUSED(float progress)) {
struct check_context* context = (struct check_context*)extra;
int result = 0;
if (thd_killed(context->thd))

View File

@@ -35,7 +35,11 @@ bool ha_tokudb::try_hot_alter_table() {
}
int ha_tokudb::new_alter_table_frm_data(const uchar *frm_data, size_t frm_len) {
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
return write_frm_data(frm_data, frm_len);
#else
return 0;
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
}
#endif

View File

@@ -163,17 +163,13 @@ static int find_changed_fields(
return changed_fields.elements();
}
static bool change_length_is_supported(
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx);
static bool change_length_is_supported(TABLE* table,
TABLE* altered_table,
tokudb_alter_ctx* ctx);
static bool change_type_is_supported(
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx);
static bool change_type_is_supported(TABLE* table,
TABLE* altered_table,
tokudb_alter_ctx* ctx);
// The ha_alter_info->handler_flags can not be trusted.
// This function maps the bogus handler flags to something we like.
@@ -248,6 +244,40 @@ static bool only_flags(ulong bits, ulong mask) {
return (bits & mask) != 0 && (bits & ~mask) == 0;
}
// Table create options that should be ignored by TokuDB
// There are 25 total create options defined by mysql server (see handler.h),
// and only 4 options will touch engine data, either rebuild engine data or
// just update meta info:
// 1. HA_CREATE_USED_AUTO update auto_inc info
// 2. HA_CREATE_USED_CHARSET rebuild table if contains character columns
// 3. HA_CREATE_USED_ENGINE rebuild table
// 4. HA_CREATE_USED_ROW_FORMAT update compression method info
//
// All the others are either not supported by TokuDB or do not need to
// touch engine data.
static constexpr uint32_t TOKUDB_IGNORED_ALTER_CREATE_OPTION_FIELDS =
HA_CREATE_USED_RAID | // deprecated field
HA_CREATE_USED_UNION | // for MERGE table
HA_CREATE_USED_INSERT_METHOD | // for MERGE table
HA_CREATE_USED_MIN_ROWS | // for MEMORY table
HA_CREATE_USED_MAX_ROWS | // for NDB table
HA_CREATE_USED_AVG_ROW_LENGTH | // for MyISAM table
HA_CREATE_USED_PACK_KEYS | // for MyISAM table
HA_CREATE_USED_DEFAULT_CHARSET | // no need to rebuild
HA_CREATE_USED_DATADIR | // ignored by alter
HA_CREATE_USED_INDEXDIR | // ignored by alter
HA_CREATE_USED_CHECKSUM | // for MyISAM table
HA_CREATE_USED_DELAY_KEY_WRITE | // for MyISAM table
HA_CREATE_USED_COMMENT | // no need to rebuild
HA_CREATE_USED_PASSWORD | // not supported by community version
HA_CREATE_USED_CONNECTION | // for FEDERATED table
HA_CREATE_USED_KEY_BLOCK_SIZE | // not supported by TokuDB
HA_CREATE_USED_TRANSACTIONAL | // unused
HA_CREATE_USED_PAGE_CHECKSUM | // unused
HA_CREATE_USED_STATS_PERSISTENT | // not supported by TokuDB
HA_CREATE_USED_STATS_AUTO_RECALC | // not supported by TokuDB
HA_CREATE_USED_STATS_SAMPLE_PAGES; // not supported by TokuDB
// Check if an alter table operation on this table and described by the alter
// table parameters is supported inplace and if so, what type of locking is
// needed to execute it. return values:
@@ -446,10 +476,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
// change column length
if (change_length_is_supported(
table,
altered_table,
ha_alter_info, ctx)) {
if (change_length_is_supported(table, altered_table, ctx)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
} else if ((ctx->handler_flags & Alter_inplace_info::ALTER_COLUMN_TYPE) &&
@@ -465,10 +492,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
setup_kc_info(altered_table, ctx->altered_table_kc_info) == 0) {
// change column type
if (change_type_is_supported(
table,
altered_table,
ha_alter_info, ctx)) {
if (change_type_is_supported(table, altered_table, ctx)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
} else if (only_flags(
@@ -503,6 +527,10 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
tokudb::sysvars::alter_print_error(thd) != 0)) {
result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
} else if (only_flags(
create_info->used_fields,
TOKUDB_IGNORED_ALTER_CREATE_OPTION_FIELDS)) {
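// none of these options require touching engine data, so the alter can run
// in place without a lock after the prepare phase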
result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE;
}
}
#if TOKU_OPTIMIZE_WITH_RECREATE
@@ -538,10 +566,8 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(
}
// Prepare for the alter operations
bool ha_tokudb::prepare_inplace_alter_table(
TABLE* altered_table,
Alter_inplace_info* ha_alter_info) {
bool ha_tokudb::prepare_inplace_alter_table(TOKUDB_UNUSED(TABLE* altered_table),
Alter_inplace_info* ha_alter_info) {
TOKUDB_HANDLER_DBUG_ENTER("");
tokudb_alter_ctx* ctx =
static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
@@ -571,13 +597,13 @@ bool ha_tokudb::inplace_alter_table(
(ctx->handler_flags &
(Alter_inplace_info::DROP_INDEX +
Alter_inplace_info::DROP_UNIQUE_INDEX))) {
error = alter_table_drop_index(altered_table, ha_alter_info);
error = alter_table_drop_index(ha_alter_info);
}
if (error == 0 &&
(ctx->handler_flags &
(Alter_inplace_info::ADD_INDEX +
Alter_inplace_info::ADD_UNIQUE_INDEX))) {
error = alter_table_add_index(altered_table, ha_alter_info);
error = alter_table_add_index(ha_alter_info);
}
if (error == 0 &&
(ctx->handler_flags &
@@ -644,20 +670,24 @@ bool ha_tokudb::inplace_alter_table(
error = do_optimize(ha_thd());
}
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
#if WITH_PARTITION_STORAGE_ENGINE
#if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
if (error == 0 &&
(TOKU_PARTITION_WRITE_FRM_DATA || altered_table->part_info == NULL)) {
#else
if (error == 0) {
#endif
#endif // defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
error = write_frm_data(
share->status_block,
ctx->alter_txn,
altered_table->s->path.str);
}
#endif
#endif // (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) ||
// (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
bool result = false; // success
if (error) {
@@ -668,9 +698,7 @@ bool ha_tokudb::inplace_alter_table(
DBUG_RETURN(result);
}
int ha_tokudb::alter_table_add_index(
TABLE* altered_table,
Alter_inplace_info* ha_alter_info) {
int ha_tokudb::alter_table_add_index(Alter_inplace_info* ha_alter_info) {
// sort keys in add index order
KEY* key_info = (KEY*)tokudb::memory::malloc(
@@ -741,9 +769,7 @@ static bool find_index_of_key(
return false;
}
int ha_tokudb::alter_table_drop_index(
TABLE* altered_table,
Alter_inplace_info* ha_alter_info) {
int ha_tokudb::alter_table_drop_index(Alter_inplace_info* ha_alter_info) {
KEY *key_info = table->key_info;
// translate key names to indexes into the key_info array
@@ -771,12 +797,10 @@ int ha_tokudb::alter_table_drop_index(
static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);
ctx->drop_index_changed = true;
int error = drop_indexes(
table,
index_drop_offsets,
ha_alter_info->index_drop_count,
key_info,
ctx->alter_txn);
int error = drop_indexes(index_drop_offsets,
ha_alter_info->index_drop_count,
key_info,
ctx->alter_txn);
if (error == 0)
ctx->reset_card = true;
@@ -837,11 +861,7 @@ int ha_tokudb::alter_table_add_or_drop_column(
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
error = new_row_descriptor(
table,
altered_table,
ha_alter_info,
i,
&row_descriptor);
altered_table, ha_alter_info, i, &row_descriptor);
if (error)
goto cleanup;
error = share->key_file[i]->change_descriptor(
@@ -891,11 +911,9 @@ int ha_tokudb::alter_table_add_or_drop_column(
// transaction.
// If abort then abort the alter transaction and try to rollback the
// non-transactional changes.
bool ha_tokudb::commit_inplace_alter_table(
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
bool commit) {
bool ha_tokudb::commit_inplace_alter_table(TOKUDB_UNUSED(TABLE* altered_table),
Alter_inplace_info* ha_alter_info,
bool commit) {
TOKUDB_HANDLER_DBUG_ENTER("");
tokudb_alter_ctx* ctx =
@@ -911,13 +929,14 @@ bool ha_tokudb::commit_inplace_alter_table(
ha_alter_info->group_commit_ctx = NULL;
}
#endif
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#if (50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599) || \
(100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
#if WITH_PARTITION_STORAGE_ENGINE
#if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || altered_table->part_info == NULL) {
#else
if (true) {
#endif
#endif // defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
int error = write_frm_data(
share->status_block,
ctx->alter_txn,
@@ -928,7 +947,9 @@ bool ha_tokudb::commit_inplace_alter_table(
print_error(error, MYF(0));
}
}
#endif
#endif // (50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599) ||
// (100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
}
if (!commit) {
@@ -1010,10 +1031,8 @@ bool ha_tokudb::commit_inplace_alter_table(
&index_drop_offsets[i]);
assert_always(found);
}
restore_drop_indexes(
table,
index_drop_offsets,
ha_alter_info->index_drop_count);
restore_drop_indexes(index_drop_offsets,
ha_alter_info->index_drop_count);
}
if (ctx->compression_changed) {
uint32_t curr_num_DBs =
@@ -1060,11 +1079,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
error = new_row_descriptor(
table,
altered_table,
ha_alter_info,
i,
&row_descriptor);
altered_table, ha_alter_info, i, &row_descriptor);
if (error)
break;
error = share->key_file[i]->change_descriptor(
@@ -1142,14 +1157,9 @@ static bool field_in_key_of_table(TABLE *table, Field *field) {
// Return true if all changed varchar/varbinary field lengths can be changed
// inplace, otherwise return false
static bool change_varchar_length_is_supported(
Field* old_field,
Field* new_field,
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx) {
static bool change_varchar_length_is_supported(Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
if (old_field->real_type() != MYSQL_TYPE_VARCHAR ||
new_field->real_type() != MYSQL_TYPE_VARCHAR ||
old_field->binary() != new_field->binary() ||
@@ -1168,12 +1178,9 @@ static bool change_varchar_length_is_supported(
// Return true if all changed field lengths can be changed inplace, otherwise
// return false
static bool change_length_is_supported(
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx) {
static bool change_length_is_supported(TABLE* table,
TABLE* altered_table,
tokudb_alter_ctx* ctx) {
if (table->s->fields != altered_table->s->fields)
return false;
if (table->s->null_bytes != altered_table->s->null_bytes)
@@ -1193,13 +1200,7 @@ static bool change_length_is_supported(
if (field_in_key_of_table(table, old_field) ||
field_in_key_of_table(altered_table, new_field))
return false; // not in any key
if (!change_varchar_length_is_supported(
old_field,
new_field,
table,
altered_table,
ha_alter_info,
ctx))
if (!change_varchar_length_is_supported(old_field, new_field, ctx))
return false;
}
@@ -1307,11 +1308,7 @@ int ha_tokudb::alter_table_expand_one_column(
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
error = new_row_descriptor(
table,
altered_table,
ha_alter_info,
i,
&row_descriptor);
altered_table, ha_alter_info, i, &row_descriptor);
if (error)
break;
error = share->key_file[i]->change_descriptor(
@@ -1426,11 +1423,7 @@ int ha_tokudb::alter_table_expand_blobs(
// change to a new descriptor
DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
error = new_row_descriptor(
table,
altered_table,
ha_alter_info,
i,
&row_descriptor);
altered_table, ha_alter_info, i, &row_descriptor);
if (error)
break;
error = share->key_file[i]->change_descriptor(
@@ -1486,13 +1479,9 @@ int ha_tokudb::alter_table_expand_blobs(
}
// Return true if two fixed length fields can be changed inplace
static bool change_fixed_length_is_supported(
TABLE* table,
TABLE* altered_table,
Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
static bool change_fixed_length_is_supported(Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
// no change in size is supported
if (old_field->pack_length() == new_field->pack_length())
return true;
@@ -1503,13 +1492,9 @@ static bool change_fixed_length_is_supported(
return true;
}
static bool change_blob_length_is_supported(
TABLE* table,
TABLE* altered_table,
Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
static bool change_blob_length_is_supported(Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
// blob -> longer or equal length blob
if (old_field->binary() && new_field->binary() &&
old_field->pack_length() <= new_field->pack_length()) {
@@ -1541,26 +1526,16 @@ static bool is_int_type(enum_field_types t) {
}
// Return true if two field types can be changed inplace
static bool change_field_type_is_supported(
Field* old_field,
Field* new_field,
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx) {
static bool change_field_type_is_supported(Field* old_field,
Field* new_field,
tokudb_alter_ctx* ctx) {
enum_field_types old_type = old_field->real_type();
enum_field_types new_type = new_field->real_type();
if (is_int_type(old_type)) {
// int and unsigned int expansion
if (is_int_type(new_type) &&
is_unsigned(old_field) == is_unsigned(new_field))
return change_fixed_length_is_supported(
table,
altered_table,
old_field,
new_field,
ctx);
return change_fixed_length_is_supported(old_field, new_field, ctx);
else
return false;
} else if (old_type == MYSQL_TYPE_STRING) {
@@ -1568,43 +1543,24 @@ static bool change_field_type_is_supported(
if (new_type == MYSQL_TYPE_STRING &&
old_field->binary() == new_field->binary() &&
old_field->charset()->number == new_field->charset()->number)
return change_fixed_length_is_supported(
table,
altered_table,
old_field,
new_field,
ctx);
return change_fixed_length_is_supported(old_field, new_field, ctx);
else
return false;
} else if (old_type == MYSQL_TYPE_VARCHAR) {
// varchar(X) -> varchar(Y) and varbinary(X) -> varbinary(Y) expansion
// where X < 256 <= Y the ALTER_COLUMN_TYPE handler flag is set for
// these cases
return change_varchar_length_is_supported(
old_field,
new_field,
table,
altered_table,
ha_alter_info,
ctx);
return change_varchar_length_is_supported(old_field, new_field, ctx);
} else if (old_type == MYSQL_TYPE_BLOB && new_type == MYSQL_TYPE_BLOB) {
return change_blob_length_is_supported(
table,
altered_table,
old_field,
new_field,
ctx);
return change_blob_length_is_supported(old_field, new_field, ctx);
} else
return false;
}
// Return true if all changed field types can be changed inplace
static bool change_type_is_supported(
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
tokudb_alter_ctx* ctx) {
static bool change_type_is_supported(TABLE* table,
TABLE* altered_table,
tokudb_alter_ctx* ctx) {
if (table->s->null_bytes != altered_table->s->null_bytes)
return false;
if (table->s->fields != altered_table->s->fields)
@@ -1620,13 +1576,7 @@ static bool change_type_is_supported(
if (field_in_key_of_table(table, old_field) ||
field_in_key_of_table(altered_table, new_field))
return false;
if (!change_field_type_is_supported(
old_field,
new_field,
table,
altered_table,
ha_alter_info,
ctx))
if (!change_field_type_is_supported(old_field, new_field, ctx))
return false;
}
return true;
@@ -1636,13 +1586,10 @@ static bool change_type_is_supported(
// table identified with idx.
// Return the new descriptor in the row_descriptor DBT.
// Return non-zero on error.
int ha_tokudb::new_row_descriptor(
TABLE* table,
TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
uint32_t idx,
DBT* row_descriptor) {
int ha_tokudb::new_row_descriptor(TABLE* altered_table,
Alter_inplace_info* ha_alter_info,
uint32_t idx,
DBT* row_descriptor) {
int error = 0;
tokudb_alter_ctx* ctx =
static_cast<tokudb_alter_ctx*>(ha_alter_info->handler_ctx);

View File

@@ -764,13 +764,14 @@ exit:
return retval;
}
#if TOKU_INCLUDE_WRITE_FRM_DATA
#if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
// write the new frm data to the status dictionary using the alter table
// transaction
int ha_tokudb::write_frm_data(const uchar* frm_data, size_t frm_len) {
TOKUDB_DBUG_ENTER("write_frm_data");
int error = 0;
#if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) {
// write frmdata to status
THD* thd = ha_thd();
@@ -787,9 +788,10 @@ int ha_tokudb::write_frm_data(const uchar* frm_data, size_t frm_len) {
(uint)frm_len,
txn);
}
#endif // defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
TOKUDB_DBUG_RETURN(error);
}
#endif
#endif // defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#endif

View File

@@ -23,8 +23,6 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#if TOKU_INCLUDE_UPSERT
// Point updates and upserts
// Restrictions:
@@ -52,6 +50,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
// Support more complicated update expressions
// Replace field_offset
#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@ -130,7 +129,7 @@ static void dump_item_list(const char* h, List<Item> &l) {
}
// Find a Field by its Item name
static Field* find_field_by_name(TABLE* table, Item* item) {
static Field* find_field_by_name(TOKUDB_UNUSED(TABLE* table), Item* item) {
if (item->type() != Item::FIELD_ITEM)
return NULL;
Item_field* field_item = static_cast<Item_field*>(item);
@@ -191,12 +190,9 @@ static uint32_t var_field_index(
return v_index;
}
static uint32_t blob_field_index(
TABLE* table,
KEY_AND_COL_INFO* kc_info,
uint idx,
uint field_num) {
static uint32_t blob_field_index(TABLE* table,
KEY_AND_COL_INFO* kc_info,
uint field_num) {
assert_always(field_num < table->s->fields);
uint b_index;
for (b_index = 0; b_index < kc_info->num_blobs; b_index++) {
@@ -221,43 +217,46 @@ int ha_tokudb::fast_update(
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
if (!tokudb::sysvars::enable_fast_update(thd)) {
error = ENOTSUP;
goto exit;
}
if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_UPSERT))) {
dump_item_list("fields", update_fields);
dump_item_list("values", update_values);
if (conds) {
fprintf(stderr, "conds\n"); dump_item(conds); fprintf(stderr, "\n");
fprintf(stderr, "conds\n");
dump_item(conds);
fprintf(stderr, "\n");
}
}
if (update_fields.elements < 1 ||
update_fields.elements != update_values.elements) {
error = ENOTSUP; // something is fishy with the parameters
goto return_error;
goto exit;
}
if (!check_fast_update(thd, update_fields, update_values, conds)) {
error = ENOTSUP;
goto check_error;
error = HA_ERR_UNSUPPORTED;
goto exit;
}
error = send_update_message(
update_fields,
update_values,
conds,
transaction);
if (error != 0) {
goto check_error;
}
update_fields, update_values, conds, transaction);
check_error:
if (error != 0) {
if (tokudb::sysvars::disable_slow_update(thd) != 0)
if (error) {
int mapped_error = map_to_handler_error(error);
if (mapped_error == error)
error = HA_ERR_UNSUPPORTED;
if (error != ENOTSUP)
print_error(error, MYF(0));
}
return_error:
exit:
if (error != 0 && error != ENOTSUP)
print_error(error, MYF(0));
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -773,11 +772,7 @@ static void marshall_update(
update_operation = '=';
field_type = lhs_field->binary() ? UPDATE_TYPE_BLOB : UPDATE_TYPE_TEXT;
offset =
blob_field_index(
table,
&share->kc_info,
table->s->primary_key,
lhs_field->field_index);
blob_field_index(table, &share->kc_info, lhs_field->field_index);
v_str = *rhs_item->val_str(&v_str);
v_length = v_str.length();
if (v_length >= lhs_field->max_data_length()) {
@@ -953,9 +948,13 @@ int ha_tokudb::upsert(
List<Item>& update_values) {
TOKUDB_HANDLER_DBUG_ENTER("");
int error = 0;
if (!tokudb::sysvars::enable_fast_upsert(thd)) {
error = ENOTSUP;
goto exit;
}
if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_UPSERT))) {
fprintf(stderr, "upsert\n");
dump_item_list("update_fields", update_fields);
@@ -966,28 +965,27 @@ int ha_tokudb::upsert(
if (update_fields.elements < 1 ||
update_fields.elements != update_values.elements) {
error = ENOTSUP;
goto return_error;
goto exit;
}
if (!check_upsert(thd, update_fields, update_values)) {
error = ENOTSUP;
goto check_error;
}
error = send_upsert_message(thd, update_fields, update_values, transaction);
if (error != 0) {
goto check_error;
error = HA_ERR_UNSUPPORTED;
goto exit;
}
check_error:
if (error != 0) {
if (tokudb::sysvars::disable_slow_upsert(thd) != 0)
error = send_upsert_message(update_fields, update_values, transaction);
if (error) {
int mapped_error = map_to_handler_error(error);
if (mapped_error == error)
error = HA_ERR_UNSUPPORTED;
if (error != ENOTSUP)
print_error(error, MYF(0));
}
return_error:
exit:
if (error != 0 && error != ENOTSUP)
print_error(error, MYF(0));
TOKUDB_HANDLER_DBUG_RETURN(error);
}
@@ -1036,7 +1034,6 @@ bool ha_tokudb::check_upsert(
// Generate an upsert message and send it into the primary tree.
// Return 0 if successful.
int ha_tokudb::send_upsert_message(
THD* thd,
List<Item>& update_fields,
List<Item>& update_values,
DB_TXN* txn) {
@@ -1131,5 +1128,4 @@ int ha_tokudb::send_upsert_message(
return error;
}
#endif
#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT

View File

@@ -1865,15 +1865,10 @@ static uint32_t pack_desc_pk_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_S
return pos - buf;
}
static uint32_t pack_desc_pk_offset_info(
uchar* buf,
KEY_AND_COL_INFO* kc_info,
TABLE_SHARE* table_share,
KEY_PART_INFO* key_part,
KEY* prim_key,
uchar* pk_info
)
{
static uint32_t pack_desc_pk_offset_info(uchar* buf,
KEY_PART_INFO* key_part,
KEY* prim_key,
uchar* pk_info) {
uchar* pos = buf;
uint16 field_index = key_part->field->field_index;
bool found_col_in_pk = false;
@@ -1999,7 +1994,9 @@ static uint32_t pack_desc_key_length_info(uchar* buf, KEY_AND_COL_INFO* kc_info,
return pos - buf;
}
static uint32_t pack_desc_char_info(uchar* buf, KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, KEY_PART_INFO* key_part) {
static uint32_t pack_desc_char_info(uchar* buf,
TABLE_SHARE* table_share,
KEY_PART_INFO* key_part) {
uchar* pos = buf;
uint16 field_index = key_part->field->field_index;
Field* field = table_share->field[field_index];
@@ -2561,14 +2558,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
pos += sizeof(uint32_t);
}
if (is_col_in_pk) {
pos += pack_desc_pk_offset_info(
pos,
kc_info,
table_share,
&curr_kpi,
prim_key,
pk_info
);
pos += pack_desc_pk_offset_info(pos, &curr_kpi, prim_key, pk_info);
}
else {
pos += pack_desc_offset_info(
@@ -2585,12 +2575,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
table_share,
&curr_kpi
);
pos += pack_desc_char_info(
pos,
kc_info,
table_share,
&curr_kpi
);
pos += pack_desc_char_info(pos, table_share, &curr_kpi);
}
offset = pos - buf;

View File

@@ -354,11 +354,7 @@ static uint32_t create_toku_clustering_val_pack_descriptor (
bool is_clustering
);
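// a key is treated as clustering when its value-pack descriptor is non-empty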
static inline bool is_key_clustering(
void* row_desc,
uint32_t row_desc_size
)
{
static inline bool is_key_clustering(uint32_t row_desc_size) {
return (row_desc_size > 0);
}
@@ -384,12 +380,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
KEY* prim_key
);
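// a nonzero leading byte in a packed row descriptor marks it as the primary key's descriptor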
static inline bool is_key_pk(
void* row_desc,
uint32_t row_desc_size
)
{
uchar* buf = (uchar *)row_desc;
static inline bool is_key_pk(void* row_desc) {
uchar* buf = (uchar*)row_desc;
return buf[0];
}

View File

@@ -35,7 +35,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
#include "discover.h"
#include "item_cmpfunc.h"
//#include <binlog.h>
#include "debug_sync.h"
@@ -54,12 +54,17 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include <ctype.h>
#include <stdint.h>
#if !defined(__STDC_FORMAT_MACROS)
#define __STDC_FORMAT_MACROS
#endif // !defined(__STDC_FORMAT_MACROS)
#include <inttypes.h>
#if defined(_WIN32)
#include "misc.h"
#endif
#include <string>
#include <unordered_map>
#include "db.h"
#include "toku_os.h"
#include "toku_time.h"
@@ -69,14 +74,28 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#pragma interface /* gcc class implementation */
#endif
// TOKU_INCLUDE_WRITE_FRM_DATA, TOKU_PARTITION_WRITE_FRM_DATA, and
// TOKU_INCLUDE_DISCOVER_FRM all work together as two opposing sides
// of the same functionality. The 'WRITE' includes functionality to
// write a copy of every table's .frm data into the table's status dictionary on
// CREATE or ALTER. When WRITE is in, the .frm data is also verified whenever a
// table is opened.
//
// The 'DISCOVER' then implements the MySQL table discovery API which reads
// this same data and returns it back to MySQL.
// In most cases, they should all be in or out without mixing. There may be
// extreme cases though where one side (WRITE) is supported but perhaps
// 'DISCOVERY' may not be, thus the need for individual indicators.
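// Illustrative sketch (not part of this patch) of how the two sides are
// typically gated; write_frm_data() and its arguments are hypothetical,
// while the discover assignment mirrors the handlerton setup later in this diff:
//
//   #if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
//       error = write_frm_data(status_block, txn, form->s->path.str);
//   #endif
//   #if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
//       tokudb_hton->discover = tokudb_discover;  // hand the stored .frm back to MySQL
//   #endif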
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
// mariadb 10.0
#define TOKU_USE_DB_TYPE_TOKUDB 1
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0
#define TOKU_INCLUDE_XA 1
#define TOKU_INCLUDE_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_DISCOVER_FRM 1
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
#endif
@@ -90,7 +109,10 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#define TOKU_USE_DB_TYPE_UNKNOWN 1
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_DISCOVER_FRM 1
#define TOKU_INCLUDE_RFR 1
#else
#error
#endif
@@ -102,21 +124,25 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0
#define TOKU_INCLUDE_XA 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_DISCOVER_FRM 1
#else
// mysql 5.6 with tokutek patches
#define TOKU_USE_DB_TYPE_TOKUDB 1 // has DB_TYPE_TOKUDB patch
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 // has tokudb row format compression patch
#define TOKU_INCLUDE_XA 1 // has patch that fixes TC_LOG_MMAP code
#define TOKU_INCLUDE_WRITE_FRM_DATA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_DISCOVER_FRM 1
#define TOKU_INCLUDE_UPSERT 1 // has tokudb upsert patch
#if defined(HTON_SUPPORTS_EXTENDED_KEYS)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
#define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
#define TOKU_USE_DB_TYPE_TOKUDB 1
@@ -126,6 +152,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#define TOKU_INCLUDE_XA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_DISCOVER_FRM 1
#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
@@ -142,6 +169,11 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#endif
#if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
#include "discover.h"
#endif // defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
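// Note on the guard style above, adopted throughout this patch: a bare
// "#if TOKU_INCLUDE_DISCOVER_FRM" silently treats an undefined macro as 0 and
// trips -Wundef, while "#if defined(X) && X" evaluates the same whether the
// macro is undefined, 0, or 1 and stays warning-clean when the macro is simply
// absent for a given server version. Illustrative comparison:
//
//   #if TOKU_INCLUDE_DISCOVER_FRM                                        // warns under -Wundef if undefined
//   #if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM  // warning-clean either way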
#ifdef MARIADB_BASE_VERSION
// In MariaDB 5.3, thread progress reporting was introduced.
// Only include that functionality if we're using maria 5.3 +
@@ -257,13 +289,17 @@ inline uint tokudb_uint3korr(const uchar *a) {
typedef unsigned int pfs_key_t;
#if defined(HAVE_PSI_MUTEX_INTERFACE)
#if defined(SAFE_MUTEX) || defined(HAVE_PSI_MUTEX_INTERFACE)
#define mutex_t_lock(M) M.lock(__FILE__, __LINE__)
#define mutex_t_unlock(M) M.unlock(__FILE__, __LINE__)
#else // HAVE_PSI_MUTEX_INTERFACE
#else // SAFE_MUTEX || HAVE_PSI_MUTEX_INTERFACE
#define mutex_t_lock(M) M.lock()
#endif // SAFE_MUTEX || HAVE_PSI_MUTEX_INTERFACE
#if defined(SAFE_MUTEX)
#define mutex_t_unlock(M) M.unlock(__FILE__, __LINE__)
#else // SAFE_MUTEX
#define mutex_t_unlock(M) M.unlock()
#endif // HAVE_PSI_MUTEX_INTERFACE
#endif // SAFE_MUTEX
#if defined(HAVE_PSI_RWLOCK_INTERFACE)
#define rwlock_t_lock_read(M) M.lock_read(__FILE__, __LINE__)

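The lock macros above resolve to the instrumented lock/unlock overloads, which take __FILE__ and __LINE__, when SAFE_MUTEX or the PSI mutex interface is compiled in, and to the plain overloads otherwise. A minimal usage sketch, assuming the tokudb::thread::mutex_t wrapper declared later in this diff; the mutex and function names are illustrative:

    static tokudb::thread::mutex_t example_mutex;

    static void example_critical_section() {
        mutex_t_lock(example_mutex);    // lock(__FILE__, __LINE__) under SAFE_MUTEX or PSI, lock() otherwise
        // ... touch state shared across connections ...
        mutex_t_unlock(example_mutex);  // unlock(__FILE__, __LINE__) only under SAFE_MUTEX
    }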
View File

@@ -28,6 +28,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#define TOKU_METADB_NAME "tokudb_meta"
#if defined(HAVE_PSI_MUTEX_INTERFACE)
static pfs_key_t tokudb_map_mutex_key;
static PSI_mutex_info all_tokudb_mutexes[] = {
@@ -38,6 +39,7 @@ static PSI_mutex_info all_tokudb_mutexes[] = {
static PSI_rwlock_info all_tokudb_rwlocks[] = {
{&num_DBs_lock_key, "num_DBs_lock", 0},
};
#endif /* HAVE_PSI_MUTEX_INTERFACE */
typedef struct savepoint_info {
DB_TXN* txn;
@@ -62,19 +64,21 @@ static bool tokudb_show_status(
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
#endif
#endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
// TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
#if TOKU_INCLUDE_XA
#if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
static int tokudb_rollback_by_xid(handlerton* hton, XID* xid);
#endif
#endif // defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_rollback_to_savepoint(
handlerton* hton,
@@ -92,6 +96,7 @@ static int tokudb_discover_table_existence(
const char* db,
const char* name);
#endif
#if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
static int tokudb_discover(
handlerton* hton,
THD* thd,
@@ -115,13 +120,14 @@ static int tokudb_discover3(
char* path,
uchar** frmblob,
size_t* frmlen);
#endif // defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
handlerton* tokudb_hton;
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
static tokudb::thread::mutex_t tokudb_map_mutex;
#if TOKU_THDVAR_MEMALLOC_BUG
#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@ -131,7 +137,7 @@ struct tokudb_map_pair {
static int tokudb_map_pair_cmp(void *custom_arg, const void *a, const void *b) {
#else
static int tokudb_map_pair_cmp(
const void* custom_arg,
TOKUDB_UNUSED(const void* custom_arg),
const void* a,
const void* b) {
#endif
@@ -145,7 +151,7 @@ static int tokudb_map_pair_cmp(
else
return 0;
};
#endif
#endif // defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static PARTITIONED_COUNTER tokudb_primary_key_bytes_inserted;
void toku_hton_update_primary_key_bytes_inserted(uint64_t row_size) {
@@ -371,31 +377,35 @@ static int tokudb_init_func(void *p) {
tokudb_hton->discover_table = tokudb_discover_table;
tokudb_hton->discover_table_existence = tokudb_discover_table_existence;
#else
#if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
tokudb_hton->discover = tokudb_discover;
#if defined(MYSQL_HANDLERTON_INCLUDE_DISCOVER2)
tokudb_hton->discover2 = tokudb_discover2;
#endif
#endif
#endif // MYSQL_HANDLERTON_INCLUDE_DISCOVER2
#endif // defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
#endif // 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
tokudb_hton->commit = tokudb_commit;
tokudb_hton->rollback = tokudb_rollback;
#if TOKU_INCLUDE_XA
#if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
tokudb_hton->prepare = tokudb_xa_prepare;
tokudb_hton->recover = tokudb_xa_recover;
tokudb_hton->commit_by_xid = tokudb_commit_by_xid;
tokudb_hton->rollback_by_xid = tokudb_rollback_by_xid;
#endif
#endif // defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
#endif
#endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
// TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
#if TOKU_INCLUDE_OPTION_STRUCTS
#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
#endif
#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@ -635,9 +645,9 @@ static int tokudb_init_func(void *p) {
tokudb_primary_key_bytes_inserted = create_partitioned_counter();
#if TOKU_THDVAR_MEMALLOC_BUG
#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
init_tree(&tokudb_map, 0, 0, 0, tokudb_map_pair_cmp, true, NULL, NULL);
#endif
#endif // defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
if (tokudb::sysvars::strip_frm_data) {
r = tokudb::metadata::strip_frm_data(db_env);
@@ -666,7 +676,7 @@ error:
DBUG_RETURN(true);
}
static int tokudb_done_func(void* p) {
static int tokudb_done_func(TOKUDB_UNUSED(void* p)) {
TOKUDB_DBUG_ENTER("");
tokudb::memory::free(toku_global_status_variables);
toku_global_status_variables = NULL;
@@ -682,7 +692,8 @@ static handler* tokudb_create_handler(
return new(mem_root) ha_tokudb(hton, table);
}
int tokudb_end(handlerton* hton, ha_panic_function type) {
int tokudb_end(TOKUDB_UNUSED(handlerton* hton),
TOKUDB_UNUSED(ha_panic_function type)) {
TOKUDB_DBUG_ENTER("");
int error = 0;
@@ -702,7 +713,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
// count the total number of prepared txn's that we discard
long total_prepared = 0;
#if TOKU_INCLUDE_XA
#if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
while (1) {
// get xid's
@@ -729,11 +740,11 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
total_prepared += n_prepared;
}
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
#endif
#endif // defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
error = db_env->close(
db_env,
total_prepared > 0 ? TOKUFT_DIRTY_SHUTDOWN : 0);
#if TOKU_INCLUDE_XA
#if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
if (error != 0 && total_prepared > 0) {
sql_print_error(
"%s: %ld prepared txns still live, please shutdown, error %d",
@@ -741,7 +752,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
total_prepared,
error);
} else
#endif
#endif // defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
assert_always(error == 0);
db_env = NULL;
}
@@ -751,9 +762,9 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
tokudb_primary_key_bytes_inserted = NULL;
}
#if TOKU_THDVAR_MEMALLOC_BUG
#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
delete_tree(&tokudb_map);
#endif
#endif // defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
// 3938: drop the initialized flag and unlock
tokudb_hton_initialized = 0;
@@ -762,14 +773,14 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
TOKUDB_DBUG_RETURN(error);
}
static int tokudb_close_connection(handlerton* hton, THD* thd) {
static int tokudb_close_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
int error = 0;
tokudb_trx_data* trx = (tokudb_trx_data*)thd_get_ha_data(thd, tokudb_hton);
if (trx && trx->checkpoint_lock_taken) {
error = db_env->checkpointing_resume(db_env);
}
tokudb::memory::free(trx);
#if TOKU_THDVAR_MEMALLOC_BUG
#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
mutex_t_lock(tokudb_map_mutex);
struct tokudb_map_pair key = {thd, NULL};
struct tokudb_map_pair* found_key =
@@ -780,18 +791,18 @@ static int tokudb_close_connection(handlerton* hton, THD* thd) {
tree_delete(&tokudb_map, found_key, sizeof(*found_key), NULL);
}
mutex_t_unlock(tokudb_map_mutex);
#endif
#endif // defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
return error;
}
void tokudb_kill_connection(handlerton *hton, THD *thd,
enum thd_kill_levels level) {
void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
}
bool tokudb_flush_logs(handlerton * hton) {
bool tokudb_flush_logs(TOKUDB_UNUSED(handlerton* hton)) {
TOKUDB_DBUG_ENTER("");
int error;
bool result = 0;
@@ -883,7 +894,7 @@ extern "C" enum durability_properties thd_get_durability_property(
#endif
// Determine if an fsync is used when a transaction is committed.
static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@ -907,7 +918,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
DB_TXN *this_txn = *txn;
if (this_txn) {
uint32_t syncflag =
tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
@@ -958,7 +969,7 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
TOKUDB_DBUG_RETURN(0);
}
#if TOKU_INCLUDE_XA
#if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static bool tokudb_sync_on_prepare(void) {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
// skip sync of log if fsync log period > 0
@@ -1023,7 +1034,9 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
static int tokudb_xa_recover(TOKUDB_UNUSED(handlerton* hton),
XID* xid_list,
uint len) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
@@ -1043,7 +1056,7 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
TOKUDB_DBUG_RETURN((int)num_returned);
}
static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
static int tokudb_commit_by_xid(TOKUDB_UNUSED(handlerton* hton), XID* xid) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "xid %p", xid);
@@ -1063,7 +1076,7 @@ cleanup:
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
static int tokudb_rollback_by_xid(TOKUDB_UNUSED(handlerton* hton), XID* xid) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "xid %p", xid);
@@ -1083,7 +1096,7 @@ cleanup:
TOKUDB_DBUG_RETURN(r);
}
#endif
#endif // defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
TOKUDB_DBUG_ENTER("%p", savepoint);
@@ -1210,8 +1223,9 @@ static int tokudb_discover_table_existence(
my_free(frmblob);
return res != ENOENT;
}
#endif
#endif // 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
#if defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
static int tokudb_discover(
handlerton* hton,
THD* thd,
@@ -1243,15 +1257,13 @@ static int tokudb_discover2(
return tokudb_discover3(hton, thd, db, name, path, frmblob, frmlen);
}
static int tokudb_discover3(
handlerton* hton,
THD* thd,
const char* db,
const char* name,
char* path,
uchar** frmblob,
size_t* frmlen) {
static int tokudb_discover3(TOKUDB_UNUSED(handlerton* hton),
THD* thd,
const char* db,
const char* name,
char* path,
uchar** frmblob,
size_t* frmlen) {
TOKUDB_DBUG_ENTER("%s %s %s", db, name, path);
int error;
DB* status_db = NULL;
@@ -1309,6 +1321,7 @@ cleanup:
}
TOKUDB_DBUG_RETURN(error);
}
#endif // defined(TOKU_INCLUDE_DISCOVER_FRM) && TOKU_INCLUDE_DISCOVER_FRM
#define STATPRINT(legend, val) if (legend != NULL && val != NULL) \
@@ -1514,7 +1527,7 @@ cleanup:
}
static bool tokudb_show_status(
handlerton* hton,
TOKUDB_UNUSED(handlerton* hton),
THD* thd,
stat_print_fn* stat_print,
enum ha_stat_type stat_type) {
@@ -1529,7 +1542,8 @@ static bool tokudb_show_status(
return false;
}
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
#if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(
TOKUDB_UNUSED(handlerton* hton),
TOKUDB_UNUSED(THD* thd),
@@ -1539,12 +1553,12 @@ static void tokudb_handle_fatal_signal(
db_env_try_gdb_stack_trace(tokudb_gdb_path);
}
}
#endif
#endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
// TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_print_error(
const DB_ENV* db_env,
const char* db_errpfx,
const char* buffer) {
static void tokudb_print_error(TOKUDB_UNUSED(const DB_ENV* db_env),
const char* db_errpfx,
const char* buffer) {
sql_print_error("%s: %s", db_errpfx, buffer);
}
@@ -1611,7 +1625,8 @@ struct st_mysql_storage_engine tokudb_storage_engine = {
MYSQL_HANDLERTON_INTERFACE_VERSION
};
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
#if defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) && \
TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
struct tokudb_search_txn_extra {
bool match_found;
uint64_t match_txn_id;
@@ -1652,10 +1667,10 @@ static bool tokudb_txn_id_to_client_id(
}
return e.match_found;
}
#endif
#endif // defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) &&
// TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
static void tokudb_pretty_key(
const DB* db,
const DBT* key,
const char* default_key,
String* out) {
@@ -1677,12 +1692,12 @@ static void tokudb_pretty_key(
}
}
void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out) {
tokudb_pretty_key(db, key, "-infinity", out);
void tokudb_pretty_left_key(const DBT* key, String* out) {
tokudb_pretty_key(key, "-infinity", out);
}
void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out) {
tokudb_pretty_key(db, key, "+infinity", out);
void tokudb_pretty_right_key(const DBT* key, String* out) {
tokudb_pretty_key(key, "+infinity", out);
}
const char* tokudb_get_index_name(DB* db) {
@@ -1729,20 +1744,20 @@ static void tokudb_lock_timeout_callback(
log_str.append_ulonglong(blocking_txnid);
if (tokudb_equal_key(left_key, right_key)) {
String key_str;
tokudb_pretty_key(db, left_key, "?", &key_str);
tokudb_pretty_key(left_key, "?", &key_str);
log_str.append(", \"key\":");
log_str.append("\"");
log_str.append(key_str);
log_str.append("\"");
} else {
String left_str;
tokudb_pretty_left_key(db, left_key, &left_str);
tokudb_pretty_left_key(left_key, &left_str);
log_str.append(", \"key_left\":");
log_str.append("\"");
log_str.append(left_str);
log_str.append("\"");
String right_str;
tokudb_pretty_right_key(db, right_key, &right_str);
tokudb_pretty_right_key(right_key, &right_str);
log_str.append(", \"key_right\":");
log_str.append("\"");
log_str.append(right_str);
@@ -1755,14 +1770,14 @@ static void tokudb_lock_timeout_callback(
char* new_lock_timeout =
tokudb::memory::strdup(log_str.c_ptr(), MY_FAE);
tokudb::sysvars::set_last_lock_timeout(thd, new_lock_timeout);
#if TOKU_THDVAR_MEMALLOC_BUG
#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
mutex_t_lock(tokudb_map_mutex);
struct tokudb_map_pair old_key = {thd, old_lock_timeout};
tree_delete(&tokudb_map, &old_key, sizeof old_key, NULL);
struct tokudb_map_pair new_key = {thd, new_lock_timeout};
tree_insert(&tokudb_map, &new_key, sizeof new_key, NULL);
mutex_t_unlock(tokudb_map_mutex);
#endif
#endif // defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
tokudb::memory::free(old_lock_timeout);
}
// dump to stderr
@@ -1778,7 +1793,8 @@ static void tokudb_lock_timeout_callback(
mysql_thread_id,
(int)qs->length,
qs->str);
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
#if defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) && \
TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
uint64_t blocking_thread_id = 0;
if (tokudb_txn_id_to_client_id(
thd,
@@ -1798,7 +1814,8 @@ static void tokudb_lock_timeout_callback(
blocking_qs.c_ptr());
}
}
#endif
#endif // defined(TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING) &&
// TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
}
}
}
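// Illustrative note (not part of the patch): the string assembled above is what
// ends up in the session's last-lock-timeout variable and is also dumped to the
// error log. With distinct bounds it looks roughly like the following; the
// values are invented and the field list is abridged to what this hunk shows:
//
//   {..., "blocking_txnid":1234, "key_left":"-infinity", "key_right":"+infinity"}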
@@ -1806,7 +1823,9 @@ static void tokudb_lock_timeout_callback(
// Retrieves variables for information_schema.global_status.
// Names (columnname) are automatically converted to upper case,
// and prefixed with "TOKUDB_"
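// (For example, a status entry named "cachetable_size_current" would surface
// as TOKUDB_CACHETABLE_SIZE_CURRENT in information_schema.global_status; the
// entry name here is illustrative.)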
static int show_tokudb_vars(THD *thd, SHOW_VAR *var, char *buff) {
static int show_tokudb_vars(TOKUDB_UNUSED(THD* thd),
SHOW_VAR* var,
TOKUDB_UNUSED(char* buff)) {
TOKUDB_DBUG_ENTER("");
int error;
@@ -1910,16 +1929,6 @@ static SHOW_VAR toku_global_status_variables_export[]= {
{NullS, NullS, SHOW_LONG}
};
#if TOKU_INCLUDE_BACKTRACE
#include <execinfo.h>
static void tokudb_backtrace(void) {
const int N_POINTERS = 30;
void *backtrace_pointers[N_POINTERS];
int n = backtrace(backtrace_pointers, N_POINTERS);
backtrace_symbols_fd(backtrace_pointers, n, fileno(stderr));
}
#endif
#ifdef MARIA_PLUGIN_INTERFACE_VERSION
maria_declare_plugin(tokudb)
#else

View File

@@ -92,7 +92,8 @@ inline toku_compression_method row_format_to_toku_compression_method(
inline enum row_type row_format_to_row_type(
tokudb::sysvars::row_format_t row_format) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
#if defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) && \
TOKU_INCLUDE_ROW_TYPE_COMPRESSION
switch (row_format) {
case tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED:
return ROW_TYPE_TOKU_UNCOMPRESSED;
@@ -111,13 +112,15 @@ inline enum row_type row_format_to_row_type(
case tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT:
return ROW_TYPE_DEFAULT;
}
#endif
#endif // defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) &&
// TOKU_INCLUDE_ROW_TYPE_COMPRESSION
return ROW_TYPE_DEFAULT;
}
inline tokudb::sysvars::row_format_t row_type_to_row_format(
enum row_type type) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
#if defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) && \
TOKU_INCLUDE_ROW_TYPE_COMPRESSION
switch (type) {
case ROW_TYPE_TOKU_UNCOMPRESSED:
return tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED;
@@ -138,7 +141,8 @@ inline tokudb::sysvars::row_format_t row_type_to_row_format(
default:
return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
}
#endif
#endif // defined(TOKU_INCLUDE_ROW_TYPE_COMPRESSION) &&
// TOKU_INCLUDE_ROW_TYPE_COMPRESSION
return tokudb::sysvars::SRV_ROW_FORMAT_DEFAULT;
}
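// Sketch of a hypothetical caller (not part of the patch): the two helpers
// above round-trip between the server-facing row_type and TokuDB's internal
// row_format, and both collapse to their DEFAULT values when row-type
// compression support is compiled out:
//
//   enum row_type rt =
//       row_format_to_row_type(tokudb::sysvars::SRV_ROW_FORMAT_UNCOMPRESSED);
//   // with TOKU_INCLUDE_ROW_TYPE_COMPRESSION: rt == ROW_TYPE_TOKU_UNCOMPRESSED
//   // without it:                             rt == ROW_TYPE_DEFAULT
//   tokudb::sysvars::row_format_t rf = row_type_to_row_format(rt);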
@@ -158,7 +162,8 @@ inline toku_compression_method row_type_to_toku_compression_method(
void tokudb_checkpoint_lock(THD * thd);
void tokudb_checkpoint_unlock(THD * thd);
inline uint64_t tokudb_get_lock_wait_time_callback(uint64_t default_wait_time) {
inline uint64_t tokudb_get_lock_wait_time_callback(
TOKUDB_UNUSED(uint64_t default_wait_time)) {
THD *thd = current_thd;
return tokudb::sysvars::lock_timeout(thd);
}
@@ -168,7 +173,8 @@ inline uint64_t tokudb_get_loader_memory_size_callback(void) {
return tokudb::sysvars::loader_memory_size(thd);
}
inline uint64_t tokudb_get_killed_time_callback(uint64_t default_killed_time) {
inline uint64_t tokudb_get_killed_time_callback(
TOKUDB_UNUSED(uint64_t default_killed_time)) {
THD *thd = current_thd;
return tokudb::sysvars::killed_time(thd);
}
@@ -178,7 +184,8 @@ inline int tokudb_killed_callback(void) {
return thd_killed(thd);
}
inline bool tokudb_killed_thd_callback(void *extra, uint64_t deleted_rows) {
inline bool tokudb_killed_thd_callback(void* extra,
TOKUDB_UNUSED(uint64_t deleted_rows)) {
THD *thd = static_cast<THD *>(extra);
return thd_killed(thd) != 0;
}
@@ -196,8 +203,8 @@ void tokudb_split_dname(
String& table_name,
String& dictionary_name);
void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
void tokudb_pretty_left_key(const DBT* key, String* out);
void tokudb_pretty_right_key(const DBT* key, String* out);
const char *tokudb_get_index_name(DB* db);
#endif //#ifdef _HATOKU_HTON

View File

@@ -13,3 +13,4 @@ rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR

View File

@@ -3,11 +3,6 @@ include/master-slave.inc
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
tokudb_rpl_check_readonly ON
tokudb_rpl_lookup_rows OFF
tokudb_rpl_lookup_rows_delay 10000
tokudb_rpl_unique_checks OFF
tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);

Some files were not shown because too many files have changed in this diff