mirror of
https://github.com/MariaDB/server.git
synced 2025-11-19 19:03:26 +03:00
Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-maint
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-single-user
This commit is contained in:
13
.bzrignore
13
.bzrignore
@@ -2641,6 +2641,16 @@ storage/ndb/lib/libNEWTON_BASICTEST_COMMON.so
|
|||||||
storage/ndb/lib/libREP_API.so
|
storage/ndb/lib/libREP_API.so
|
||||||
storage/ndb/lib/libndbclient.so
|
storage/ndb/lib/libndbclient.so
|
||||||
storage/ndb/lib/libndbclient_extra.so
|
storage/ndb/lib/libndbclient_extra.so
|
||||||
|
storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent
|
||||||
|
storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual
|
||||||
|
storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index
|
||||||
storage/ndb/src/common/debugger/libtrace.dsp
|
storage/ndb/src/common/debugger/libtrace.dsp
|
||||||
storage/ndb/src/common/debugger/signaldata/libsignaldataprint.dsp
|
storage/ndb/src/common/debugger/signaldata/libsignaldataprint.dsp
|
||||||
storage/ndb/src/common/logger/liblogger.dsp
|
storage/ndb/src/common/logger/liblogger.dsp
|
||||||
@@ -2718,6 +2728,8 @@ storage/ndb/test/ndbapi/testDataBuffers
|
|||||||
storage/ndb/test/ndbapi/testDeadlock
|
storage/ndb/test/ndbapi/testDeadlock
|
||||||
storage/ndb/test/ndbapi/testDict
|
storage/ndb/test/ndbapi/testDict
|
||||||
storage/ndb/test/ndbapi/testIndex
|
storage/ndb/test/ndbapi/testIndex
|
||||||
|
storage/ndb/test/ndbapi/testIndexStat
|
||||||
|
storage/ndb/test/ndbapi/testInterpreter
|
||||||
storage/ndb/test/ndbapi/testLcp
|
storage/ndb/test/ndbapi/testLcp
|
||||||
storage/ndb/test/ndbapi/testMgm
|
storage/ndb/test/ndbapi/testMgm
|
||||||
storage/ndb/test/ndbapi/testNdbApi
|
storage/ndb/test/ndbapi/testNdbApi
|
||||||
@@ -2753,6 +2765,7 @@ storage/ndb/test/tools/hugoScanRead
|
|||||||
storage/ndb/test/tools/hugoScanUpdate
|
storage/ndb/test/tools/hugoScanUpdate
|
||||||
storage/ndb/test/tools/listen_event
|
storage/ndb/test/tools/listen_event
|
||||||
storage/ndb/test/tools/ndb_cpcc
|
storage/ndb/test/tools/ndb_cpcc
|
||||||
|
storage/ndb/test/tools/rep_latency
|
||||||
storage/ndb/test/tools/restart
|
storage/ndb/test/tools/restart
|
||||||
storage/ndb/test/tools/verify_index
|
storage/ndb/test/tools/verify_index
|
||||||
storage/ndb/tools/ndb_config
|
storage/ndb/tools/ndb_config
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ libmysqlsources = errmsg.c get_password.c libmysql.c client.c pack.c \
|
|||||||
noinst_HEADERS = embedded_priv.h emb_qcache.h
|
noinst_HEADERS = embedded_priv.h emb_qcache.h
|
||||||
|
|
||||||
sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
|
sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
|
||||||
ha_ndbcluster.cc \
|
ha_ndbcluster.cc ha_ndbcluster_cond.cc \
|
||||||
ha_ndbcluster_binlog.cc ha_partition.cc \
|
ha_ndbcluster_binlog.cc ha_partition.cc \
|
||||||
handler.cc sql_handler.cc \
|
handler.cc sql_handler.cc \
|
||||||
hostname.cc init.cc password.c \
|
hostname.cc init.cc password.c \
|
||||||
@@ -107,6 +107,9 @@ endif
|
|||||||
ha_ndbcluster.o:ha_ndbcluster.cc
|
ha_ndbcluster.o:ha_ndbcluster.cc
|
||||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
|
ha_ndbcluster_cond.o:ha_ndbcluster_cond.cc
|
||||||
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
ha_ndbcluster_binlog.o: ha_ndbcluster_binlog.cc
|
ha_ndbcluster_binlog.o: ha_ndbcluster_binlog.cc
|
||||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
|
|||||||
@@ -48,9 +48,9 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
|
|||||||
procedure.h sql_class.h sql_lex.h sql_list.h \
|
procedure.h sql_class.h sql_lex.h sql_list.h \
|
||||||
sql_map.h sql_string.h unireg.h \
|
sql_map.h sql_string.h unireg.h \
|
||||||
sql_error.h field.h handler.h mysqld_suffix.h \
|
sql_error.h field.h handler.h mysqld_suffix.h \
|
||||||
ha_partition.h \
|
ha_ndbcluster.h ha_ndbcluster_cond.h \
|
||||||
ha_ndbcluster.h ha_ndbcluster_binlog.h \
|
ha_ndbcluster_binlog.h ha_ndbcluster_tables.h \
|
||||||
ha_ndbcluster_tables.h rpl_constants.h \
|
ha_partition.h rpl_constants.h \
|
||||||
opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
|
opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
|
||||||
log.h sql_show.h rpl_rli.h rpl_mi.h \
|
log.h sql_show.h rpl_rli.h rpl_mi.h \
|
||||||
sql_select.h structs.h table.h sql_udf.h hash_filo.h \
|
sql_select.h structs.h table.h sql_udf.h hash_filo.h \
|
||||||
@@ -92,8 +92,8 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
|
|||||||
log_event_old.cc rpl_record_old.cc \
|
log_event_old.cc rpl_record_old.cc \
|
||||||
discover.cc time.cc opt_range.cc opt_sum.cc \
|
discover.cc time.cc opt_range.cc opt_sum.cc \
|
||||||
records.cc filesort.cc handler.cc \
|
records.cc filesort.cc handler.cc \
|
||||||
ha_partition.cc \
|
ha_ndbcluster.cc ha_ndbcluster_cond.cc \
|
||||||
ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
|
ha_ndbcluster_binlog.cc ha_partition.cc \
|
||||||
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
|
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
|
||||||
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
|
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
|
||||||
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
|
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
|
||||||
@@ -156,10 +156,13 @@ lex_hash.h: gen_lex_hash.cc lex.h
|
|||||||
./gen_lex_hash$(EXEEXT) > $@-t
|
./gen_lex_hash$(EXEEXT) > $@-t
|
||||||
$(MV) $@-t $@
|
$(MV) $@-t $@
|
||||||
|
|
||||||
# the following three should eventually be moved out of this directory
|
# the following four should eventually be moved out of this directory
|
||||||
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
|
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
|
||||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
|
ha_ndbcluster_cond.o:ha_ndbcluster_cond.cc ha_ndbcluster_cond.h
|
||||||
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h
|
ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h
|
||||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||||
|
|
||||||
|
|||||||
1409
sql/ha_ndbcluster.cc
1409
sql/ha_ndbcluster.cc
File diff suppressed because it is too large
Load Diff
@@ -37,11 +37,11 @@ class NdbOperation; // Forward declaration
|
|||||||
class NdbTransaction; // Forward declaration
|
class NdbTransaction; // Forward declaration
|
||||||
class NdbRecAttr; // Forward declaration
|
class NdbRecAttr; // Forward declaration
|
||||||
class NdbScanOperation;
|
class NdbScanOperation;
|
||||||
class NdbScanFilter;
|
|
||||||
class NdbIndexScanOperation;
|
class NdbIndexScanOperation;
|
||||||
class NdbBlob;
|
class NdbBlob;
|
||||||
class NdbIndexStat;
|
class NdbIndexStat;
|
||||||
class NdbEventOperation;
|
class NdbEventOperation;
|
||||||
|
class ha_ndbcluster_cond;
|
||||||
|
|
||||||
// connectstring to cluster if given by mysqld
|
// connectstring to cluster if given by mysqld
|
||||||
extern const char *ndbcluster_connectstring;
|
extern const char *ndbcluster_connectstring;
|
||||||
@@ -161,424 +161,6 @@ struct Ndb_tuple_id_range_guard {
|
|||||||
#define NSF_NO_BINLOG 4 /* table should not be binlogged */
|
#define NSF_NO_BINLOG 4 /* table should not be binlogged */
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
typedef enum ndb_item_type {
|
|
||||||
NDB_VALUE = 0, // Qualified more with Item::Type
|
|
||||||
NDB_FIELD = 1, // Qualified from table definition
|
|
||||||
NDB_FUNCTION = 2,// Qualified from Item_func::Functype
|
|
||||||
NDB_END_COND = 3 // End marker for condition group
|
|
||||||
} NDB_ITEM_TYPE;
|
|
||||||
|
|
||||||
typedef enum ndb_func_type {
|
|
||||||
NDB_EQ_FUNC = 0,
|
|
||||||
NDB_NE_FUNC = 1,
|
|
||||||
NDB_LT_FUNC = 2,
|
|
||||||
NDB_LE_FUNC = 3,
|
|
||||||
NDB_GT_FUNC = 4,
|
|
||||||
NDB_GE_FUNC = 5,
|
|
||||||
NDB_ISNULL_FUNC = 6,
|
|
||||||
NDB_ISNOTNULL_FUNC = 7,
|
|
||||||
NDB_LIKE_FUNC = 8,
|
|
||||||
NDB_NOTLIKE_FUNC = 9,
|
|
||||||
NDB_NOT_FUNC = 10,
|
|
||||||
NDB_UNKNOWN_FUNC = 11,
|
|
||||||
NDB_COND_AND_FUNC = 12,
|
|
||||||
NDB_COND_OR_FUNC = 13,
|
|
||||||
NDB_UNSUPPORTED_FUNC = 14
|
|
||||||
} NDB_FUNC_TYPE;
|
|
||||||
|
|
||||||
typedef union ndb_item_qualification {
|
|
||||||
Item::Type value_type;
|
|
||||||
enum_field_types field_type; // Instead of Item::FIELD_ITEM
|
|
||||||
NDB_FUNC_TYPE function_type; // Instead of Item::FUNC_ITEM
|
|
||||||
} NDB_ITEM_QUALIFICATION;
|
|
||||||
|
|
||||||
typedef struct ndb_item_field_value {
|
|
||||||
Field* field;
|
|
||||||
int column_no;
|
|
||||||
} NDB_ITEM_FIELD_VALUE;
|
|
||||||
|
|
||||||
typedef union ndb_item_value {
|
|
||||||
const Item *item;
|
|
||||||
NDB_ITEM_FIELD_VALUE *field_value;
|
|
||||||
uint arg_count;
|
|
||||||
} NDB_ITEM_VALUE;
|
|
||||||
|
|
||||||
struct negated_function_mapping
|
|
||||||
{
|
|
||||||
NDB_FUNC_TYPE pos_fun;
|
|
||||||
NDB_FUNC_TYPE neg_fun;
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
Define what functions can be negated in condition pushdown.
|
|
||||||
Note, these HAVE to be in the same order as in definition enum
|
|
||||||
*/
|
|
||||||
static const negated_function_mapping neg_map[]=
|
|
||||||
{
|
|
||||||
{NDB_EQ_FUNC, NDB_NE_FUNC},
|
|
||||||
{NDB_NE_FUNC, NDB_EQ_FUNC},
|
|
||||||
{NDB_LT_FUNC, NDB_GE_FUNC},
|
|
||||||
{NDB_LE_FUNC, NDB_GT_FUNC},
|
|
||||||
{NDB_GT_FUNC, NDB_LE_FUNC},
|
|
||||||
{NDB_GE_FUNC, NDB_LT_FUNC},
|
|
||||||
{NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC},
|
|
||||||
{NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC},
|
|
||||||
{NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC},
|
|
||||||
{NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC},
|
|
||||||
{NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC},
|
|
||||||
{NDB_UNKNOWN_FUNC, NDB_UNSUPPORTED_FUNC},
|
|
||||||
{NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC},
|
|
||||||
{NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC},
|
|
||||||
{NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
This class is the construction element for serialization of Item tree
|
|
||||||
in condition pushdown.
|
|
||||||
An instance of Ndb_Item represents a constant, table field reference,
|
|
||||||
unary or binary comparison predicate, and start/end of AND/OR.
|
|
||||||
Instances of Ndb_Item are stored in a linked list implemented by Ndb_cond
|
|
||||||
class.
|
|
||||||
The order of elements produced by Ndb_cond::next corresponds to
|
|
||||||
breadth-first traversal of the Item (i.e. expression) tree in prefix order.
|
|
||||||
AND and OR have arbitrary arity, so the end of AND/OR group is marked with
|
|
||||||
Ndb_item with type == NDB_END_COND.
|
|
||||||
NOT items represent negated conditions and generate NAND/NOR groups.
|
|
||||||
*/
|
|
||||||
class Ndb_item {
|
|
||||||
public:
|
|
||||||
Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {};
|
|
||||||
Ndb_item(NDB_ITEM_TYPE item_type,
|
|
||||||
NDB_ITEM_QUALIFICATION item_qualification,
|
|
||||||
const Item *item_value)
|
|
||||||
: type(item_type), qualification(item_qualification)
|
|
||||||
{
|
|
||||||
switch(item_type) {
|
|
||||||
case(NDB_VALUE):
|
|
||||||
value.item= item_value;
|
|
||||||
break;
|
|
||||||
case(NDB_FIELD): {
|
|
||||||
NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE();
|
|
||||||
Item_field *field_item= (Item_field *) item_value;
|
|
||||||
field_value->field= field_item->field;
|
|
||||||
field_value->column_no= -1; // Will be fetched at scan filter generation
|
|
||||||
value.field_value= field_value;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case(NDB_FUNCTION):
|
|
||||||
value.item= item_value;
|
|
||||||
value.arg_count= ((Item_func *) item_value)->argument_count();
|
|
||||||
break;
|
|
||||||
case(NDB_END_COND):
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ndb_item(Field *field, int column_no) : type(NDB_FIELD)
|
|
||||||
{
|
|
||||||
NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE();
|
|
||||||
qualification.field_type= field->type();
|
|
||||||
field_value->field= field;
|
|
||||||
field_value->column_no= column_no;
|
|
||||||
value.field_value= field_value;
|
|
||||||
};
|
|
||||||
Ndb_item(Item_func::Functype func_type, const Item *item_value)
|
|
||||||
: type(NDB_FUNCTION)
|
|
||||||
{
|
|
||||||
qualification.function_type= item_func_to_ndb_func(func_type);
|
|
||||||
value.item= item_value;
|
|
||||||
value.arg_count= ((Item_func *) item_value)->argument_count();
|
|
||||||
};
|
|
||||||
Ndb_item(Item_func::Functype func_type, uint no_args)
|
|
||||||
: type(NDB_FUNCTION)
|
|
||||||
{
|
|
||||||
qualification.function_type= item_func_to_ndb_func(func_type);
|
|
||||||
value.arg_count= no_args;
|
|
||||||
};
|
|
||||||
~Ndb_item()
|
|
||||||
{
|
|
||||||
if (type == NDB_FIELD)
|
|
||||||
{
|
|
||||||
delete value.field_value;
|
|
||||||
value.field_value= NULL;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
uint32 pack_length()
|
|
||||||
{
|
|
||||||
switch(type) {
|
|
||||||
case(NDB_VALUE):
|
|
||||||
if(qualification.value_type == Item::STRING_ITEM)
|
|
||||||
return value.item->str_value.length();
|
|
||||||
break;
|
|
||||||
case(NDB_FIELD):
|
|
||||||
return value.field_value->field->pack_length();
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
Field * get_field() { return value.field_value->field; };
|
|
||||||
|
|
||||||
int get_field_no() { return value.field_value->column_no; };
|
|
||||||
|
|
||||||
int argument_count()
|
|
||||||
{
|
|
||||||
return value.arg_count;
|
|
||||||
};
|
|
||||||
|
|
||||||
const char* get_val()
|
|
||||||
{
|
|
||||||
switch(type) {
|
|
||||||
case(NDB_VALUE):
|
|
||||||
if(qualification.value_type == Item::STRING_ITEM)
|
|
||||||
return value.item->str_value.ptr();
|
|
||||||
break;
|
|
||||||
case(NDB_FIELD):
|
|
||||||
return value.field_value->field->ptr;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
return NULL;
|
|
||||||
};
|
|
||||||
|
|
||||||
void save_in_field(Ndb_item *field_item)
|
|
||||||
{
|
|
||||||
Field *field = field_item->value.field_value->field;
|
|
||||||
const Item *item= value.item;
|
|
||||||
|
|
||||||
if (item && field)
|
|
||||||
{
|
|
||||||
my_bitmap_map *old_map=
|
|
||||||
dbug_tmp_use_all_columns(field->table, field->table->write_set);
|
|
||||||
((Item *)item)->save_in_field(field, FALSE);
|
|
||||||
dbug_tmp_restore_column_map(field->table->write_set, old_map);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)
|
|
||||||
{
|
|
||||||
switch (fun) {
|
|
||||||
case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; }
|
|
||||||
case (Item_func::NE_FUNC): { return NDB_NE_FUNC; }
|
|
||||||
case (Item_func::LT_FUNC): { return NDB_LT_FUNC; }
|
|
||||||
case (Item_func::LE_FUNC): { return NDB_LE_FUNC; }
|
|
||||||
case (Item_func::GT_FUNC): { return NDB_GT_FUNC; }
|
|
||||||
case (Item_func::GE_FUNC): { return NDB_GE_FUNC; }
|
|
||||||
case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; }
|
|
||||||
case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; }
|
|
||||||
case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; }
|
|
||||||
case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; }
|
|
||||||
case (Item_func::UNKNOWN_FUNC): { return NDB_UNKNOWN_FUNC; }
|
|
||||||
case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; }
|
|
||||||
case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; }
|
|
||||||
default: { return NDB_UNSUPPORTED_FUNC; }
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun)
|
|
||||||
{
|
|
||||||
uint i= (uint) fun;
|
|
||||||
DBUG_ASSERT(fun == neg_map[i].pos_fun);
|
|
||||||
return neg_map[i].neg_fun;
|
|
||||||
};
|
|
||||||
|
|
||||||
NDB_ITEM_TYPE type;
|
|
||||||
NDB_ITEM_QUALIFICATION qualification;
|
|
||||||
private:
|
|
||||||
NDB_ITEM_VALUE value;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
This class implements a linked list used for storing a
|
|
||||||
serialization of the Item tree for condition pushdown.
|
|
||||||
*/
|
|
||||||
class Ndb_cond
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
Ndb_cond() : ndb_item(NULL), next(NULL), prev(NULL) {};
|
|
||||||
~Ndb_cond()
|
|
||||||
{
|
|
||||||
if (ndb_item) delete ndb_item;
|
|
||||||
ndb_item= NULL;
|
|
||||||
if (next) delete next;
|
|
||||||
next= prev= NULL;
|
|
||||||
};
|
|
||||||
Ndb_item *ndb_item;
|
|
||||||
Ndb_cond *next;
|
|
||||||
Ndb_cond *prev;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
This class implements a stack for storing several conditions
|
|
||||||
for pushdown (represented as serialized Item trees using Ndb_cond).
|
|
||||||
The current implementation only pushes one condition, but is
|
|
||||||
prepared for handling several (C1 AND C2 ...) if the logic for
|
|
||||||
pushing conditions is extended in sql_select.
|
|
||||||
*/
|
|
||||||
class Ndb_cond_stack
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
Ndb_cond_stack() : ndb_cond(NULL), next(NULL) {};
|
|
||||||
~Ndb_cond_stack()
|
|
||||||
{
|
|
||||||
if (ndb_cond) delete ndb_cond;
|
|
||||||
ndb_cond= NULL;
|
|
||||||
if (next) delete next;
|
|
||||||
next= NULL;
|
|
||||||
};
|
|
||||||
Ndb_cond *ndb_cond;
|
|
||||||
Ndb_cond_stack *next;
|
|
||||||
};
|
|
||||||
|
|
||||||
class Ndb_rewrite_context
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
Ndb_rewrite_context(Item_func *func)
|
|
||||||
: func_item(func), left_hand_item(NULL), count(0) {};
|
|
||||||
~Ndb_rewrite_context()
|
|
||||||
{
|
|
||||||
if (next) delete next;
|
|
||||||
}
|
|
||||||
const Item_func *func_item;
|
|
||||||
const Item *left_hand_item;
|
|
||||||
uint count;
|
|
||||||
Ndb_rewrite_context *next;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
This class is used for storing the context when traversing
|
|
||||||
the Item tree. It stores a reference to the table the condition
|
|
||||||
is defined on, the serialized representation being generated,
|
|
||||||
if the condition found is supported, and information what is
|
|
||||||
expected next in the tree inorder for the condition to be supported.
|
|
||||||
*/
|
|
||||||
class Ndb_cond_traverse_context
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
Ndb_cond_traverse_context(TABLE *tab, void* ndb_tab, Ndb_cond_stack* stack)
|
|
||||||
: table(tab), ndb_table(ndb_tab),
|
|
||||||
supported(TRUE), stack_ptr(stack), cond_ptr(NULL),
|
|
||||||
skip(0), collation(NULL), rewrite_stack(NULL)
|
|
||||||
{
|
|
||||||
// Allocate type checking bitmaps
|
|
||||||
bitmap_init(&expect_mask, 0, 512, FALSE);
|
|
||||||
bitmap_init(&expect_field_type_mask, 0, 512, FALSE);
|
|
||||||
bitmap_init(&expect_field_result_mask, 0, 512, FALSE);
|
|
||||||
|
|
||||||
if (stack)
|
|
||||||
cond_ptr= stack->ndb_cond;
|
|
||||||
};
|
|
||||||
~Ndb_cond_traverse_context()
|
|
||||||
{
|
|
||||||
bitmap_free(&expect_mask);
|
|
||||||
bitmap_free(&expect_field_type_mask);
|
|
||||||
bitmap_free(&expect_field_result_mask);
|
|
||||||
if (rewrite_stack) delete rewrite_stack;
|
|
||||||
}
|
|
||||||
void expect(Item::Type type)
|
|
||||||
{
|
|
||||||
bitmap_set_bit(&expect_mask, (uint) type);
|
|
||||||
if (type == Item::FIELD_ITEM) expect_all_field_types();
|
|
||||||
};
|
|
||||||
void dont_expect(Item::Type type)
|
|
||||||
{
|
|
||||||
bitmap_clear_bit(&expect_mask, (uint) type);
|
|
||||||
};
|
|
||||||
bool expecting(Item::Type type)
|
|
||||||
{
|
|
||||||
return bitmap_is_set(&expect_mask, (uint) type);
|
|
||||||
};
|
|
||||||
void expect_nothing()
|
|
||||||
{
|
|
||||||
bitmap_clear_all(&expect_mask);
|
|
||||||
};
|
|
||||||
bool expecting_nothing()
|
|
||||||
{
|
|
||||||
return bitmap_is_clear_all(&expect_mask);
|
|
||||||
}
|
|
||||||
void expect_only(Item::Type type)
|
|
||||||
{
|
|
||||||
expect_nothing();
|
|
||||||
expect(type);
|
|
||||||
};
|
|
||||||
|
|
||||||
void expect_field_type(enum_field_types type)
|
|
||||||
{
|
|
||||||
bitmap_set_bit(&expect_field_type_mask, (uint) type);
|
|
||||||
};
|
|
||||||
void expect_all_field_types()
|
|
||||||
{
|
|
||||||
bitmap_set_all(&expect_field_type_mask);
|
|
||||||
};
|
|
||||||
bool expecting_field_type(enum_field_types type)
|
|
||||||
{
|
|
||||||
return bitmap_is_set(&expect_field_type_mask, (uint) type);
|
|
||||||
};
|
|
||||||
void expect_no_field_type()
|
|
||||||
{
|
|
||||||
bitmap_clear_all(&expect_field_type_mask);
|
|
||||||
};
|
|
||||||
bool expecting_no_field_type()
|
|
||||||
{
|
|
||||||
return bitmap_is_clear_all(&expect_field_type_mask);
|
|
||||||
}
|
|
||||||
void expect_only_field_type(enum_field_types result)
|
|
||||||
{
|
|
||||||
expect_no_field_type();
|
|
||||||
expect_field_type(result);
|
|
||||||
};
|
|
||||||
|
|
||||||
void expect_field_result(Item_result result)
|
|
||||||
{
|
|
||||||
bitmap_set_bit(&expect_field_result_mask, (uint) result);
|
|
||||||
};
|
|
||||||
bool expecting_field_result(Item_result result)
|
|
||||||
{
|
|
||||||
return bitmap_is_set(&expect_field_result_mask, (uint) result);
|
|
||||||
};
|
|
||||||
void expect_no_field_result()
|
|
||||||
{
|
|
||||||
bitmap_clear_all(&expect_field_result_mask);
|
|
||||||
};
|
|
||||||
bool expecting_no_field_result()
|
|
||||||
{
|
|
||||||
return bitmap_is_clear_all(&expect_field_result_mask);
|
|
||||||
}
|
|
||||||
void expect_only_field_result(Item_result result)
|
|
||||||
{
|
|
||||||
expect_no_field_result();
|
|
||||||
expect_field_result(result);
|
|
||||||
};
|
|
||||||
void expect_collation(CHARSET_INFO* col)
|
|
||||||
{
|
|
||||||
collation= col;
|
|
||||||
};
|
|
||||||
bool expecting_collation(CHARSET_INFO* col)
|
|
||||||
{
|
|
||||||
bool matching= (!collation) ? true : (collation == col);
|
|
||||||
collation= NULL;
|
|
||||||
|
|
||||||
return matching;
|
|
||||||
};
|
|
||||||
|
|
||||||
TABLE* table;
|
|
||||||
void* ndb_table;
|
|
||||||
bool supported;
|
|
||||||
Ndb_cond_stack* stack_ptr;
|
|
||||||
Ndb_cond* cond_ptr;
|
|
||||||
MY_BITMAP expect_mask;
|
|
||||||
MY_BITMAP expect_field_type_mask;
|
|
||||||
MY_BITMAP expect_field_result_mask;
|
|
||||||
uint skip;
|
|
||||||
CHARSET_INFO* collation;
|
|
||||||
Ndb_rewrite_context *rewrite_stack;
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
typedef enum ndb_query_state_bits {
|
typedef enum ndb_query_state_bits {
|
||||||
NDB_QUERY_NORMAL = 0,
|
NDB_QUERY_NORMAL = 0,
|
||||||
NDB_QUERY_MULTI_READ_RANGE = 1
|
NDB_QUERY_MULTI_READ_RANGE = 1
|
||||||
@@ -906,27 +488,6 @@ private:
|
|||||||
|
|
||||||
void release_completed_operations(NdbTransaction*, bool);
|
void release_completed_operations(NdbTransaction*, bool);
|
||||||
|
|
||||||
/*
|
|
||||||
Condition pushdown
|
|
||||||
*/
|
|
||||||
void cond_clear();
|
|
||||||
bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond);
|
|
||||||
int build_scan_filter_predicate(Ndb_cond* &cond,
|
|
||||||
NdbScanFilter* filter,
|
|
||||||
bool negated= false);
|
|
||||||
int build_scan_filter_group(Ndb_cond* &cond,
|
|
||||||
NdbScanFilter* filter);
|
|
||||||
int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter);
|
|
||||||
int generate_scan_filter(Ndb_cond_stack* cond_stack,
|
|
||||||
NdbScanOperation* op);
|
|
||||||
int generate_scan_filter_from_cond(Ndb_cond_stack* cond_stack,
|
|
||||||
NdbScanFilter& filter);
|
|
||||||
int generate_scan_filter_from_key(NdbScanOperation* op,
|
|
||||||
const KEY* key_info,
|
|
||||||
const byte *key,
|
|
||||||
uint key_len,
|
|
||||||
byte *buf);
|
|
||||||
|
|
||||||
friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
|
friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
|
||||||
friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*);
|
friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*);
|
||||||
friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
|
friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
|
||||||
@@ -982,7 +543,7 @@ private:
|
|||||||
ha_rows m_autoincrement_prefetch;
|
ha_rows m_autoincrement_prefetch;
|
||||||
bool m_transaction_on;
|
bool m_transaction_on;
|
||||||
|
|
||||||
Ndb_cond_stack *m_cond_stack;
|
ha_ndbcluster_cond *m_cond;
|
||||||
bool m_disable_multi_read;
|
bool m_disable_multi_read;
|
||||||
byte *m_multi_range_result_ptr;
|
byte *m_multi_range_result_ptr;
|
||||||
KEY_MULTI_RANGE *m_multi_ranges;
|
KEY_MULTI_RANGE *m_multi_ranges;
|
||||||
|
|||||||
1426
sql/ha_ndbcluster_cond.cc
Normal file
1426
sql/ha_ndbcluster_cond.cc
Normal file
File diff suppressed because it is too large
Load Diff
475
sql/ha_ndbcluster_cond.h
Normal file
475
sql/ha_ndbcluster_cond.h
Normal file
@@ -0,0 +1,475 @@
|
|||||||
|
/* Copyright (C) 2000-2007 MySQL AB
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; version 2 of the License.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program; if not, write to the Free Software
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||||
|
|
||||||
|
/*
|
||||||
|
This file defines the data structures used by engine condition pushdown in
|
||||||
|
the NDB Cluster handler
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifdef USE_PRAGMA_INTERFACE
|
||||||
|
#pragma interface /* gcc class implementation */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef enum ndb_item_type {
|
||||||
|
NDB_VALUE = 0, // Qualified more with Item::Type
|
||||||
|
NDB_FIELD = 1, // Qualified from table definition
|
||||||
|
NDB_FUNCTION = 2,// Qualified from Item_func::Functype
|
||||||
|
NDB_END_COND = 3 // End marker for condition group
|
||||||
|
} NDB_ITEM_TYPE;
|
||||||
|
|
||||||
|
typedef enum ndb_func_type {
|
||||||
|
NDB_EQ_FUNC = 0,
|
||||||
|
NDB_NE_FUNC = 1,
|
||||||
|
NDB_LT_FUNC = 2,
|
||||||
|
NDB_LE_FUNC = 3,
|
||||||
|
NDB_GT_FUNC = 4,
|
||||||
|
NDB_GE_FUNC = 5,
|
||||||
|
NDB_ISNULL_FUNC = 6,
|
||||||
|
NDB_ISNOTNULL_FUNC = 7,
|
||||||
|
NDB_LIKE_FUNC = 8,
|
||||||
|
NDB_NOTLIKE_FUNC = 9,
|
||||||
|
NDB_NOT_FUNC = 10,
|
||||||
|
NDB_UNKNOWN_FUNC = 11,
|
||||||
|
NDB_COND_AND_FUNC = 12,
|
||||||
|
NDB_COND_OR_FUNC = 13,
|
||||||
|
NDB_UNSUPPORTED_FUNC = 14
|
||||||
|
} NDB_FUNC_TYPE;
|
||||||
|
|
||||||
|
typedef union ndb_item_qualification {
|
||||||
|
Item::Type value_type;
|
||||||
|
enum_field_types field_type; // Instead of Item::FIELD_ITEM
|
||||||
|
NDB_FUNC_TYPE function_type; // Instead of Item::FUNC_ITEM
|
||||||
|
} NDB_ITEM_QUALIFICATION;
|
||||||
|
|
||||||
|
typedef struct ndb_item_field_value {
|
||||||
|
Field* field;
|
||||||
|
int column_no;
|
||||||
|
} NDB_ITEM_FIELD_VALUE;
|
||||||
|
|
||||||
|
typedef union ndb_item_value {
|
||||||
|
const Item *item;
|
||||||
|
NDB_ITEM_FIELD_VALUE *field_value;
|
||||||
|
uint arg_count;
|
||||||
|
} NDB_ITEM_VALUE;
|
||||||
|
|
||||||
|
struct negated_function_mapping
|
||||||
|
{
|
||||||
|
NDB_FUNC_TYPE pos_fun;
|
||||||
|
NDB_FUNC_TYPE neg_fun;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
Define what functions can be negated in condition pushdown.
|
||||||
|
Note, these HAVE to be in the same order as in definition enum
|
||||||
|
*/
|
||||||
|
static const negated_function_mapping neg_map[]=
|
||||||
|
{
|
||||||
|
{NDB_EQ_FUNC, NDB_NE_FUNC},
|
||||||
|
{NDB_NE_FUNC, NDB_EQ_FUNC},
|
||||||
|
{NDB_LT_FUNC, NDB_GE_FUNC},
|
||||||
|
{NDB_LE_FUNC, NDB_GT_FUNC},
|
||||||
|
{NDB_GT_FUNC, NDB_LE_FUNC},
|
||||||
|
{NDB_GE_FUNC, NDB_LT_FUNC},
|
||||||
|
{NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC},
|
||||||
|
{NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC},
|
||||||
|
{NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC},
|
||||||
|
{NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC},
|
||||||
|
{NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC},
|
||||||
|
{NDB_UNKNOWN_FUNC, NDB_UNSUPPORTED_FUNC},
|
||||||
|
{NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC},
|
||||||
|
{NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC},
|
||||||
|
{NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
This class is the construction element for serialization of Item tree
|
||||||
|
in condition pushdown.
|
||||||
|
An instance of Ndb_Item represents a constant, table field reference,
|
||||||
|
unary or binary comparison predicate, and start/end of AND/OR.
|
||||||
|
Instances of Ndb_Item are stored in a linked list implemented by Ndb_cond
|
||||||
|
class.
|
||||||
|
The order of elements produced by Ndb_cond::next corresponds to
|
||||||
|
breadth-first traversal of the Item (i.e. expression) tree in prefix order.
|
||||||
|
AND and OR have arbitrary arity, so the end of AND/OR group is marked with
|
||||||
|
Ndb_item with type == NDB_END_COND.
|
||||||
|
NOT items represent negated conditions and generate NAND/NOR groups.
|
||||||
|
*/
|
||||||
|
class Ndb_item : public Sql_alloc
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {};
|
||||||
|
Ndb_item(NDB_ITEM_TYPE item_type,
|
||||||
|
NDB_ITEM_QUALIFICATION item_qualification,
|
||||||
|
const Item *item_value)
|
||||||
|
: type(item_type), qualification(item_qualification)
|
||||||
|
{
|
||||||
|
switch(item_type) {
|
||||||
|
case(NDB_VALUE):
|
||||||
|
value.item= item_value;
|
||||||
|
break;
|
||||||
|
case(NDB_FIELD): {
|
||||||
|
NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE();
|
||||||
|
Item_field *field_item= (Item_field *) item_value;
|
||||||
|
field_value->field= field_item->field;
|
||||||
|
field_value->column_no= -1; // Will be fetched at scan filter generation
|
||||||
|
value.field_value= field_value;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case(NDB_FUNCTION):
|
||||||
|
value.item= item_value;
|
||||||
|
value.arg_count= ((Item_func *) item_value)->argument_count();
|
||||||
|
break;
|
||||||
|
case(NDB_END_COND):
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ndb_item(Field *field, int column_no) : type(NDB_FIELD)
|
||||||
|
{
|
||||||
|
NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE();
|
||||||
|
qualification.field_type= field->type();
|
||||||
|
field_value->field= field;
|
||||||
|
field_value->column_no= column_no;
|
||||||
|
value.field_value= field_value;
|
||||||
|
};
|
||||||
|
Ndb_item(Item_func::Functype func_type, const Item *item_value)
|
||||||
|
: type(NDB_FUNCTION)
|
||||||
|
{
|
||||||
|
qualification.function_type= item_func_to_ndb_func(func_type);
|
||||||
|
value.item= item_value;
|
||||||
|
value.arg_count= ((Item_func *) item_value)->argument_count();
|
||||||
|
};
|
||||||
|
Ndb_item(Item_func::Functype func_type, uint no_args)
|
||||||
|
: type(NDB_FUNCTION)
|
||||||
|
{
|
||||||
|
qualification.function_type= item_func_to_ndb_func(func_type);
|
||||||
|
value.arg_count= no_args;
|
||||||
|
};
|
||||||
|
~Ndb_item()
|
||||||
|
{
|
||||||
|
if (type == NDB_FIELD)
|
||||||
|
{
|
||||||
|
delete value.field_value;
|
||||||
|
value.field_value= NULL;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
uint32 pack_length()
|
||||||
|
{
|
||||||
|
switch(type) {
|
||||||
|
case(NDB_VALUE):
|
||||||
|
if(qualification.value_type == Item::STRING_ITEM)
|
||||||
|
return value.item->str_value.length();
|
||||||
|
break;
|
||||||
|
case(NDB_FIELD):
|
||||||
|
return value.field_value->field->pack_length();
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
Field * get_field() { return value.field_value->field; };
|
||||||
|
|
||||||
|
int get_field_no() { return value.field_value->column_no; };
|
||||||
|
|
||||||
|
int argument_count()
|
||||||
|
{
|
||||||
|
return value.arg_count;
|
||||||
|
};
|
||||||
|
|
||||||
|
const char* get_val()
|
||||||
|
{
|
||||||
|
switch(type) {
|
||||||
|
case(NDB_VALUE):
|
||||||
|
if(qualification.value_type == Item::STRING_ITEM)
|
||||||
|
return value.item->str_value.ptr();
|
||||||
|
break;
|
||||||
|
case(NDB_FIELD):
|
||||||
|
return value.field_value->field->ptr;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
};
|
||||||
|
|
||||||
|
void save_in_field(Ndb_item *field_item)
|
||||||
|
{
|
||||||
|
Field *field = field_item->value.field_value->field;
|
||||||
|
const Item *item= value.item;
|
||||||
|
|
||||||
|
if (item && field)
|
||||||
|
{
|
||||||
|
my_bitmap_map *old_map=
|
||||||
|
dbug_tmp_use_all_columns(field->table, field->table->write_set);
|
||||||
|
((Item *)item)->save_in_field(field, FALSE);
|
||||||
|
dbug_tmp_restore_column_map(field->table->write_set, old_map);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)
|
||||||
|
{
|
||||||
|
switch (fun) {
|
||||||
|
case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; }
|
||||||
|
case (Item_func::NE_FUNC): { return NDB_NE_FUNC; }
|
||||||
|
case (Item_func::LT_FUNC): { return NDB_LT_FUNC; }
|
||||||
|
case (Item_func::LE_FUNC): { return NDB_LE_FUNC; }
|
||||||
|
case (Item_func::GT_FUNC): { return NDB_GT_FUNC; }
|
||||||
|
case (Item_func::GE_FUNC): { return NDB_GE_FUNC; }
|
||||||
|
case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; }
|
||||||
|
case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; }
|
||||||
|
case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; }
|
||||||
|
case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; }
|
||||||
|
case (Item_func::UNKNOWN_FUNC): { return NDB_UNKNOWN_FUNC; }
|
||||||
|
case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; }
|
||||||
|
case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; }
|
||||||
|
default: { return NDB_UNSUPPORTED_FUNC; }
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun)
|
||||||
|
{
|
||||||
|
uint i= (uint) fun;
|
||||||
|
DBUG_ASSERT(fun == neg_map[i].pos_fun);
|
||||||
|
return neg_map[i].neg_fun;
|
||||||
|
};
|
||||||
|
|
||||||
|
NDB_ITEM_TYPE type;
|
||||||
|
NDB_ITEM_QUALIFICATION qualification;
|
||||||
|
private:
|
||||||
|
NDB_ITEM_VALUE value;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
This class implements a linked list used for storing a
|
||||||
|
serialization of the Item tree for condition pushdown.
|
||||||
|
*/
|
||||||
|
class Ndb_cond : public Sql_alloc
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
Ndb_cond() : ndb_item(NULL), next(NULL), prev(NULL) {};
|
||||||
|
~Ndb_cond()
|
||||||
|
{
|
||||||
|
if (ndb_item) delete ndb_item;
|
||||||
|
ndb_item= NULL;
|
||||||
|
if (next) delete next;
|
||||||
|
next= prev= NULL;
|
||||||
|
};
|
||||||
|
Ndb_item *ndb_item;
|
||||||
|
Ndb_cond *next;
|
||||||
|
Ndb_cond *prev;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
This class implements a stack for storing several conditions
|
||||||
|
for pushdown (represented as serialized Item trees using Ndb_cond).
|
||||||
|
The current implementation only pushes one condition, but is
|
||||||
|
prepared for handling several (C1 AND C2 ...) if the logic for
|
||||||
|
pushing conditions is extended in sql_select.
|
||||||
|
*/
|
||||||
|
class Ndb_cond_stack : public Sql_alloc
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
Ndb_cond_stack() : ndb_cond(NULL), next(NULL) {};
|
||||||
|
~Ndb_cond_stack()
|
||||||
|
{
|
||||||
|
if (ndb_cond) delete ndb_cond;
|
||||||
|
ndb_cond= NULL;
|
||||||
|
if (next) delete next;
|
||||||
|
next= NULL;
|
||||||
|
};
|
||||||
|
Ndb_cond *ndb_cond;
|
||||||
|
Ndb_cond_stack *next;
|
||||||
|
};
|
||||||
|
|
||||||
|
class Ndb_rewrite_context : public Sql_alloc
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
Ndb_rewrite_context(Item_func *func)
|
||||||
|
: func_item(func), left_hand_item(NULL), count(0) {};
|
||||||
|
~Ndb_rewrite_context()
|
||||||
|
{
|
||||||
|
if (next) delete next;
|
||||||
|
}
|
||||||
|
const Item_func *func_item;
|
||||||
|
const Item *left_hand_item;
|
||||||
|
uint count;
|
||||||
|
Ndb_rewrite_context *next;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
This class is used for storing the context when traversing
|
||||||
|
the Item tree. It stores a reference to the table the condition
|
||||||
|
is defined on, the serialized representation being generated,
|
||||||
|
if the condition found is supported, and information what is
|
||||||
|
expected next in the tree inorder for the condition to be supported.
|
||||||
|
*/
|
||||||
|
class Ndb_cond_traverse_context : public Sql_alloc
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
Ndb_cond_traverse_context(TABLE *tab, const NdbDictionary::Table *ndb_tab,
|
||||||
|
Ndb_cond_stack* stack)
|
||||||
|
: table(tab), ndb_table(ndb_tab),
|
||||||
|
supported(TRUE), stack_ptr(stack), cond_ptr(NULL),
|
||||||
|
skip(0), collation(NULL), rewrite_stack(NULL)
|
||||||
|
{
|
||||||
|
// Allocate type checking bitmaps
|
||||||
|
bitmap_init(&expect_mask, 0, 512, FALSE);
|
||||||
|
bitmap_init(&expect_field_type_mask, 0, 512, FALSE);
|
||||||
|
bitmap_init(&expect_field_result_mask, 0, 512, FALSE);
|
||||||
|
|
||||||
|
if (stack)
|
||||||
|
cond_ptr= stack->ndb_cond;
|
||||||
|
};
|
||||||
|
~Ndb_cond_traverse_context()
|
||||||
|
{
|
||||||
|
bitmap_free(&expect_mask);
|
||||||
|
bitmap_free(&expect_field_type_mask);
|
||||||
|
bitmap_free(&expect_field_result_mask);
|
||||||
|
if (rewrite_stack) delete rewrite_stack;
|
||||||
|
}
|
||||||
|
void expect(Item::Type type)
|
||||||
|
{
|
||||||
|
bitmap_set_bit(&expect_mask, (uint) type);
|
||||||
|
if (type == Item::FIELD_ITEM) expect_all_field_types();
|
||||||
|
};
|
||||||
|
void dont_expect(Item::Type type)
|
||||||
|
{
|
||||||
|
bitmap_clear_bit(&expect_mask, (uint) type);
|
||||||
|
};
|
||||||
|
bool expecting(Item::Type type)
|
||||||
|
{
|
||||||
|
return bitmap_is_set(&expect_mask, (uint) type);
|
||||||
|
};
|
||||||
|
void expect_nothing()
|
||||||
|
{
|
||||||
|
bitmap_clear_all(&expect_mask);
|
||||||
|
};
|
||||||
|
bool expecting_nothing()
|
||||||
|
{
|
||||||
|
return bitmap_is_clear_all(&expect_mask);
|
||||||
|
}
|
||||||
|
void expect_only(Item::Type type)
|
||||||
|
{
|
||||||
|
expect_nothing();
|
||||||
|
expect(type);
|
||||||
|
};
|
||||||
|
|
||||||
|
void expect_field_type(enum_field_types type)
|
||||||
|
{
|
||||||
|
bitmap_set_bit(&expect_field_type_mask, (uint) type);
|
||||||
|
};
|
||||||
|
void expect_all_field_types()
|
||||||
|
{
|
||||||
|
bitmap_set_all(&expect_field_type_mask);
|
||||||
|
};
|
||||||
|
bool expecting_field_type(enum_field_types type)
|
||||||
|
{
|
||||||
|
return bitmap_is_set(&expect_field_type_mask, (uint) type);
|
||||||
|
};
|
||||||
|
void expect_no_field_type()
|
||||||
|
{
|
||||||
|
bitmap_clear_all(&expect_field_type_mask);
|
||||||
|
};
|
||||||
|
bool expecting_no_field_type()
|
||||||
|
{
|
||||||
|
return bitmap_is_clear_all(&expect_field_type_mask);
|
||||||
|
}
|
||||||
|
void expect_only_field_type(enum_field_types result)
|
||||||
|
{
|
||||||
|
expect_no_field_type();
|
||||||
|
expect_field_type(result);
|
||||||
|
};
|
||||||
|
|
||||||
|
void expect_field_result(Item_result result)
|
||||||
|
{
|
||||||
|
bitmap_set_bit(&expect_field_result_mask, (uint) result);
|
||||||
|
};
|
||||||
|
bool expecting_field_result(Item_result result)
|
||||||
|
{
|
||||||
|
return bitmap_is_set(&expect_field_result_mask, (uint) result);
|
||||||
|
};
|
||||||
|
void expect_no_field_result()
|
||||||
|
{
|
||||||
|
bitmap_clear_all(&expect_field_result_mask);
|
||||||
|
};
|
||||||
|
bool expecting_no_field_result()
|
||||||
|
{
|
||||||
|
return bitmap_is_clear_all(&expect_field_result_mask);
|
||||||
|
}
|
||||||
|
void expect_only_field_result(Item_result result)
|
||||||
|
{
|
||||||
|
expect_no_field_result();
|
||||||
|
expect_field_result(result);
|
||||||
|
};
|
||||||
|
void expect_collation(CHARSET_INFO* col)
|
||||||
|
{
|
||||||
|
collation= col;
|
||||||
|
};
|
||||||
|
bool expecting_collation(CHARSET_INFO* col)
|
||||||
|
{
|
||||||
|
bool matching= (!collation) ? true : (collation == col);
|
||||||
|
collation= NULL;
|
||||||
|
|
||||||
|
return matching;
|
||||||
|
};
|
||||||
|
|
||||||
|
TABLE* table;
|
||||||
|
const NdbDictionary::Table *ndb_table;
|
||||||
|
bool supported;
|
||||||
|
Ndb_cond_stack* stack_ptr;
|
||||||
|
Ndb_cond* cond_ptr;
|
||||||
|
MY_BITMAP expect_mask;
|
||||||
|
MY_BITMAP expect_field_type_mask;
|
||||||
|
MY_BITMAP expect_field_result_mask;
|
||||||
|
uint skip;
|
||||||
|
CHARSET_INFO* collation;
|
||||||
|
Ndb_rewrite_context *rewrite_stack;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ha_ndbcluster;
|
||||||
|
|
||||||
|
class ha_ndbcluster_cond
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
ha_ndbcluster_cond()
|
||||||
|
: m_cond_stack(NULL)
|
||||||
|
{}
|
||||||
|
~ha_ndbcluster_cond()
|
||||||
|
{ if (m_cond_stack) delete m_cond_stack; }
|
||||||
|
const COND *cond_push(const COND *cond,
|
||||||
|
TABLE *table, const NdbDictionary::Table *ndb_table);
|
||||||
|
void cond_pop();
|
||||||
|
void cond_clear();
|
||||||
|
int generate_scan_filter(NdbScanOperation* op);
|
||||||
|
int generate_scan_filter_from_cond(NdbScanFilter& filter);
|
||||||
|
int generate_scan_filter_from_key(NdbScanOperation* op,
|
||||||
|
const KEY* key_info,
|
||||||
|
const byte *key,
|
||||||
|
uint key_len,
|
||||||
|
byte *buf);
|
||||||
|
private:
|
||||||
|
bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond,
|
||||||
|
TABLE *table, const NdbDictionary::Table *ndb_table);
|
||||||
|
int build_scan_filter_predicate(Ndb_cond* &cond,
|
||||||
|
NdbScanFilter* filter,
|
||||||
|
bool negated= false);
|
||||||
|
int build_scan_filter_group(Ndb_cond* &cond,
|
||||||
|
NdbScanFilter* filter);
|
||||||
|
int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter);
|
||||||
|
|
||||||
|
Ndb_cond_stack *m_cond_stack;
|
||||||
|
};
|
||||||
@@ -2917,11 +2917,10 @@ Dbtup::nr_update_gci(Uint32 fragPtrI, const Local_key* key, Uint32 gci)
|
|||||||
int ret;
|
int ret;
|
||||||
if (tablePtr.p->m_attributes[MM].m_no_of_varsize)
|
if (tablePtr.p->m_attributes[MM].m_no_of_varsize)
|
||||||
{
|
{
|
||||||
tablePtr.p->m_offsets[MM].m_fix_header_size +=
|
const Uint32 XXX = Tuple_header::HeaderSize+Var_part_ref::SZ32;
|
||||||
Tuple_header::HeaderSize+1;
|
tablePtr.p->m_offsets[MM].m_fix_header_size += XXX;
|
||||||
ret = alloc_page(tablePtr.p, fragPtr.p, &page_ptr, tmp.m_page_no);
|
ret = alloc_page(tablePtr.p, fragPtr.p, &page_ptr, tmp.m_page_no);
|
||||||
tablePtr.p->m_offsets[MM].m_fix_header_size -=
|
tablePtr.p->m_offsets[MM].m_fix_header_size -= XXX;
|
||||||
Tuple_header::HeaderSize+1;
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -27,15 +27,34 @@ public:
|
|||||||
|
|
||||||
int getDbNodeId(int _i);
|
int getDbNodeId(int _i);
|
||||||
|
|
||||||
|
enum RestartFlags {
|
||||||
|
NRRF_INITIAL = 0x1,
|
||||||
|
NRRF_NOSTART = 0x2,
|
||||||
|
NRRF_ABORT = 0x4
|
||||||
|
};
|
||||||
|
|
||||||
int restartOneDbNode(int _nodeId,
|
int restartOneDbNode(int _nodeId,
|
||||||
bool initial = false,
|
bool initial = false,
|
||||||
bool nostart = false,
|
bool nostart = false,
|
||||||
bool abort = false);
|
bool abort = false);
|
||||||
|
|
||||||
|
int restartOneDbNode2(int _nodeId, Uint32 flags){
|
||||||
|
return restartOneDbNode(_nodeId,
|
||||||
|
flags & NRRF_INITIAL,
|
||||||
|
flags & NRRF_NOSTART,
|
||||||
|
flags & NRRF_ABORT);
|
||||||
|
}
|
||||||
|
|
||||||
int restartAll(bool initial = false,
|
int restartAll(bool initial = false,
|
||||||
bool nostart = false,
|
bool nostart = false,
|
||||||
bool abort = false);
|
bool abort = false);
|
||||||
|
|
||||||
|
int restartAll2(Uint32 flags){
|
||||||
|
return restartAll(flags & NRRF_INITIAL,
|
||||||
|
flags & NRRF_NOSTART,
|
||||||
|
flags & NRRF_ABORT);
|
||||||
|
}
|
||||||
|
|
||||||
int startAll();
|
int startAll();
|
||||||
int startNodes(const int * _nodes, int _num_nodes);
|
int startNodes(const int * _nodes, int _num_nodes);
|
||||||
int waitClusterStarted(unsigned int _timeout = 120);
|
int waitClusterStarted(unsigned int _timeout = 120);
|
||||||
|
|||||||
@@ -1567,6 +1567,72 @@ runBug27466(NDBT_Context* ctx, NDBT_Step* step)
|
|||||||
return NDBT_OK;
|
return NDBT_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int
|
||||||
|
runBug28023(NDBT_Context* ctx, NDBT_Step* step)
|
||||||
|
{
|
||||||
|
int result = NDBT_OK;
|
||||||
|
int loops = ctx->getNumLoops();
|
||||||
|
int records = ctx->getNumRecords();
|
||||||
|
Ndb* pNdb = GETNDB(step);
|
||||||
|
NdbRestarter res;
|
||||||
|
|
||||||
|
if (res.getNumDbNodes() < 2)
|
||||||
|
{
|
||||||
|
return NDBT_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
HugoTransactions hugoTrans(*ctx->getTab());
|
||||||
|
if (hugoTrans.loadTable(pNdb, records) != 0){
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hugoTrans.clearTable(pNdb, records) != 0)
|
||||||
|
{
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (Uint32 i = 0; i<loops; i++)
|
||||||
|
{
|
||||||
|
int node1 = res.getDbNodeId(rand() % res.getNumDbNodes());
|
||||||
|
|
||||||
|
if (res.restartOneDbNode2(node1,
|
||||||
|
NdbRestarter::NRRF_ABORT |
|
||||||
|
NdbRestarter::NRRF_NOSTART))
|
||||||
|
return NDBT_FAILED;
|
||||||
|
|
||||||
|
if (res.waitNodesNoStart(&node1, 1))
|
||||||
|
return NDBT_FAILED;
|
||||||
|
|
||||||
|
if (hugoTrans.loadTable(pNdb, records) != 0){
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hugoTrans.clearTable(pNdb, records) != 0)
|
||||||
|
{
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.startNodes(&node1, 1);
|
||||||
|
if (res.waitClusterStarted())
|
||||||
|
return NDBT_FAILED;
|
||||||
|
|
||||||
|
if (hugoTrans.loadTable(pNdb, records) != 0){
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hugoTrans.scanUpdateRecords(pNdb, records) != 0)
|
||||||
|
return NDBT_FAILED;
|
||||||
|
|
||||||
|
if (hugoTrans.clearTable(pNdb, records) != 0)
|
||||||
|
{
|
||||||
|
return NDBT_FAILED;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return NDBT_OK;
|
||||||
|
}
|
||||||
|
|
||||||
NDBT_TESTSUITE(testNodeRestart);
|
NDBT_TESTSUITE(testNodeRestart);
|
||||||
TESTCASE("NoLoad",
|
TESTCASE("NoLoad",
|
||||||
"Test that one node at a time can be stopped and then restarted "\
|
"Test that one node at a time can be stopped and then restarted "\
|
||||||
@@ -1924,6 +1990,9 @@ TESTCASE("Bug27283", ""){
|
|||||||
TESTCASE("Bug27466", ""){
|
TESTCASE("Bug27466", ""){
|
||||||
INITIALIZER(runBug27466);
|
INITIALIZER(runBug27466);
|
||||||
}
|
}
|
||||||
|
TESTCASE("Bug28023", ""){
|
||||||
|
INITIALIZER(runBug28023);
|
||||||
|
}
|
||||||
NDBT_TESTSUITE_END(testNodeRestart);
|
NDBT_TESTSUITE_END(testNodeRestart);
|
||||||
|
|
||||||
int main(int argc, const char** argv){
|
int main(int argc, const char** argv){
|
||||||
|
|||||||
@@ -549,6 +549,10 @@ max-time: 1000
|
|||||||
cmd: testNodeRestart
|
cmd: testNodeRestart
|
||||||
args: -n Bug26481 T1
|
args: -n Bug26481 T1
|
||||||
|
|
||||||
|
max-time: 1000
|
||||||
|
cmd: testNodeRestart
|
||||||
|
args: -n Bug28023 T7 D2
|
||||||
|
|
||||||
#
|
#
|
||||||
# DICT TESTS
|
# DICT TESTS
|
||||||
max-time: 1500
|
max-time: 1500
|
||||||
|
|||||||
@@ -1146,7 +1146,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PKs
|
// PKs
|
||||||
if (equalForRow(pOp, r) != 0)
|
if (equalForRow(pUpdOp, r) != 0)
|
||||||
{
|
{
|
||||||
closeTransaction(pNdb);
|
closeTransaction(pNdb);
|
||||||
return NDBT_FAILED;
|
return NDBT_FAILED;
|
||||||
@@ -1714,7 +1714,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
|
|||||||
|
|
||||||
if(!ordered)
|
if(!ordered)
|
||||||
{
|
{
|
||||||
if (equalForRow(pOp, r+b) != 0)
|
if (equalForRow(pUpdOp, r+b) != 0)
|
||||||
{
|
{
|
||||||
closeTransaction(pNdb);
|
closeTransaction(pNdb);
|
||||||
return NDBT_FAILED;
|
return NDBT_FAILED;
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
# along with this program; if not, write to the Free Software
|
# along with this program; if not, write to the Free Software
|
||||||
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
|
||||||
ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog
|
ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency
|
||||||
|
|
||||||
# transproxy
|
# transproxy
|
||||||
|
|
||||||
@@ -34,6 +34,7 @@ create_index_SOURCES = create_index.cpp
|
|||||||
ndb_cpcc_SOURCES = cpcc.cpp
|
ndb_cpcc_SOURCES = cpcc.cpp
|
||||||
listen_event_SOURCES = listen.cpp
|
listen_event_SOURCES = listen.cpp
|
||||||
eventlog_SOURCES = log_listner.cpp
|
eventlog_SOURCES = log_listner.cpp
|
||||||
|
rep_latency_SOURCES = rep_latency.cpp
|
||||||
|
|
||||||
include $(top_srcdir)/storage/ndb/config/common.mk.am
|
include $(top_srcdir)/storage/ndb/config/common.mk.am
|
||||||
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
|
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
|
||||||
|
|||||||
@@ -22,6 +22,128 @@
|
|||||||
#include <getarg.h>
|
#include <getarg.h>
|
||||||
|
|
||||||
|
|
||||||
|
#define BATCH_SIZE 128
|
||||||
|
struct Table_info
|
||||||
|
{
|
||||||
|
Uint32 id;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Trans_arg
|
||||||
|
{
|
||||||
|
Ndb *ndb;
|
||||||
|
NdbTransaction *trans;
|
||||||
|
Uint32 bytes_batched;
|
||||||
|
};
|
||||||
|
|
||||||
|
Vector< Vector<NdbRecAttr*> > event_values;
|
||||||
|
Vector< Vector<NdbRecAttr*> > event_pre_values;
|
||||||
|
Vector<struct Table_info> table_infos;
|
||||||
|
|
||||||
|
static void do_begin(Ndb *ndb, struct Trans_arg &trans_arg)
|
||||||
|
{
|
||||||
|
trans_arg.ndb = ndb;
|
||||||
|
trans_arg.trans = ndb->startTransaction();
|
||||||
|
trans_arg.bytes_batched = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void do_equal(NdbOperation *op,
|
||||||
|
NdbEventOperation *pOp)
|
||||||
|
{
|
||||||
|
struct Table_info *ti = (struct Table_info *)pOp->getCustomData();
|
||||||
|
Vector<NdbRecAttr*> &ev = event_values[ti->id];
|
||||||
|
const NdbDictionary::Table *tab= pOp->getTable();
|
||||||
|
unsigned i, n_columns = tab->getNoOfColumns();
|
||||||
|
for (i= 0; i < n_columns; i++)
|
||||||
|
{
|
||||||
|
if (tab->getColumn(i)->getPrimaryKey() &&
|
||||||
|
op->equal(i, ev[i]->aRef()))
|
||||||
|
{
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void do_set_value(NdbOperation *op,
|
||||||
|
NdbEventOperation *pOp)
|
||||||
|
{
|
||||||
|
struct Table_info *ti = (struct Table_info *)pOp->getCustomData();
|
||||||
|
Vector<NdbRecAttr*> &ev = event_values[ti->id];
|
||||||
|
const NdbDictionary::Table *tab= pOp->getTable();
|
||||||
|
unsigned i, n_columns = tab->getNoOfColumns();
|
||||||
|
for (i= 0; i < n_columns; i++)
|
||||||
|
{
|
||||||
|
if (!tab->getColumn(i)->getPrimaryKey() &&
|
||||||
|
op->setValue(i, ev[i]->aRef()))
|
||||||
|
{
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void do_insert(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
|
||||||
|
{
|
||||||
|
if (!trans_arg.trans)
|
||||||
|
return;
|
||||||
|
|
||||||
|
NdbOperation *op =
|
||||||
|
trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
|
||||||
|
op->writeTuple();
|
||||||
|
|
||||||
|
do_equal(op, pOp);
|
||||||
|
do_set_value(op, pOp);
|
||||||
|
|
||||||
|
trans_arg.bytes_batched++;
|
||||||
|
if (trans_arg.bytes_batched > BATCH_SIZE)
|
||||||
|
{
|
||||||
|
trans_arg.trans->execute(NdbTransaction::NoCommit);
|
||||||
|
trans_arg.bytes_batched = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static void do_update(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
|
||||||
|
{
|
||||||
|
if (!trans_arg.trans)
|
||||||
|
return;
|
||||||
|
|
||||||
|
NdbOperation *op =
|
||||||
|
trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
|
||||||
|
op->writeTuple();
|
||||||
|
|
||||||
|
do_equal(op, pOp);
|
||||||
|
do_set_value(op, pOp);
|
||||||
|
|
||||||
|
trans_arg.bytes_batched++;
|
||||||
|
if (trans_arg.bytes_batched > BATCH_SIZE)
|
||||||
|
{
|
||||||
|
trans_arg.trans->execute(NdbTransaction::NoCommit);
|
||||||
|
trans_arg.bytes_batched = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static void do_delete(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
|
||||||
|
{
|
||||||
|
if (!trans_arg.trans)
|
||||||
|
return;
|
||||||
|
|
||||||
|
NdbOperation *op =
|
||||||
|
trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
|
||||||
|
op->deleteTuple();
|
||||||
|
|
||||||
|
do_equal(op, pOp);
|
||||||
|
|
||||||
|
trans_arg.bytes_batched++;
|
||||||
|
if (trans_arg.bytes_batched > BATCH_SIZE)
|
||||||
|
{
|
||||||
|
trans_arg.trans->execute(NdbTransaction::NoCommit);
|
||||||
|
trans_arg.bytes_batched = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
static void do_commit(struct Trans_arg &trans_arg)
|
||||||
|
{
|
||||||
|
if (!trans_arg.trans)
|
||||||
|
return;
|
||||||
|
trans_arg.trans->execute(NdbTransaction::Commit);
|
||||||
|
trans_arg.ndb->closeTransaction(trans_arg.trans);
|
||||||
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
main(int argc, const char** argv){
|
main(int argc, const char** argv){
|
||||||
ndb_init();
|
ndb_init();
|
||||||
@@ -29,8 +151,14 @@ main(int argc, const char** argv){
|
|||||||
|
|
||||||
int _help = 0;
|
int _help = 0;
|
||||||
const char* db = 0;
|
const char* db = 0;
|
||||||
|
const char* connectstring1 = 0;
|
||||||
|
const char* connectstring2 = 0;
|
||||||
|
|
||||||
struct getargs args[] = {
|
struct getargs args[] = {
|
||||||
|
{ "connectstring1", 'c',
|
||||||
|
arg_string, &connectstring1, "connectstring1", "" },
|
||||||
|
{ "connectstring2", 'C',
|
||||||
|
arg_string, &connectstring2, "connectstring2", "" },
|
||||||
{ "database", 'd', arg_string, &db, "Database", "" },
|
{ "database", 'd', arg_string, &db, "Database", "" },
|
||||||
{ "usage", '?', arg_flag, &_help, "Print help", "" }
|
{ "usage", '?', arg_flag, &_help, "Print help", "" }
|
||||||
};
|
};
|
||||||
@@ -46,7 +174,7 @@ main(int argc, const char** argv){
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Connect to Ndb
|
// Connect to Ndb
|
||||||
Ndb_cluster_connection con;
|
Ndb_cluster_connection con(connectstring1);
|
||||||
if(con.connect(12, 5, 1) != 0)
|
if(con.connect(12, 5, 1) != 0)
|
||||||
{
|
{
|
||||||
return NDBT_ProgramExit(NDBT_FAILED);
|
return NDBT_ProgramExit(NDBT_FAILED);
|
||||||
@@ -62,11 +190,34 @@ main(int argc, const char** argv){
|
|||||||
while(MyNdb.waitUntilReady() != 0)
|
while(MyNdb.waitUntilReady() != 0)
|
||||||
ndbout << "Waiting for ndb to become ready..." << endl;
|
ndbout << "Waiting for ndb to become ready..." << endl;
|
||||||
|
|
||||||
|
Ndb_cluster_connection *con2 = NULL;
|
||||||
|
Ndb *ndb2 = NULL;
|
||||||
|
if (connectstring2)
|
||||||
|
{
|
||||||
|
con2 = new Ndb_cluster_connection(connectstring2);
|
||||||
|
|
||||||
|
if(con2->connect(12, 5, 1) != 0)
|
||||||
|
{
|
||||||
|
return NDBT_ProgramExit(NDBT_FAILED);
|
||||||
|
}
|
||||||
|
ndb2 = new Ndb( con2, db ? db : "TEST_DB" );
|
||||||
|
|
||||||
|
if(ndb2->init() != 0){
|
||||||
|
ERR(ndb2->getNdbError());
|
||||||
|
return NDBT_ProgramExit(NDBT_FAILED);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect to Ndb and wait for it to become ready
|
||||||
|
while(ndb2->waitUntilReady() != 0)
|
||||||
|
ndbout << "Waiting for ndb to become ready..." << endl;
|
||||||
|
}
|
||||||
|
|
||||||
int result = 0;
|
int result = 0;
|
||||||
|
|
||||||
NdbDictionary::Dictionary *myDict = MyNdb.getDictionary();
|
NdbDictionary::Dictionary *myDict = MyNdb.getDictionary();
|
||||||
Vector<NdbDictionary::Event*> events;
|
Vector<NdbDictionary::Event*> events;
|
||||||
Vector<NdbEventOperation*> event_ops;
|
Vector<NdbEventOperation*> event_ops;
|
||||||
|
int sz = 0;
|
||||||
for(i= optind; i<argc; i++)
|
for(i= optind; i<argc; i++)
|
||||||
{
|
{
|
||||||
const NdbDictionary::Table* table= myDict->getTable(argv[i]);
|
const NdbDictionary::Table* table= myDict->getTable(argv[i]);
|
||||||
@@ -121,12 +272,23 @@ main(int argc, const char** argv){
|
|||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
event_values.push_back(Vector<NdbRecAttr *>());
|
||||||
|
event_pre_values.push_back(Vector<NdbRecAttr *>());
|
||||||
for (int a = 0; a < table->getNoOfColumns(); a++)
|
for (int a = 0; a < table->getNoOfColumns(); a++)
|
||||||
{
|
{
|
||||||
pOp->getValue(table->getColumn(a)->getName());
|
event_values[sz].
|
||||||
pOp->getPreValue(table->getColumn(a)->getName());
|
push_back(pOp->getValue(table->getColumn(a)->getName()));
|
||||||
|
event_pre_values[sz].
|
||||||
|
push_back(pOp->getPreValue(table->getColumn(a)->getName()));
|
||||||
}
|
}
|
||||||
event_ops.push_back(pOp);
|
event_ops.push_back(pOp);
|
||||||
|
{
|
||||||
|
struct Table_info ti;
|
||||||
|
ti.id = sz;
|
||||||
|
table_infos.push_back(ti);
|
||||||
|
}
|
||||||
|
pOp->setCustomData((void *)&table_infos[sz]);
|
||||||
|
sz++;
|
||||||
}
|
}
|
||||||
|
|
||||||
for(i= 0; i<(int)event_ops.size(); i++)
|
for(i= 0; i<(int)event_ops.size(); i++)
|
||||||
@@ -140,6 +302,7 @@ main(int argc, const char** argv){
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct Trans_arg trans_arg;
|
||||||
while(true)
|
while(true)
|
||||||
{
|
{
|
||||||
while(MyNdb.pollEvents(100) == 0);
|
while(MyNdb.pollEvents(100) == 0);
|
||||||
@@ -149,18 +312,26 @@ main(int argc, const char** argv){
|
|||||||
{
|
{
|
||||||
Uint64 gci= pOp->getGCI();
|
Uint64 gci= pOp->getGCI();
|
||||||
Uint64 cnt_i= 0, cnt_u= 0, cnt_d= 0;
|
Uint64 cnt_i= 0, cnt_u= 0, cnt_d= 0;
|
||||||
|
if (ndb2)
|
||||||
|
do_begin(ndb2, trans_arg);
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
switch(pOp->getEventType())
|
switch(pOp->getEventType())
|
||||||
{
|
{
|
||||||
case NdbDictionary::Event::TE_INSERT:
|
case NdbDictionary::Event::TE_INSERT:
|
||||||
cnt_i++;
|
cnt_i++;
|
||||||
|
if (ndb2)
|
||||||
|
do_insert(trans_arg, pOp);
|
||||||
break;
|
break;
|
||||||
case NdbDictionary::Event::TE_DELETE:
|
case NdbDictionary::Event::TE_DELETE:
|
||||||
cnt_d++;
|
cnt_d++;
|
||||||
|
if (ndb2)
|
||||||
|
do_delete(trans_arg, pOp);
|
||||||
break;
|
break;
|
||||||
case NdbDictionary::Event::TE_UPDATE:
|
case NdbDictionary::Event::TE_UPDATE:
|
||||||
cnt_u++;
|
cnt_u++;
|
||||||
|
if (ndb2)
|
||||||
|
do_update(trans_arg, pOp);
|
||||||
break;
|
break;
|
||||||
case NdbDictionary::Event::TE_CLUSTER_FAILURE:
|
case NdbDictionary::Event::TE_CLUSTER_FAILURE:
|
||||||
break;
|
break;
|
||||||
@@ -180,6 +351,8 @@ main(int argc, const char** argv){
|
|||||||
abort();
|
abort();
|
||||||
}
|
}
|
||||||
} while ((pOp= MyNdb.nextEvent()) && gci == pOp->getGCI());
|
} while ((pOp= MyNdb.nextEvent()) && gci == pOp->getGCI());
|
||||||
|
if (ndb2)
|
||||||
|
do_commit(trans_arg);
|
||||||
ndbout_c("GCI: %lld events: %lld(I) %lld(U) %lld(D)", gci, cnt_i, cnt_u, cnt_d);
|
ndbout_c("GCI: %lld events: %lld(I) %lld(U) %lld(D)", gci, cnt_i, cnt_u, cnt_d);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -187,8 +360,15 @@ end:
|
|||||||
for(i= 0; i<(int)event_ops.size(); i++)
|
for(i= 0; i<(int)event_ops.size(); i++)
|
||||||
MyNdb.dropEventOperation(event_ops[i]);
|
MyNdb.dropEventOperation(event_ops[i]);
|
||||||
|
|
||||||
|
if (ndb2)
|
||||||
|
delete ndb2;
|
||||||
|
if (con2)
|
||||||
|
delete con2;
|
||||||
return NDBT_ProgramExit(NDBT_OK);
|
return NDBT_ProgramExit(NDBT_OK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template class Vector<struct Table_info>;
|
||||||
|
template class Vector<NdbRecAttr*>;
|
||||||
|
template class Vector< Vector<NdbRecAttr*> >;
|
||||||
template class Vector<NdbDictionary::Event*>;
|
template class Vector<NdbDictionary::Event*>;
|
||||||
template class Vector<NdbEventOperation*>;
|
template class Vector<NdbEventOperation*>;
|
||||||
|
|||||||
304
storage/ndb/test/tools/rep_latency.cpp
Normal file
304
storage/ndb/test/tools/rep_latency.cpp
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
/* Copyright (C) 2003 MySQL AB
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; version 2 of the License.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program; if not, write to the Free Software
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update on master wait for update on slave
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <NdbApi.hpp>
|
||||||
|
#include <NdbSleep.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <NdbOut.hpp>
|
||||||
|
#include <NDBT.hpp>
|
||||||
|
|
||||||
|
struct Xxx
|
||||||
|
{
|
||||||
|
Ndb *ndb;
|
||||||
|
const NdbDictionary::Table *table;
|
||||||
|
Uint32 pk_col;
|
||||||
|
Uint32 col;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct XxxR
|
||||||
|
{
|
||||||
|
Uint32 pk_val;
|
||||||
|
Uint32 val;
|
||||||
|
struct timeval start_time;
|
||||||
|
Uint32 latency;
|
||||||
|
};
|
||||||
|
|
||||||
|
static int
|
||||||
|
prepare_master_or_slave(Ndb &myNdb,
|
||||||
|
const char* table,
|
||||||
|
const char* pk,
|
||||||
|
Uint32 pk_val,
|
||||||
|
const char* col,
|
||||||
|
struct Xxx &xxx,
|
||||||
|
struct XxxR &xxxr);
|
||||||
|
static void
|
||||||
|
run_master_update(struct Xxx &xxx, struct XxxR &xxxr);
|
||||||
|
static void
|
||||||
|
run_slave_wait(struct Xxx &xxx, struct XxxR &xxxr);
|
||||||
|
|
||||||
|
#define PRINT_ERROR(code,msg) \
|
||||||
|
g_err << "Error in " << __FILE__ << ", line: " << __LINE__ \
|
||||||
|
<< ", code: " << code \
|
||||||
|
<< ", msg: " << msg << ".\n"
|
||||||
|
#define APIERROR(error) { \
|
||||||
|
PRINT_ERROR((error).code, (error).message); \
|
||||||
|
exit(-1); }
|
||||||
|
|
||||||
|
/*
 * Entry point.  Connects to two clusters, validates the same table/column
 * on each, then loops forever: update the row on cluster 1, wait for the
 * new value to appear on cluster 2, and print the measured latency.
 *
 * argv: <connstr cluster 1> <connstr cluster 2> <database> <table>
 *       <primary key column> <primary key value> <attribute to update>
 */
int main(int argc, char** argv)
{
  if (argc != 8)
  {
    ndbout << "Arguments are <connect_string cluster 1> <connect_string cluster 2> <database> <table name> <primary key> <value of primary key> <attribute to update>.\n";
    exit(-1);
  }
  // ndb_init must be called first
  ndb_init();
  // Inner scope ensures the Ndb/Ndb_cluster_connection objects are
  // destroyed before ndb_end() below.
  {
    const char *opt_connectstring1 = argv[1];
    const char *opt_connectstring2 = argv[2];
    const char *opt_db = argv[3];
    const char *opt_table = argv[4];
    const char *opt_pk = argv[5];
    const Uint32 opt_pk_val = atoi(argv[6]);
    const char *opt_col = argv[7];

    // Object representing the cluster 1
    Ndb_cluster_connection cluster1_connection(opt_connectstring1);
    // Object representing the cluster 2
    Ndb_cluster_connection cluster2_connection(opt_connectstring2);

    // connect cluster 1 and run application
    // Connect to cluster 1 management server (ndb_mgmd)
    if (cluster1_connection.connect(4 /* retries */,
                                    5 /* delay between retries */,
                                    1 /* verbose */))
    {
      g_err << "Cluster 1 management server was not ready within 30 secs.\n";
      exit(-1);
    }
    // Optionally connect and wait for the storage nodes (ndbd's)
    if (cluster1_connection.wait_until_ready(30,0) < 0)
    {
      g_err << "Cluster 1 was not ready within 30 secs.\n";
      exit(-1);
    }
    // connect cluster 2 and run application
    // Connect to cluster management server (ndb_mgmd)
    if (cluster2_connection.connect(4 /* retries */,
                                    5 /* delay between retries */,
                                    1 /* verbose */))
    {
      g_err << "Cluster 2 management server was not ready within 30 secs.\n";
      exit(-1);
    }
    // Optionally connect and wait for the storage nodes (ndbd's)
    if (cluster2_connection.wait_until_ready(30,0) < 0)
    {
      g_err << "Cluster 2 was not ready within 30 secs.\n";
      exit(-1);
    }
    // Object representing the database
    Ndb myNdb1(&cluster1_connection, opt_db);
    Ndb myNdb2(&cluster2_connection, opt_db);
    // Per-cluster handles (xxx1 = master, xxx2 = slave) and the shared
    // request/timing state.
    struct Xxx xxx1;
    struct Xxx xxx2;
    struct XxxR xxxr;
    prepare_master_or_slave(myNdb1, opt_table, opt_pk, opt_pk_val, opt_col,
                            xxx1, xxxr);
    prepare_master_or_slave(myNdb2, opt_table, opt_pk, opt_pk_val, opt_col,
                            xxx2, xxxr);
    // Measurement loop: never exits normally (terminated externally or by
    // APIERROR inside the helpers).
    while (1)
    {
      // run the application code
      run_master_update(xxx1, xxxr);
      run_slave_wait(xxx2, xxxr);
      ndbout << "latency: " << xxxr.latency << endl;
    }
  }
  // Note: all connections must have been destroyed before calling ndb_end()
  ndb_end(0);

  return 0;
}
static int
|
||||||
|
prepare_master_or_slave(Ndb &myNdb,
|
||||||
|
const char* table,
|
||||||
|
const char* pk,
|
||||||
|
Uint32 pk_val,
|
||||||
|
const char* col,
|
||||||
|
struct Xxx &xxx,
|
||||||
|
struct XxxR &xxxr)
|
||||||
|
{
|
||||||
|
if (myNdb.init())
|
||||||
|
APIERROR(myNdb.getNdbError());
|
||||||
|
const NdbDictionary::Dictionary* myDict = myNdb.getDictionary();
|
||||||
|
const NdbDictionary::Table *myTable = myDict->getTable(table);
|
||||||
|
if (myTable == NULL)
|
||||||
|
APIERROR(myDict->getNdbError());
|
||||||
|
const NdbDictionary::Column *myPkCol = myTable->getColumn(pk);
|
||||||
|
if (myPkCol == NULL)
|
||||||
|
APIERROR(myDict->getNdbError());
|
||||||
|
if (myPkCol->getType() != NdbDictionary::Column::Unsigned)
|
||||||
|
{
|
||||||
|
PRINT_ERROR(0, "Primary key column not of type unsigned");
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
const NdbDictionary::Column *myCol = myTable->getColumn(col);
|
||||||
|
if (myCol == NULL)
|
||||||
|
APIERROR(myDict->getNdbError());
|
||||||
|
if (myCol->getType() != NdbDictionary::Column::Unsigned)
|
||||||
|
{
|
||||||
|
PRINT_ERROR(0, "Update column not of type unsigned");
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
xxx.ndb = &myNdb;
|
||||||
|
xxx.table = myTable;
|
||||||
|
xxx.pk_col = myPkCol->getColumnNo();
|
||||||
|
xxx.col = myCol->getColumnNo();
|
||||||
|
|
||||||
|
xxxr.pk_val = pk_val;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Read the current value of the row on the master cluster, increment it,
 * and commit the update in a single transaction.  Temporary NDB errors are
 * retried up to 'retries' times with a short sleep between attempts; any
 * other error terminates via APIERROR.  On success the new value is stored
 * in xxxr.val and the latency timer (xxxr.start_time) is started.
 */
static void run_master_update(struct Xxx &xxx, struct XxxR &xxxr)
{
  Ndb *ndb = xxx.ndb;
  const NdbDictionary::Table *myTable = xxx.table;
  int retry_sleep= 10; /* 10 milliseconds */
  int retries= 100;
  while (1)
  {
    Uint32 val;
    NdbTransaction *trans = ndb->startTransaction();
    if (trans == NULL)
      goto err;
    // First operation: exclusive-lock read of the current value into 'val'.
    {
      NdbOperation *op = trans->getNdbOperation(myTable);
      if (op == NULL)
        APIERROR(trans->getNdbError());
      op->readTupleExclusive();
      op->equal(xxx.pk_col, xxxr.pk_val);
      op->getValue(xxx.col, (char *)&val);
    }
    // Execute the read without committing so the update below runs in the
    // same transaction.
    if (trans->execute(NdbTransaction::NoCommit))
      goto err;
    //fprintf(stderr, "read %u\n", val);
    xxxr.val = val + 1;  // the new value the slave will wait for
    // Second operation: write back the incremented value and commit.
    {
      NdbOperation *op = trans->getNdbOperation(myTable);
      if (op == NULL)
        APIERROR(trans->getNdbError());
      op->updateTuple();
      op->equal(xxx.pk_col, xxxr.pk_val);
      op->setValue(xxx.col, xxxr.val);
    }
    if (trans->execute(NdbTransaction::Commit))
      goto err;
    ndb->closeTransaction(trans);
    //fprintf(stderr, "updated to %u\n", xxxr.val);
    break;
err:
    // trans may be NULL here (startTransaction failure), so the error is
    // taken from the transaction when available, otherwise from the Ndb
    // object.  Temporary errors are retried; anything else is fatal.
    const NdbError this_error= trans ?
      trans->getNdbError() : ndb->getNdbError();
    if (this_error.status == NdbError::TemporaryError)
    {
      if (retries--)
      {
        if (trans)
          ndb->closeTransaction(trans);
        NdbSleep_MilliSleep(retry_sleep);
        continue; // retry
      }
    }
    if (trans)
      ndb->closeTransaction(trans);
    APIERROR(this_error);
  }
  /* update done start timer */
  gettimeofday(&xxxr.start_time, 0);
}
/*
 * Poll the slave cluster with committed primary-key reads until the value
 * written by run_master_update (xxxr.val) is observed.  Temporary NDB
 * errors are retried; other errors are fatal via APIERROR.  On success
 * xxxr.latency is set to the estimated replication latency in
 * milliseconds.
 */
static void run_slave_wait(struct Xxx &xxx, struct XxxR &xxxr)
{
  // old_end_time holds the previous poll's timestamp so that half the
  // polling interval can be subtracted from the measured latency below.
  struct timeval old_end_time = xxxr.start_time, end_time;
  Ndb *ndb = xxx.ndb;
  const NdbDictionary::Table *myTable = xxx.table;
  int retry_sleep= 10; /* 10 milliseconds */
  int retries= 100;
  while (1)
  {
    Uint32 val;
    NdbTransaction *trans = ndb->startTransaction();
    if (trans == NULL)
      goto err;
    // Single committed read of the row's current value into 'val'.
    {
      NdbOperation *op = trans->getNdbOperation(myTable);
      if (op == NULL)
        APIERROR(trans->getNdbError());
      op->readTuple();
      op->equal(xxx.pk_col, xxxr.pk_val);
      op->getValue(xxx.col, (char *)&val);
      if (trans->execute(NdbTransaction::Commit))
        goto err;
    }
    /* read done, check time of read */
    gettimeofday(&end_time, 0);
    ndb->closeTransaction(trans);
    //fprintf(stderr, "read %u waiting for %u\n", val, xxxr.val);
    if (xxxr.val != val)
    {
      /* expected value not received yet */
      // Reset the retry budget: only temporary errors consume it, a
      // not-yet-replicated value just polls again after a short sleep.
      retries = 100;
      NdbSleep_MilliSleep(retry_sleep);
      old_end_time = end_time;
      continue;
    }
    break;
err:
    // trans may be NULL (startTransaction failure); take the error from
    // the transaction when available, otherwise from the Ndb object.
    // Temporary errors are retried; anything else is fatal.
    const NdbError this_error= trans ?
      trans->getNdbError() : ndb->getNdbError();
    if (this_error.status == NdbError::TemporaryError)
    {
      if (retries--)
      {
        if (trans)
          ndb->closeTransaction(trans);
        NdbSleep_MilliSleep(retry_sleep);
        continue; // retry
      }
    }
    if (trans)
      ndb->closeTransaction(trans);
    APIERROR(this_error);
  }

  // elapsed_usec1: time from the master commit to the successful read.
  // elapsed_usec2: time between the last two polls.  Subtracting half the
  // poll interval compensates for not knowing when within that interval
  // the value actually arrived; +999 rounds up to whole milliseconds.
  Int64 elapsed_usec1 =
    ((Int64)end_time.tv_sec - (Int64)xxxr.start_time.tv_sec)*1000*1000 +
    ((Int64)end_time.tv_usec - (Int64)xxxr.start_time.tv_usec);
  Int64 elapsed_usec2 =
    ((Int64)end_time.tv_sec - (Int64)old_end_time.tv_sec)*1000*1000 +
    ((Int64)end_time.tv_usec - (Int64)old_end_time.tv_usec);
  xxxr.latency =
    ((elapsed_usec1 - elapsed_usec2/2)+999)/1000;
}
|
||||||
Reference in New Issue
Block a user