Mirror of https://github.com/MariaDB/server.git, synced 2025-07-30 16:24:05 +03:00
Merge whalegate.ndb.mysql.com:/home/tomas/cge-5.1
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb-merge
@@ -5,10 +5,6 @@ connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
# Check that server1 has NDB support
connection server1;
disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
--require r/true.require
select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
--source include/ndb_not_readonly.inc
@@ -17,14 +13,32 @@ enable_query_log;
# Check that server2 has NDB support
connection server2;
disable_query_log;
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
flush tables;
--require r/true.require
select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
--source include/ndb_not_readonly.inc
enable_query_log;

# Set the default connection to 'server1'
# cleanup

connection server1;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;

connection server2;
disable_query_log;
disable_warnings;
--error 0,1051
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
flush tables;
flush status;
enable_warnings;
enable_query_log;

# Set the default connection
connection server1;
@@ -869,6 +869,30 @@ a b
3 30
4 1
drop table t1,t2;
create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
insert into t1 values
('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
replace into t1 values ('a', '-a');
replace into t1 values ('b', '-b');
replace into t1 values ('c', '-c');
replace into t1 values ('aa', '-aa');
replace into t1 values ('bb', '-bb');
replace into t1 values ('cc', '-cc');
replace into t1 values ('aaa', '-aaa');
replace into t1 values ('bbb', '-bbb');
replace into t1 values ('ccc', '-ccc');
select * from t1 order by 1,2;
a b
a -a
aa -aa
aaa -aaa
b -b
bb -bb
bbb -bbb
c -c
cc -cc
ccc -ccc
drop table t1;
End of 5.0 tests
CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
@@ -1,4 +1,5 @@
drop table if exists t1, t2, t3, t4;
flush status;
drop table if exists t1, t2, t3, t4;
flush status;
create table t1 (a int) engine=ndbcluster;
@@ -1,4 +1,5 @@
drop table if exists t1, t2, t3, t4;
flush status;
drop table if exists t1, t2, t3, t4;
flush status;
create table t1 (a int) engine=ndbcluster;
@@ -800,9 +800,27 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
select * from t1 order by a;
drop table t1,t2;

# End of 5.0 tests
--echo End of 5.0 tests
#
# Bug#31635
#
create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
insert into t1 values
('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
replace into t1 values ('a', '-a');
replace into t1 values ('b', '-b');
replace into t1 values ('c', '-c');

replace into t1 values ('aa', '-aa');
replace into t1 values ('bb', '-bb');
replace into t1 values ('cc', '-cc');

replace into t1 values ('aaa', '-aaa');
replace into t1 values ('bbb', '-bbb');
replace into t1 values ('ccc', '-ccc');
select * from t1 order by 1,2;
drop table t1;

--echo End of 5.0 tests

#
# Bug #18483 Cannot create table with FK constraint
@@ -4,11 +4,11 @@
--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
flush status;
connection server1;
drop table if exists t1, t2, t3, t4;
--enable_warnings

flush status;
--enable_warnings

# Create test tables on server1
create table t1 (a int) engine=ndbcluster;
@@ -6,11 +6,12 @@
--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
flush status;
connection server1;
drop table if exists t1, t2, t3, t4;
flush status;
--enable_warnings

flush status;

# Create test tables on server1
create table t1 (a int) engine=ndbcluster;
@@ -618,7 +618,7 @@ bool ha_ndbcluster::get_error_message(int error,
  DBUG_ENTER("ha_ndbcluster::get_error_message");
  DBUG_PRINT("enter", ("error: %d", error));

  Ndb *ndb= get_ndb();
  Ndb *ndb= check_ndb_in_thd(current_thd);
  if (!ndb)
    DBUG_RETURN(FALSE);
@@ -241,18 +241,22 @@ static void dbug_print_table(const char *info, TABLE *table)
static void run_query(THD *thd, char *buf, char *end,
                      const int *no_print_error, my_bool disable_binlog)
{
  ulong save_query_length= thd->query_length;
  char *save_query= thd->query;
  ulong save_thread_id= thd->variables.pseudo_thread_id;
  ulong save_thd_query_length= thd->query_length;
  char *save_thd_query= thd->query;
  struct system_variables save_thd_variables= thd->variables;
  struct system_status_var save_thd_status_var= thd->status_var;
  THD_TRANS save_thd_transaction_all= thd->transaction.all;
  THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt;
  ulonglong save_thd_options= thd->options;
  DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options));
  NET save_net= thd->net;
  NET save_thd_net= thd->net;
  const char* found_semicolon= NULL;

  bzero((char*) &thd->net, sizeof(NET));
  thd->query_length= end - buf;
  thd->query= buf;
  thd->variables.pseudo_thread_id= thread_id;
  thd->transaction.stmt.modified_non_trans_table= FALSE;
  if (disable_binlog)
    thd->options&= ~OPTION_BIN_LOG;
@@ -275,10 +279,13 @@ static void run_query(THD *thd, char *buf, char *end,
  }

  thd->options= save_thd_options;
  thd->query_length= save_query_length;
  thd->query= save_query;
  thd->variables.pseudo_thread_id= save_thread_id;
  thd->net= save_net;
  thd->query_length= save_thd_query_length;
  thd->query= save_thd_query;
  thd->variables= save_thd_variables;
  thd->status_var= save_thd_status_var;
  thd->transaction.all= save_thd_transaction_all;
  thd->transaction.stmt= save_thd_transaction_stmt;
  thd->net= save_thd_net;

  if (thd == injector_thd)
  {
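The two run_query() hunks above replace the piecemeal save of query, query_length, pseudo_thread_id and net with a snapshot of the full session state the function mutates (variables, status_var, both transaction records, options), restored as a group on exit. A minimal sketch of the same save-everything/restore-everything idea, using an illustrative RAII guard and a made-up SessionState type rather than the server's THD:

#include <cassert>

// SessionState is an illustrative stand-in for THD; these are not the
// server's real field names.
struct SessionState {
  unsigned long query_length = 0;
  const char *query = nullptr;
  unsigned long long options = 0x4; // pretend OPTION_BIN_LOG is set
};

// RAII guard: snapshot every field on entry, restore all of them on exit,
// so an early return cannot leave a partially restored session behind.
class StateSaver {
  SessionState &live_;
  SessionState saved_;
public:
  explicit StateSaver(SessionState &s) : live_(s), saved_(s) {}
  ~StateSaver() { live_ = saved_; }
};

int main() {
  SessionState thd;
  {
    StateSaver guard(thd);
    thd.query = "CREATE TABLE ...";
    thd.query_length = 16;
    thd.options &= ~0x4ULL; // run the internal query without binlogging
  } // guard's destructor restores query, query_length and options here
  assert(thd.query == nullptr && thd.options == 0x4);
  return 0;
}

Snapshotting everything at once is the point of the fix: saving fields one by one makes it easy to forget one (here, status_var and the transaction records) and leak state from the internal query into the user's session.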
@@ -777,8 +784,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd)
                   " end_pos BIGINT UNSIGNED NOT NULL, "
                   " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");

  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                701,
                                702,
                                4009,
                                0}; // do not print error 701 etc
  run_query(thd, buf, end, no_print_error, TRUE);
@@ -837,8 +845,9 @@ static int ndbcluster_create_schema_table(THD *thd)
                   " type INT UNSIGNED NOT NULL,"
                   " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");

  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                701,
                                702,
                                4009,
                                0}; // do not print error 701 etc
  run_query(thd, buf, end, no_print_error, TRUE);
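Both create-table hunks bump the no_print_error array size to match its initializers and widen the suppressed list to the transient NDB error codes 702 and 4009. The trailing 0 terminates the list, so the lookup run_query() presumably performs can be sketched like this (error numbers taken from the hunks; treating ER_TABLE_EXISTS_ERROR as 1050 is an assumption):

#include <cstdio>

// Sketch of the suppress-list convention implied above: a fixed array of
// error codes terminated by 0; errors found in the list are not printed.
static bool error_is_suppressed(const int *no_print_error, int code) {
  for (int i = 0; no_print_error[i]; i++)   // 0 terminates the list
    if (no_print_error[i] == code)
      return true;
  return false;
}

int main() {
  // Mirrors the widened list in the hunks above.
  const int no_print_error[5] = {1050 /* ER_TABLE_EXISTS_ERROR, assumed */,
                                 701, 702, 4009, 0};
  printf("%d\n", error_is_suppressed(no_print_error, 4009)); // 1
  printf("%d\n", error_is_suppressed(no_print_error, 1064)); // 0
  return 0;
}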
@@ -3587,6 +3596,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
  Thd_ndb *thd_ndb=0;
  int ndb_update_ndb_binlog_index= 1;
  injector *inj= injector::instance();
  uint incident_id= 0;

#ifdef RUN_NDB_BINLOG_TIMER
  Timer main_timer;
@@ -3692,18 +3702,64 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
  pthread_mutex_unlock(&injector_mutex);
  pthread_cond_signal(&injector_cond);

  /*
    wait for mysql server to start (so that the binlog is started
    and thus can receive the first GAP event)
  */
  pthread_mutex_lock(&LOCK_server_started);
  while (!mysqld_server_started)
  {
    struct timespec abstime;
    set_timespec(abstime, 1);
    pthread_cond_timedwait(&COND_server_started, &LOCK_server_started,
                           &abstime);
    if (ndbcluster_terminating)
    {
      pthread_mutex_unlock(&LOCK_server_started);
      pthread_mutex_lock(&LOCK_ndb_util_thread);
      goto err;
    }
  }
  pthread_mutex_unlock(&LOCK_server_started);
restart:
  /*
    Main NDB Injector loop
  */
  while (ndb_binlog_running)
  {
    /*
      Always insert a GAP event as we cannot know what has happened in the cluster
      while not being connected.
      check if it is the first log, if so we do not insert a GAP event
      as there is really no log to have a GAP in
    */
    LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") };
    inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg);
    if (incident_id == 0)
    {
      LOG_INFO log_info;
      mysql_bin_log.get_current_log(&log_info);
      int len= strlen(log_info.log_file_name);
      uint no= 0;
      if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
          no == 1)
      {
        /* this is the first log, so skip GAP event */
        break;
      }
    }

    /*
      Always insert a GAP event as we cannot know what has happened
      in the cluster while not being connected.
    */
    LEX_STRING const msg[2]=
    {
      { C_STRING_WITH_LEN("mysqld startup") },
      { C_STRING_WITH_LEN("cluster disconnect")}
    };
    IF_DBUG(int error=)
      inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
    DBUG_ASSERT(!error);
    break;
  }
  incident_id= 1;
  {
    thd->proc_info= "Waiting for ndbcluster to start";
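The injector-thread hunk makes the first GAP incident conditional: on the very first binlog file there is nothing earlier to have a gap in, so the "mysqld startup" incident is skipped. The check relies on binlog file names ending in a six-digit sequence number. A self-contained sketch of that test (file names illustrative):

#include <cstdio>
#include <cstring>

// Is this the first binlog file? Binlog names end in a 6-digit sequence
// number (e.g. "binlog.000001"); if parsing the last six characters yields
// 1, no earlier log exists that could contain missed events.
static bool is_first_binlog(const char *log_file_name) {
  size_t len = strlen(log_file_name);
  unsigned no = 0;
  return len >= 6 &&
         sscanf(log_file_name + len - 6, "%u", &no) == 1 &&
         no == 1;
}

int main() {
  printf("%d\n", is_first_binlog("binlog.000001")); // 1: skip the GAP event
  printf("%d\n", is_first_binlog("binlog.000042")); // 0: record the incident
  return 0;
}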
@@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4029
Next DBLQH 5047
Next DBDICT 6008
Next DBDIH 7193
Next DBDIH 7195
Next DBTC 8054
Next CMVMI 9000
Next BACKUP 10038
@@ -81,6 +81,11 @@ Delay GCP_SAVEREQ by 10 secs

7185: Don't reply to COPY_GCI_REQ where reason == GCP

7193: Don't send LCP_FRAG_ORD to self, and crash when sending first
LCP_FRAG_ORD(last)

7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF

ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
@@ -1310,7 +1310,17 @@ private:
  LcpStatus lcpStatus;
  Uint32 lcpStatusUpdatedPlace;

  struct Save {
    LcpStatus m_status;
    Uint32 m_place;
  } m_saveState[10];

  void setLcpStatus(LcpStatus status, Uint32 line){
    for (Uint32 i = 9; i > 0; i--)
      m_saveState[i] = m_saveState[i-1];
    m_saveState[0].m_status = lcpStatus;
    m_saveState[0].m_place = lcpStatusUpdatedPlace;

    lcpStatus = status;
    lcpStatusUpdatedPlace = line;
  }
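setLcpStatus() now keeps the last ten status transitions, ageing them through a small fixed array so execDUMP_STATE_ORD() can report where each change came from (see the infoEvent loop in a later hunk). A compilable sketch of the same shift-down history, with illustrative types standing in for the block's LcpStatus:

#include <cstdio>

// Illustrative stand-in for Dbdih's m_saveState machinery.
struct StateHistory {
  struct Entry { int status; unsigned place; };
  Entry saved[10] = {};
  int status = 0;
  unsigned place = 0;

  void set(int newStatus, unsigned line) {
    for (int i = 9; i > 0; i--)      // age older entries down the array
      saved[i] = saved[i - 1];
    saved[0] = Entry{status, place}; // remember the state being left
    status = newStatus;
    place = line;                    // callers pass __LINE__ here
  }
};

int main() {
  StateHistory h;
  h.set(1, __LINE__); // e.g. LCP_START_LCP_ROUND
  h.set(2, __LINE__); // e.g. LCP_TAB_COMPLETED
  for (int i = 0; i < 3; i++)
    printf("%d: status=%d place=%u\n", i, h.saved[i].status, h.saved[i].place);
  return 0;
}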
@@ -5181,10 +5181,18 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
  }

  jam();
  signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
  signal->theData[1] = failedNodePtr.i;
  signal->theData[2] = 0; // Tab id
  sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);

  if (!ERROR_INSERTED(7194))
  {
    signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
    signal->theData[1] = failedNodePtr.i;
    signal->theData[2] = 0; // Tab id
    sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
  }
  else
  {
    ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE");
  }

  setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
}//Dbdih::startRemoveFailedNode()
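This and the following Dbdih hunks are all guarded by ERROR_INSERTED(7193/7194), the NDB kernel's fault-injection hook: a test arms an error number in a node and guarded code paths deliberately misbehave so rare node-failure timings can be reproduced deterministically (runBug32160 further down drives exactly these two codes). A toy model of the mechanism, with simplified stand-ins for the real macros:

#include <cstdio>

// Simplified stand-ins for NDB's fault-injection macros; the real ones live
// per-block in the kernel, not in a global like this.
static unsigned g_error_insert_value = 0;           // 0 means "no fault armed"
#define ERROR_INSERTED(x) (g_error_insert_value == (x))
#define CLEAR_ERROR_INSERT_VALUE (g_error_insert_value = 0)

static void start_remove_failed_node() {
  if (!ERROR_INSERTED(7194)) {
    printf("sending ZREMOVE_NODE_FROM_TABLE\n");             // normal path
  } else {
    printf("7194 Not starting ZREMOVE_NODE_FROM_TABLE\n");   // injected fault
  }
}

int main() {
  start_remove_failed_node();   // normal path
  g_error_insert_value = 7194;  // what a test's insertErrorInNode(node, 7194) arms
  start_remove_failed_node();   // fault path
  CLEAR_ERROR_INSERT_VALUE;     // mirrors the "CLEARING 7194" hunk below
  return 0;
}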
@@ -6115,11 +6123,21 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
    signal->theData[0] = 7012;
    execDUMP_STATE_ORD(signal);

    if (ERROR_INSERTED(7194))
    {
      ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE");
      signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
      signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId;
      signal->theData[2] = 0; // Tab id
      sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
    }

    c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
    MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
    req->masterRef = reference();
    req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
    sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);

  } else {
    sendMASTER_LCPCONF(signal);
  }
@@ -6432,6 +6450,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
{
  const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
  jamEntry();

  if (ERROR_INSERTED(7194))
  {
    ndbout_c("delaying MASTER_LCPCONF due to error 7194");
    sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal,
                        300, signal->getLength());
    return;
  }

  Uint32 senderNodeId = conf->senderNodeId;
  MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
  const Uint32 failedNodeId = conf->failedNodeId;
@@ -6566,7 +6593,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
#endif

    c_lcpState.keepGci = SYSFILE->keepGCI;
    c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
    startLcpRoundLoopLab(signal, 0, 0);
    break;
  }
@@ -10538,6 +10564,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
    if(ERROR_INSERTED(7075)){
      continue;
    }

    CRASH_INSERTION(7193);
    BlockReference ref = calcLqhBlockRef(nodePtr.i);
    sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
  }
@@ -10765,6 +10793,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){
  CRASH_INSERTION2(7017, !isMaster());

  c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);

  if (ERROR_INSERTED(7194))
  {
    ndbout_c("CLEARING 7194");
    CLEAR_ERROR_INSERT_VALUE;
  }

  return true;
}
@@ -10954,6 +10989,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal,

  BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);

  if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId())
  {
    return;
  }

  LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
  lcpFragOrd->tableId = info.tableId;
  lcpFragOrd->fragmentId = info.fragId;
@@ -14500,6 +14540,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
              ("immediateLcpStart = %d masterLcpNodeId = %d",
               c_lcpState.immediateLcpStart,
               refToNode(c_lcpState.m_masterLcpDihRef));

    for (Uint32 i = 0; i<10; i++)
    {
      infoEvent("%u : status: %u place: %u", i,
                c_lcpState.m_saveState[i].m_status,
                c_lcpState.m_saveState[i].m_place);
    }

    infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
  }
@@ -2776,9 +2776,13 @@ runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
    case NdbDictionary::Object::UserTable:
      tableFound = list.elements[i].name;
      if(tableFound != 0){
        if(pDict->dropTable(tableFound) != 0){
          g_err << "Failed to drop table: " << pDict->getNdbError() << endl;
          return NDBT_FAILED;
        if(strcmp(tableFound, "ndb_apply_status") != 0 &&
           strcmp(tableFound, "NDB$BLOB_2_3") != 0 &&
           strcmp(tableFound, "ndb_schema") != 0){
          if(pDict->dropTable(tableFound) != 0){
            g_err << "Failed to drop table: " << tableFound << pDict->getNdbError() << endl;
            return NDBT_FAILED;
          }
        }
      }
      tableFound = 0;
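The runDropDDObjects() fix stops the cleanup utility from dropping NDB's own metadata tables along with user tables. A small sketch of the same name filter (the internal names come from the hunk; the table list in main is illustrative):

#include <cstdio>
#include <cstring>

// Skip NDB-internal system tables when cleaning up user tables, so the
// utility does not destroy replication/schema metadata.
static bool is_internal_table(const char *name) {
  static const char *internal[] = {"ndb_apply_status", "NDB$BLOB_2_3",
                                   "ndb_schema"};
  for (const char *n : internal)
    if (strcmp(name, n) == 0)
      return true;
  return false;
}

int main() {
  const char *tables[] = {"t1", "ndb_schema", "my_data"};
  for (const char *t : tables)
    if (!is_internal_table(t))
      printf("would drop: %s\n", t);  // prints t1 and my_data only
  return 0;
}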
@@ -1836,6 +1836,51 @@ runBug31525(NDBT_Context* ctx, NDBT_Step* step)
  return NDBT_OK;
}

int
runBug32160(NDBT_Context* ctx, NDBT_Step* step)
{
  int result = NDBT_OK;
  int loops = ctx->getNumLoops();
  int records = ctx->getNumRecords();
  Ndb* pNdb = GETNDB(step);
  NdbRestarter res;

  if (res.getNumDbNodes() < 2)
  {
    return NDBT_OK;
  }

  int master = res.getMasterNodeId();
  int next = res.getNextMasterNodeId(master);

  if (res.insertErrorInNode(next, 7194))
  {
    return NDBT_FAILED;
  }

  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  if (res.dumpStateOneNode(master, val2, 2))
    return NDBT_FAILED;

  if (res.insertErrorInNode(master, 7193))
    return NDBT_FAILED;

  int val3[] = { 7099 };
  if (res.dumpStateOneNode(master, val3, 1))
    return NDBT_FAILED;

  if (res.waitNodesNoStart(&master, 1))
    return NDBT_FAILED;

  if (res.startNodes(&master, 1))
    return NDBT_FAILED;

  if (res.waitClusterStarted())
    return NDBT_FAILED;

  return NDBT_OK;
}

NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
         "Test that one node at a time can be stopped and then restarted "\
@@ -2205,6 +2250,9 @@ TESTCASE("Bug28717", ""){
TESTCASE("Bug29364", ""){
  INITIALIZER(runBug29364);
}
TESTCASE("Bug32160", ""){
  INITIALIZER(runBug32160);
}
NDBT_TESTSUITE_END(testNodeRestart);

int main(int argc, const char** argv){
@@ -581,6 +581,10 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug29364 T1

max-time: 300
cmd: testNodeRestart
args: -n Bug32160 T1

#
# DICT TESTS
max-time: 500
@@ -534,6 +534,88 @@ TupleS::prepareRecord(TableS & tab){
  return true;
}

int
RestoreDataIterator::readTupleData(Uint32 *buf_ptr, Uint32 *ptr,
                                   Uint32 dataLength)
{
  while (ptr + 2 < buf_ptr + dataLength)
  {
    typedef BackupFormat::DataFile::VariableData VarData;
    VarData * data = (VarData *)ptr;
    Uint32 sz = ntohl(data->Sz);
    Uint32 attrId = ntohl(data->Id); // column_no

    AttributeData * attr_data = m_tuple.getData(attrId);
    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);

    // just a reminder - remove when backwards compat implemented
    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
        attr_desc->m_column->getNullable())
    {
      const Uint32 ind = attr_desc->m_nullBitIndex;
      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
                          buf_ptr,ind))
      {
        attr_data->null = true;
        attr_data->void_value = NULL;
        continue;
      }
    }

    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
    {
      sz *= 4;
    }

    attr_data->null = false;
    attr_data->void_value = &data->Data[0];
    attr_data->size = sz;

    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }

    /**
     * Compute array size
     */
    const Uint32 arraySize = sz / (attr_desc->size / 8);
    assert(arraySize <= attr_desc->arraySize);

    //convert the length of blob(v1) and text(v1)
    if(!m_hostByteOrder
       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
    {
      char* p = (char*)&attr_data->u_int64_value[0];
      Uint64 x;
      memcpy(&x, p, sizeof(Uint64));
      x = Twiddle64(x);
      memcpy(p, &x, sizeof(Uint64));
    }

    //convert datetime type
    if(!m_hostByteOrder
       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
    {
      char* p = (char*)&attr_data->u_int64_value[0];
      Uint64 x;
      memcpy(&x, p, sizeof(Uint64));
      x = Twiddle64(x);
      memcpy(p, &x, sizeof(Uint64));
    }

    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
    {
      return -1;
    }

    ptr += ((sz + 3) >> 2) + 2;
  }

  assert(ptr == buf_ptr + dataLength);

  return 0;
}

const TupleS *
RestoreDataIterator::getNextTuple(int & res)
{
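The new readTupleData() is the old getNextTuple() loop body factored into a helper (the next hunk deletes the inline copy), including the Twiddle64 fix-ups that byte-swap fixed-array Blob/Text length words and Datetime values when the backup was written on a host of opposite endianness. A standalone sketch of that memcpy-swap-memcpy pattern, with a local Twiddle64 standing in for the NDB helper:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in for NDB's Twiddle64: reverse the byte order of a 64-bit value.
static uint64_t Twiddle64(uint64_t x) {
  uint64_t y = 0;
  for (int i = 0; i < 8; i++)
    y = (y << 8) | ((x >> (8 * i)) & 0xff);  // reverse the 8 bytes
  return y;
}

int main() {
  unsigned char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};  // raw bytes from a backup
  uint64_t x;
  memcpy(&x, buf, sizeof x);   // memcpy avoids unaligned access through casts
  x = Twiddle64(x);
  memcpy(buf, &x, sizeof x);   // write the host-order value back in place
  for (unsigned char b : buf)
    printf("%d ", b);          // prints: 8 7 6 5 4 3 2 1
  printf("\n");
  return 0;
}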
@@ -630,78 +712,8 @@ RestoreDataIterator::getNextTuple(int & res)
    attr_data->void_value = NULL;
  }

  while (ptr + 2 < buf_ptr + dataLength) {
    typedef BackupFormat::DataFile::VariableData VarData;
    VarData * data = (VarData *)ptr;
    Uint32 sz = ntohl(data->Sz);
    Uint32 attrId = ntohl(data->Id); // column_no

    AttributeData * attr_data = m_tuple.getData(attrId);
    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);

    // just a reminder - remove when backwards compat implemented
    if(m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
       attr_desc->m_column->getNullable()){
      const Uint32 ind = attr_desc->m_nullBitIndex;
      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
                          buf_ptr,ind)){
        attr_data->null = true;
        attr_data->void_value = NULL;
        continue;
      }
    }

    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
    {
      sz *= 4;
    }

    attr_data->null = false;
    attr_data->void_value = &data->Data[0];
    attr_data->size = sz;

    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }

    /**
     * Compute array size
     */
    const Uint32 arraySize = sz / (attr_desc->size / 8);
    assert(arraySize <= attr_desc->arraySize);

    //convert the length of blob(v1) and text(v1)
    if(!m_hostByteOrder
       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
    {
      char* p = (char*)&attr_data->u_int64_value[0];
      Uint64 x;
      memcpy(&x, p, sizeof(Uint64));
      x = Twiddle64(x);
      memcpy(p, &x, sizeof(Uint64));
    }

    //convert datetime type
    if(!m_hostByteOrder
       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
    {
      char* p = (char*)&attr_data->u_int64_value[0];
      Uint64 x;
      memcpy(&x, p, sizeof(Uint64));
      x = Twiddle64(x);
      memcpy(p, &x, sizeof(Uint64));
    }

    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
    {
      res = -1;
      return NULL;
    }

    ptr += ((sz + 3) >> 2) + 2;
  }

  assert(ptr == buf_ptr + dataLength);
  if ((res = readTupleData(buf_ptr, ptr, dataLength)))
    return NULL;

  m_count ++;
  res = 0;
@@ -355,6 +355,10 @@ public:
  bool validateFragmentFooter();

  const TupleS *getNextTuple(int & res);

private:

  int readTupleData(Uint32 *buf_ptr, Uint32 *ptr, Uint32 dataLength);
};

class LogEntry {