
Merge whalegate.ndb.mysql.com:/home/tomas/cge-5.1

into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb-merge
Author: tomas@whalegate.ndb.mysql.com
Date:   2007-11-12 10:50:58 +01:00

17 changed files with 361 additions and 111 deletions

View File

@@ -5,10 +5,6 @@ connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
 # Check that server1 has NDB support
 connection server1;
 disable_query_log;
---disable_warnings
-drop table if exists t1, t2;
---enable_warnings
-flush tables;
 --require r/true.require
 select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
 --source include/ndb_not_readonly.inc
@@ -17,14 +13,32 @@ enable_query_log;
 # Check that server2 has NDB support
 connection server2;
 disable_query_log;
---disable_warnings
-drop table if exists t1, t2;
---enable_warnings
-flush tables;
 --require r/true.require
 select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
 --source include/ndb_not_readonly.inc
 enable_query_log;

-# Set the default connection to 'server1'
+# cleanup
+connection server1;
+disable_query_log;
+disable_warnings;
+--error 0,1051
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+flush tables;
+flush status;
+enable_warnings;
+enable_query_log;
+
+connection server2;
+disable_query_log;
+disable_warnings;
+--error 0,1051
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+flush tables;
+flush status;
+enable_warnings;
+enable_query_log;
+
+# Set the default connection
 connection server1;

View File

@@ -869,6 +869,30 @@ a b
 3 30
 4 1
 drop table t1,t2;
+create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
+insert into t1 values
+('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
+replace into t1 values ('a', '-a');
+replace into t1 values ('b', '-b');
+replace into t1 values ('c', '-c');
+replace into t1 values ('aa', '-aa');
+replace into t1 values ('bb', '-bb');
+replace into t1 values ('cc', '-cc');
+replace into t1 values ('aaa', '-aaa');
+replace into t1 values ('bbb', '-bbb');
+replace into t1 values ('ccc', '-ccc');
+select * from t1 order by 1,2;
+a b
+a -a
+aa -aa
+aaa -aaa
+b -b
+bb -bb
+bbb -bbb
+c -c
+cc -cc
+ccc -ccc
+drop table t1;
 End of 5.0 tests
 CREATE TABLE t1 (a VARCHAR(255) NOT NULL,
 CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;

View File

@@ -1,4 +1,5 @@
 drop table if exists t1, t2, t3, t4;
+flush status;
 drop table if exists t1, t2, t3, t4;
 flush status;
 create table t1 (a int) engine=ndbcluster;

View File

@@ -1,4 +1,5 @@
 drop table if exists t1, t2, t3, t4;
+flush status;
 drop table if exists t1, t2, t3, t4;
 flush status;
 create table t1 (a int) engine=ndbcluster;

View File

@@ -800,9 +800,27 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
 select * from t1 order by a;
 drop table t1,t2;

-# End of 5.0 tests
---echo End of 5.0 tests
+#
+# Bug#31635
+#
+create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
+insert into t1 values
+('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
+replace into t1 values ('a', '-a');
+replace into t1 values ('b', '-b');
+replace into t1 values ('c', '-c');
+replace into t1 values ('aa', '-aa');
+replace into t1 values ('bb', '-bb');
+replace into t1 values ('cc', '-cc');
+replace into t1 values ('aaa', '-aaa');
+replace into t1 values ('bbb', '-bbb');
+replace into t1 values ('ccc', '-ccc');
+select * from t1 order by 1,2;
+drop table t1;
+
+--echo End of 5.0 tests

 #
 # Bug #18483 Cannot create table with FK constraint

View File

@@ -4,11 +4,11 @@
 --disable_warnings
 connection server2;
 drop table if exists t1, t2, t3, t4;
+flush status;
 connection server1;
 drop table if exists t1, t2, t3, t4;
---enable_warnings
 flush status;
+--enable_warnings

 # Create test tables on server1
 create table t1 (a int) engine=ndbcluster;

View File

@@ -6,11 +6,12 @@
 --disable_warnings
 connection server2;
 drop table if exists t1, t2, t3, t4;
+flush status;
 connection server1;
 drop table if exists t1, t2, t3, t4;
+flush status;
 --enable_warnings
-flush status;

 # Create test tables on server1
 create table t1 (a int) engine=ndbcluster;

View File

@@ -618,7 +618,7 @@ bool ha_ndbcluster::get_error_message(int error,
   DBUG_ENTER("ha_ndbcluster::get_error_message");
   DBUG_PRINT("enter", ("error: %d", error));

-  Ndb *ndb= get_ndb();
+  Ndb *ndb= check_ndb_in_thd(current_thd);
   if (!ndb)
     DBUG_RETURN(FALSE);

View File

@@ -241,18 +241,22 @@ static void dbug_print_table(const char *info, TABLE *table)
 static void run_query(THD *thd, char *buf, char *end,
                       const int *no_print_error, my_bool disable_binlog)
 {
-  ulong save_query_length= thd->query_length;
-  char *save_query= thd->query;
-  ulong save_thread_id= thd->variables.pseudo_thread_id;
+  ulong save_thd_query_length= thd->query_length;
+  char *save_thd_query= thd->query;
+  struct system_variables save_thd_variables= thd->variables;
+  struct system_status_var save_thd_status_var= thd->status_var;
+  THD_TRANS save_thd_transaction_all= thd->transaction.all;
+  THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt;
   ulonglong save_thd_options= thd->options;
   DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options));
-  NET save_net= thd->net;
+  NET save_thd_net= thd->net;
   const char* found_semicolon= NULL;

   bzero((char*) &thd->net, sizeof(NET));
   thd->query_length= end - buf;
   thd->query= buf;
   thd->variables.pseudo_thread_id= thread_id;
+  thd->transaction.stmt.modified_non_trans_table= FALSE;
   if (disable_binlog)
     thd->options&= ~OPTION_BIN_LOG;
@@ -275,10 +279,13 @@ static void run_query(THD *thd, char *buf, char *end,
   }

   thd->options= save_thd_options;
-  thd->query_length= save_query_length;
-  thd->query= save_query;
-  thd->variables.pseudo_thread_id= save_thread_id;
-  thd->net= save_net;
+  thd->query_length= save_thd_query_length;
+  thd->query= save_thd_query;
+  thd->variables= save_thd_variables;
+  thd->status_var= save_thd_status_var;
+  thd->transaction.all= save_thd_transaction_all;
+  thd->transaction.stmt= save_thd_transaction_stmt;
+  thd->net= save_thd_net;

   if (thd == injector_thd)
   {
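
Note: the two hunks above widen run_query()'s manual save/restore of THD state (query text, system variables, status counters, transaction state, NET) so the internally executed query cannot leak state back into the caller. The standalone sketch below shows the same snapshot-and-restore idea expressed as an RAII guard; FakeThd and ThdStateGuard are invented illustration names, not MySQL types.

#include <cstdio>

struct FakeThd                      // stand-in for THD; fields are invented
{
  unsigned long      query_length;
  const char        *query;
  unsigned long long options;
};

class ThdStateGuard
{
  FakeThd &m_thd;
  FakeThd  m_saved;                 // one up-front copy, like the save_thd_* locals
public:
  explicit ThdStateGuard(FakeThd &thd) : m_thd(thd), m_saved(thd) {}
  ~ThdStateGuard() { m_thd = m_saved; }   // restore on every exit path
};

int main()
{
  FakeThd thd = { 8, "original", 1ULL };
  {
    ThdStateGuard guard(thd);       // snapshot before running the internal query
    thd.query = "internal query";   // clobber state, as run_query() does
    thd.query_length = 14;
  }                                 // guard restores the saved state here
  std::printf("%s (len %lu)\n", thd.query, thd.query_length); // "original (len 8)"
  return 0;
}
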
@@ -777,8 +784,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd)
                    " end_pos BIGINT UNSIGNED NOT NULL, "
                    " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");

-  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
+  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                 701,
+                                702,
                                 4009,
                                 0}; // do not print error 701 etc
   run_query(thd, buf, end, no_print_error, TRUE);
@@ -837,8 +845,9 @@ static int ndbcluster_create_schema_table(THD *thd)
                    " type INT UNSIGNED NOT NULL,"
                    " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");

-  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
+  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                 701,
+                                702,
                                 4009,
                                 0}; // do not print error 701 etc
   run_query(thd, buf, end, no_print_error, TRUE);
@@ -3587,6 +3596,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   Thd_ndb *thd_ndb=0;
   int ndb_update_ndb_binlog_index= 1;
   injector *inj= injector::instance();
+  uint incident_id= 0;

 #ifdef RUN_NDB_BINLOG_TIMER
   Timer main_timer;
@@ -3692,18 +3702,64 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   pthread_mutex_unlock(&injector_mutex);
   pthread_cond_signal(&injector_cond);

+  /*
+    wait for mysql server to start (so that the binlog is started
+    and thus can receive the first GAP event)
+  */
+  pthread_mutex_lock(&LOCK_server_started);
+  while (!mysqld_server_started)
+  {
+    struct timespec abstime;
+    set_timespec(abstime, 1);
+    pthread_cond_timedwait(&COND_server_started, &LOCK_server_started,
+                           &abstime);
+    if (ndbcluster_terminating)
+    {
+      pthread_mutex_unlock(&LOCK_server_started);
+      pthread_mutex_lock(&LOCK_ndb_util_thread);
+      goto err;
+    }
+  }
+  pthread_mutex_unlock(&LOCK_server_started);
+
 restart:
   /*
     Main NDB Injector loop
   */
+  while (ndb_binlog_running)
   {
     /*
-      Always insert a GAP event as we cannot know what has happened in the cluster
-      while not being connected.
+      check if it is the first log, if so we do not insert a GAP event
+      as there is really no log to have a GAP in
     */
-    LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") };
-    inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg);
+    if (incident_id == 0)
+    {
+      LOG_INFO log_info;
+      mysql_bin_log.get_current_log(&log_info);
+      int len= strlen(log_info.log_file_name);
+      uint no= 0;
+      if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
+          no == 1)
+      {
+        /* this is the fist log, so skip GAP event */
+        break;
+      }
+    }
+
+    /*
+      Always insert a GAP event as we cannot know what has happened
+      in the cluster while not being connected.
+    */
+    LEX_STRING const msg[2]=
+    {
+      { C_STRING_WITH_LEN("mysqld startup") },
+      { C_STRING_WITH_LEN("cluster disconnect")}
+    };
+    IF_DBUG(int error=)
+      inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
+    DBUG_ASSERT(!error);
+    break;
   }
+  incident_id= 1;
   {
     thd->proc_info= "Waiting for ndbcluster to start";
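
Note: the injector thread now records an INCIDENT_LOST_EVENTS (GAP) event with a message selected by incident_id, except on the server's very first binlog file, which it recognizes by the six-digit sequence suffix of the current log name. A self-contained sketch of that file-name check (the helper and the example file names are illustrative, not server code):

#include <cstdio>
#include <cstring>

// Binlog file names end in a six-digit sequence number; parsing the last six
// characters with sscanf and comparing against 1 tells us whether any earlier
// log file could contain a gap at all.
static bool is_first_binlog(const char *log_file_name)
{
  int len = (int)strlen(log_file_name);
  unsigned no = 0;
  if (len < 6)
    return false;
  return sscanf(log_file_name + len - 6, "%u", &no) == 1 && no == 1;
}

int main()
{
  printf("%d\n", is_first_binlog("master-bin.000001")); // 1: skip the GAP event
  printf("%d\n", is_first_binlog("master-bin.000042")); // 0: record a GAP event
  return 0;
}
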

View File

@@ -5,7 +5,7 @@ Next DBACC 3002
 Next DBTUP 4029
 Next DBLQH 5047
 Next DBDICT 6008
-Next DBDIH 7193
+Next DBDIH 7195
 Next DBTC 8054
 Next CMVMI 9000
 Next BACKUP 10038
@@ -81,6 +81,11 @@ Delay GCP_SAVEREQ by 10 secs
 7185: Dont reply to COPY_GCI_REQ where reason == GCP

+7193: Dont send LCP_FRAG_ORD to self, and crash when sending first
+      LCP_FRAG_ORD(last)
+
+7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF
+
 ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
 -----------------------------------------------------------------

View File

@@ -1310,7 +1310,17 @@ private:
   LcpStatus lcpStatus;
   Uint32 lcpStatusUpdatedPlace;

+  struct Save {
+    LcpStatus m_status;
+    Uint32 m_place;
+  } m_saveState[10];
+
   void setLcpStatus(LcpStatus status, Uint32 line){
+    for (Uint32 i = 9; i > 0; i--)
+      m_saveState[i] = m_saveState[i-1];
+    m_saveState[0].m_status = lcpStatus;
+    m_saveState[0].m_place = lcpStatusUpdatedPlace;
+
     lcpStatus = status;
     lcpStatusUpdatedPlace = line;
   }
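
Note: setLcpStatus() now remembers the last ten (status, place) pairs so the DUMP_STATE_ORD handler added further down can print recent LCP state transitions. A self-contained sketch of the same shift-down history, with plain ints standing in for LcpStatus and __LINE__:

#include <cstdio>

struct Save { int m_status; int m_place; };
static Save m_saveState[10];            // slot 0 is the most recent old value
static int  lcpStatus = 0;
static int  lcpStatusUpdatedPlace = 0;

static void setLcpStatus(int status, int line)
{
  // shift older entries down one slot, dropping the oldest
  for (int i = 9; i > 0; i--)
    m_saveState[i] = m_saveState[i - 1];
  // record the value being overwritten, then update
  m_saveState[0].m_status = lcpStatus;
  m_saveState[0].m_place  = lcpStatusUpdatedPlace;
  lcpStatus = status;
  lcpStatusUpdatedPlace = line;
}

int main()
{
  setLcpStatus(1, __LINE__);
  setLcpStatus(2, __LINE__);
  for (int i = 0; i < 10; i++)          // same dump format as the patch below
    printf("%d : status: %d place: %d\n",
           i, m_saveState[i].m_status, m_saveState[i].m_place);
  return 0;
}
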

View File

@@ -5181,11 +5181,19 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
   }

   jam();
-  signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
-  signal->theData[1] = failedNodePtr.i;
-  signal->theData[2] = 0; // Tab id
-  sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+
+  if (!ERROR_INSERTED(7194))
+  {
+    signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+    signal->theData[1] = failedNodePtr.i;
+    signal->theData[2] = 0; // Tab id
+    sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+  }
+  else
+  {
+    ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE");
+  }

   setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
 }//Dbdih::startRemoveFailedNode()
@@ -6114,12 +6122,22 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
     signal->theData[0] = 7012;
     execDUMP_STATE_ORD(signal);

+    if (ERROR_INSERTED(7194))
+    {
+      ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE");
+      signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+      signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId;
+      signal->theData[2] = 0; // Tab id
+      sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+    }
+
     c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
     MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
     req->masterRef = reference();
     req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
     sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
   } else {
     sendMASTER_LCPCONF(signal);
   }
@@ -6432,6 +6450,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
 {
   const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
   jamEntry();
+
+  if (ERROR_INSERTED(7194))
+  {
+    ndbout_c("delaying MASTER_LCPCONF due to error 7194");
+    sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal,
+                        300, signal->getLength());
+    return;
+  }
+
   Uint32 senderNodeId = conf->senderNodeId;
   MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
   const Uint32 failedNodeId = conf->failedNodeId;
@@ -6566,7 +6593,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
 #endif

     c_lcpState.keepGci = SYSFILE->keepGCI;
-    c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
     startLcpRoundLoopLab(signal, 0, 0);
     break;
   }
@@ -10538,6 +10564,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
     if(ERROR_INSERTED(7075)){
       continue;
     }
+
+    CRASH_INSERTION(7193);
     BlockReference ref = calcLqhBlockRef(nodePtr.i);
     sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
   }
@@ -10765,6 +10793,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){
   CRASH_INSERTION2(7017, !isMaster());

   c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
+
+  if (ERROR_INSERTED(7194))
+  {
+    ndbout_c("CLEARING 7194");
+    CLEAR_ERROR_INSERT_VALUE;
+  }
+
   return true;
 }
@@ -10954,6 +10989,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal,
   BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);

+  if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId())
+  {
+    return;
+  }
+
   LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
   lcpFragOrd->tableId = info.tableId;
   lcpFragOrd->fragmentId = info.fragId;
@@ -14500,6 +14540,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
              ("immediateLcpStart = %d masterLcpNodeId = %d",
               c_lcpState.immediateLcpStart,
               refToNode(c_lcpState.m_masterLcpDihRef));
+
+    for (Uint32 i = 0; i<10; i++)
+    {
+      infoEvent("%u : status: %u place: %u", i,
+                c_lcpState.m_saveState[i].m_status,
+                c_lcpState.m_saveState[i].m_place);
+    }
+
     infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
   }
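
Note: these DBDIH hunks lean on NDB's error-insert machinery: ERROR_INSERTED(n) tests a fault code planted by a test program (here 7193 and 7194), CRASH_INSERTION(n) kills the node at a chosen point, and CLEAR_ERROR_INSERT_VALUE disarms the code again. A minimal sketch of the pattern with simplified stand-in macros, not the NDB implementation:

#include <cstdio>

static unsigned cerrorInsert = 0;   // set through a test interface in real NDB

#define ERROR_INSERTED(x) (cerrorInsert == (x))
#define SET_ERROR_INSERT_VALUE(x) (cerrorInsert = (x))
#define CLEAR_ERROR_INSERT_VALUE (cerrorInsert = 0)

// Mirrors the shape of the startRemoveFailedNode() change above: the normal
// path is suppressed while the fault code is armed, steering the node into a
// rarely taken recovery path that the test wants to exercise.
static void startRemoveFailedNode()
{
  if (!ERROR_INSERTED(7194))
  {
    printf("normal path: sending ZREMOVE_NODE_FROM_TABLE\n");
  }
  else
  {
    printf("7194 Not starting ZREMOVE_NODE_FROM_TABLE\n");
  }
}

int main()
{
  startRemoveFailedNode();            // normal path
  SET_ERROR_INSERT_VALUE(7194);
  startRemoveFailedNode();            // injected path
  CLEAR_ERROR_INSERT_VALUE;
  startRemoveFailedNode();            // normal again
  return 0;
}
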

View File

@@ -2776,9 +2776,13 @@ runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
     case NdbDictionary::Object::UserTable:
       tableFound = list.elements[i].name;
       if(tableFound != 0){
-        if(pDict->dropTable(tableFound) != 0){
-          g_err << "Failed to drop table: " << pDict->getNdbError() << endl;
-          return NDBT_FAILED;
+        if(strcmp(tableFound, "ndb_apply_status") != 0 &&
+           strcmp(tableFound, "NDB$BLOB_2_3") != 0 &&
+           strcmp(tableFound, "ndb_schema") != 0){
+          if(pDict->dropTable(tableFound) != 0){
+            g_err << "Failed to drop table: " << tableFound << pDict->getNdbError() << endl;
+            return NDBT_FAILED;
+          }
         }
       }
       tableFound = 0;

View File

@@ -1830,6 +1830,51 @@ runBug31525(NDBT_Context* ctx, NDBT_Step* step)
   if (res.restartOneDbNode(nodes[1], false, false, true))
     return NDBT_FAILED;

+  if (res.waitClusterStarted())
+    return NDBT_FAILED;
+
+  return NDBT_OK;
+}
+
+int
+runBug32160(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int result = NDBT_OK;
+  int loops = ctx->getNumLoops();
+  int records = ctx->getNumRecords();
+  Ndb* pNdb = GETNDB(step);
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 2)
+  {
+    return NDBT_OK;
+  }
+
+  int master = res.getMasterNodeId();
+  int next = res.getNextMasterNodeId(master);
+
+  if (res.insertErrorInNode(next, 7194))
+  {
+    return NDBT_FAILED;
+  }
+
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+  if (res.dumpStateOneNode(master, val2, 2))
+    return NDBT_FAILED;
+
+  if (res.insertErrorInNode(master, 7193))
+    return NDBT_FAILED;
+
+  int val3[] = { 7099 };
+  if (res.dumpStateOneNode(master, val3, 1))
+    return NDBT_FAILED;
+
+  if (res.waitNodesNoStart(&master, 1))
+    return NDBT_FAILED;
+
+  if (res.startNodes(&master, 1))
+    return NDBT_FAILED;
+
   if (res.waitClusterStarted())
     return NDBT_FAILED;
@ -2205,6 +2250,9 @@ TESTCASE("Bug28717", ""){
TESTCASE("Bug29364", ""){ TESTCASE("Bug29364", ""){
INITIALIZER(runBug29364); INITIALIZER(runBug29364);
} }
TESTCASE("Bug32160", ""){
INITIALIZER(runBug32160);
}
NDBT_TESTSUITE_END(testNodeRestart); NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){ int main(int argc, const char** argv){

View File

@@ -581,6 +581,10 @@ max-time: 1000
 cmd: testNodeRestart
 args: -n Bug29364 T1

+max-time: 300
+cmd: testNodeRestart
+args: -n Bug32160 T1
+
 #
 # DICT TESTS
 max-time: 500

View File

@@ -534,6 +534,88 @@ TupleS::prepareRecord(TableS & tab){
   return true;
 }

+int
+RestoreDataIterator::readTupleData(Uint32 *buf_ptr, Uint32 *ptr,
+                                   Uint32 dataLength)
+{
+  while (ptr + 2 < buf_ptr + dataLength)
+  {
+    typedef BackupFormat::DataFile::VariableData VarData;
+    VarData * data = (VarData *)ptr;
+    Uint32 sz = ntohl(data->Sz);
+    Uint32 attrId = ntohl(data->Id); // column_no
+
+    AttributeData * attr_data = m_tuple.getData(attrId);
+    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
+
+    // just a reminder - remove when backwards compat implemented
+    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
+        attr_desc->m_column->getNullable())
+    {
+      const Uint32 ind = attr_desc->m_nullBitIndex;
+      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
+                          buf_ptr,ind))
+      {
+        attr_data->null = true;
+        attr_data->void_value = NULL;
+        continue;
+      }
+    }
+
+    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
+    {
+      sz *= 4;
+    }
+
+    attr_data->null = false;
+    attr_data->void_value = &data->Data[0];
+    attr_data->size = sz;
+
+    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
+
+    /**
+     * Compute array size
+     */
+    const Uint32 arraySize = sz / (attr_desc->size / 8);
+    assert(arraySize <= attr_desc->arraySize);
+
+    //convert the length of blob(v1) and text(v1)
+    if(!m_hostByteOrder
+       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
+           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
+       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
+    {
+      char* p = (char*)&attr_data->u_int64_value[0];
+      Uint64 x;
+      memcpy(&x, p, sizeof(Uint64));
+      x = Twiddle64(x);
+      memcpy(p, &x, sizeof(Uint64));
+    }
+
+    //convert datetime type
+    if(!m_hostByteOrder
+       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
+    {
+      char* p = (char*)&attr_data->u_int64_value[0];
+      Uint64 x;
+      memcpy(&x, p, sizeof(Uint64));
+      x = Twiddle64(x);
+      memcpy(p, &x, sizeof(Uint64));
+    }
+
+    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
+    {
+      return -1;
+    }
+
+    ptr += ((sz + 3) >> 2) + 2;
+  }
+
+  assert(ptr == buf_ptr + dataLength);
+
+  return 0;
+}
+
 const TupleS *
 RestoreDataIterator::getNextTuple(int & res)
 {
@@ -630,78 +712,8 @@ RestoreDataIterator::getNextTuple(int & res)
     attr_data->void_value = NULL;
   }

-  while (ptr + 2 < buf_ptr + dataLength) {
-    typedef BackupFormat::DataFile::VariableData VarData;
-    VarData * data = (VarData *)ptr;
-    Uint32 sz = ntohl(data->Sz);
-    Uint32 attrId = ntohl(data->Id); // column_no
-
-    AttributeData * attr_data = m_tuple.getData(attrId);
-    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
-    // just a reminder - remove when backwards compat implemented
-    if(m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
-       attr_desc->m_column->getNullable()){
-      const Uint32 ind = attr_desc->m_nullBitIndex;
-      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
-                          buf_ptr,ind)){
-        attr_data->null = true;
-        attr_data->void_value = NULL;
-        continue;
-      }
-    }
-
-    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
-    {
-      sz *= 4;
-    }
-
-    attr_data->null = false;
-    attr_data->void_value = &data->Data[0];
-    attr_data->size = sz;
-
-    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
-
-    /**
-     * Compute array size
-     */
-    const Uint32 arraySize = sz / (attr_desc->size / 8);
-    assert(arraySize <= attr_desc->arraySize);
-
-    //convert the length of blob(v1) and text(v1)
-    if(!m_hostByteOrder
-       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
-           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
-       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
-    {
-      char* p = (char*)&attr_data->u_int64_value[0];
-      Uint64 x;
-      memcpy(&x, p, sizeof(Uint64));
-      x = Twiddle64(x);
-      memcpy(p, &x, sizeof(Uint64));
-    }
-
-    //convert datetime type
-    if(!m_hostByteOrder
-       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
-    {
-      char* p = (char*)&attr_data->u_int64_value[0];
-      Uint64 x;
-      memcpy(&x, p, sizeof(Uint64));
-      x = Twiddle64(x);
-      memcpy(p, &x, sizeof(Uint64));
-    }
-
-    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
-    {
-      res = -1;
-      return NULL;
-    }
-
-    ptr += ((sz + 3) >> 2) + 2;
-  }
-
-  assert(ptr == buf_ptr + dataLength);
+  if ((res = readTupleData(buf_ptr, ptr, dataLength)))
+    return NULL;

   m_count ++;
   res = 0;
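
Note: the extracted readTupleData() byte-swaps 64-bit blob/text length words and datetime values with Twiddle64() when the backup was written on a host of opposite endianness. A standalone sketch of such a 64-bit byte swap (illustrative only, not the restore tool's implementation):

#include <cstdio>
#include <cstring>

typedef unsigned long long Uint64;

// Reverse the byte order of a 64-bit value: byte i moves to byte 7-i.
static Uint64 Twiddle64(Uint64 in)
{
  Uint64 out = 0;
  for (int i = 0; i < 8; i++)
    out |= ((in >> (8 * i)) & 0xffULL) << (8 * (7 - i));
  return out;
}

int main()
{
  Uint64 x = 0x0102030405060708ULL;
  // same memcpy dance as readTupleData(): swap in place via a byte buffer
  char buf[sizeof(Uint64)];
  memcpy(buf, &x, sizeof(Uint64));
  Uint64 y;
  memcpy(&y, buf, sizeof(Uint64));
  y = Twiddle64(y);
  printf("%016llx -> %016llx\n", x, y);  // 0102030405060708 -> 0807060504030201
  return 0;
}
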

View File

@@ -355,6 +355,10 @@ public:
   bool validateFragmentFooter();

   const TupleS *getNextTuple(int & res);
+
+private:
+
+  int readTupleData(Uint32 *buf_ptr, Uint32 *ptr, Uint32 dataLength);
 };

 class LogEntry {