From b50015f280f2eca4676267c79e868c2a0b511016 Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 26 Jul 2007 20:24:54 +1000
Subject: [PATCH 1/8] [PATCH] BUG#26793 test: mysqld crashes in NDB on I_S query

Reduced the test case and formalised it into something we should be able
to use in mysql-test-run.

Index: ndb-work/mysql-test/t/ndb_bug26793.test
===================================================================

mysql-test/r/ndb_bug26793.result:
  BUG#26793 test: mysqld crashes in NDB on I_S query
mysql-test/t/ndb_bug26793.test:
  BUG#26793 test: mysqld crashes in NDB on I_S query
---
 mysql-test/r/ndb_bug26793.result |  9 +++++++++
 mysql-test/t/ndb_bug26793.test   | 33 ++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)
 create mode 100644 mysql-test/r/ndb_bug26793.result
 create mode 100644 mysql-test/t/ndb_bug26793.test

diff --git a/mysql-test/r/ndb_bug26793.result b/mysql-test/r/ndb_bug26793.result
new file mode 100644
index 00000000000..9a15841e670
--- /dev/null
+++ b/mysql-test/r/ndb_bug26793.result
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE `test` (
+`id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
+`t` VARCHAR( 10 ) NOT NULL
+) ENGINE = ndbcluster;
+delete from mysql.db where user='';
+flush privileges;
+GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
+DROP TABLE `test`.`test`;
diff --git a/mysql-test/t/ndb_bug26793.test b/mysql-test/t/ndb_bug26793.test
new file mode 100644
index 00000000000..66595639c3e
--- /dev/null
+++ b/mysql-test/t/ndb_bug26793.test
@@ -0,0 +1,33 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE `test` (
+`id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
+`t` VARCHAR( 10 ) NOT NULL
+) ENGINE = ndbcluster;
+
+delete from mysql.db where user='';
+
+flush privileges;
+
+GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
+
+connect (user1,localhost,user1,pass,*NO-ONE*);
+
+disable_query_log;
+disable_result_log;
+let $i= 100;
+while ($i)
+{
+select count(*) from information_schema.tables union all select count(*) from information_schema.tables union all select count(*) from information_schema.tables;
+dec $i;
+}
+enable_query_log;
+enable_result_log;
+
+connect (root,localhost,root,,test);
+connection root;
+DROP TABLE `test`.`test`;
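Note: the test above runs standalone under the mysql-test harness. A quick
way to exercise it in isolation (assuming a normal mysql-test tree; the test
name is the file name minus the .test suffix):

    cd mysql-test
    ./mysql-test-run.pl ndb_bug26793

Against an unpatched mysqld the I_S loop should crash the server well within
the 100 iterations.
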
From ffa5fb613d6078ddaef0e9476e30da557fa4ad6b Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 26 Jul 2007 20:25:05 +1000
Subject: [PATCH 2/8] [PATCH] Bug#26793 I_S query crashes in NDB

If ::external_lock hadn't been called, we'd have no Ndb object, so we need
to check/get one here. It looks like sql_show.cc is the only place that
does this... or at least the other places are well hidden.

Index: ndb-work/sql/ha_ndbcluster.cc
===================================================================

sql/ha_ndbcluster.cc:
  Bug#26793 I_S query crashes in NDB
---
 sql/ha_ndbcluster.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 357b797ec75..03b6bcf3242 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3310,6 +3310,8 @@ int ha_ndbcluster::info(uint flag)
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
     if (m_table && table->found_next_number_field)
     {
+      if ((my_errno= check_ndb_connection()))
+        DBUG_RETURN(my_errno);
       Ndb *ndb= get_ndb();
       Uint64 auto_increment_value64;
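Note: the two added lines are the whole fix, but the pattern deserves a
sketch. A minimal illustration of the lazy-connection guard, using
hypothetical stand-in types (this is not the actual handler code; only the
names check_ndb_connection() and get_ndb() come from ha_ndbcluster.cc):

    #include <cstdio>
    #include <new>

    // Stand-in for the per-connection Ndb handle (illustration only).
    struct Ndb { int unused; };

    struct HandlerLike {
      Ndb* m_ndb;                       // normally set up via ::external_lock
      HandlerLike() : m_ndb(0) {}

      // Mirrors the role of check_ndb_connection(): create the object on
      // demand; 0 on success, HA_ERR-style non-zero error code on failure.
      int check_ndb_connection() {
        if (!m_ndb)
          m_ndb= new (std::nothrow) Ndb();
        return m_ndb ? 0 : 1;
      }

      // Entry points reachable without ::external_lock (sql_show.cc's I_S
      // probes call handler::info() like this) must run the guard first.
      int info_like_entry_point() {
        int err;
        if ((err= check_ndb_connection()))
          return err;                   // fail cleanly instead of crashing
        std::printf("have Ndb at %p\n", (void*)m_ndb);
        return 0;
      }
    };
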
From 180068ddceb0e400bb22f19c10df9187b301dedb Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 27 Jul 2007 17:12:53 +1000
Subject: [PATCH 3/8] Save the data from mysql.db that we delete (a side
 effect that made read_only fail)

---
 mysql-test/r/ndb_bug26793.result | 4 ++++
 mysql-test/t/ndb_bug26793.test   | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/mysql-test/r/ndb_bug26793.result b/mysql-test/r/ndb_bug26793.result
index 9a15841e670..31f9763dd6b 100644
--- a/mysql-test/r/ndb_bug26793.result
+++ b/mysql-test/r/ndb_bug26793.result
@@ -3,7 +3,11 @@ CREATE TABLE `test` (
 `id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
 `t` VARCHAR( 10 ) NOT NULL
 ) ENGINE = ndbcluster;
+create table test.db_temp as select * from mysql.db where user='';
 delete from mysql.db where user='';
 flush privileges;
 GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
 DROP TABLE `test`.`test`;
+insert into mysql.db select * from test.db_temp;
+drop table db_temp;
+flush privileges;
diff --git a/mysql-test/t/ndb_bug26793.test b/mysql-test/t/ndb_bug26793.test
index 66595639c3e..4f5a78fdca4 100644
--- a/mysql-test/t/ndb_bug26793.test
+++ b/mysql-test/t/ndb_bug26793.test
@@ -9,6 +9,7 @@ CREATE TABLE `test` (
 `t` VARCHAR( 10 ) NOT NULL
 ) ENGINE = ndbcluster;
 
+create table test.db_temp as select * from mysql.db where user='';
 delete from mysql.db where user='';
 
 flush privileges;
@@ -31,3 +32,7 @@ enable_result_log;
 connect (root,localhost,root,,test);
 connection root;
 DROP TABLE `test`.`test`;
+insert into mysql.db select * from test.db_temp;
+drop table db_temp;
+flush privileges;
+

From 52a014c7c646f4b4fa5c3117675671c6668cd1ac Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 13 Aug 2007 09:22:42 +0200
Subject: [PATCH 4/8] ndb - bug#28804: Handle out of transaction buffer in TC
 for INDX lookups

ndb/src/kernel/blocks/ERROR_codes.txt:
  Add new error codes for simulating out of transaction buffer memory
ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  Change signature to handle out of buffer
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Handle out of transaction buffers in index operations (TCINDXREQ++)
ndb/src/ndbapi/NdbTransaction.cpp:
  Give more info on 4012
ndb/src/ndbapi/ndberror.c:
  Add new error code
ndb/test/ndbapi/testIndex.cpp:
  add tests
ndb/test/run-test/daily-basic-tests.txt:
  add tests
sql/ha_ndbcluster.cc:
  Set correct status
---
 ndb/src/kernel/blocks/ERROR_codes.txt   |   6 +-
 ndb/src/kernel/blocks/dbtc/Dbtc.hpp     |  10 +-
 ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 163 ++++++++++++++++++------
 ndb/src/ndbapi/NdbTransaction.cpp       |  38 +++++-
 ndb/src/ndbapi/ndberror.c               |   2 +
 ndb/test/ndbapi/testIndex.cpp           | 117 +++++++++++++++++
 ndb/test/run-test/daily-basic-tests.txt |   8 ++
 sql/ha_ndbcluster.cc                    |  10 +-
 8 files changed, 299 insertions(+), 55 deletions(-)

diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 17f2c35624a..e45c608b601 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -6,7 +6,7 @@ Next DBTUP 4014
 Next DBLQH 5043
 Next DBDICT 6007
 Next DBDIH 7183
-Next DBTC 8039
+Next DBTC 8052
 Next CMVMI 9000
 Next BACKUP 10022
 Next DBUTIL 11002
@@ -296,6 +296,10 @@ ABORT OF TCKEYREQ
 
 8038 : Simulate API disconnect just after SCAN_TAB_REQ
 
+8039 : Simulate failure of TransactionBufferMemory allocation for OI lookup
+
+8051 : Simulate failure of allocation for saveINDXKEYINFO
+
 CMVMI
 -----
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 6934de76ad3..710d2fde182 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1497,12 +1497,12 @@ private:
   void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
                             TcConnectRecord * const regTcPtr);
   // Trigger and index handling
-  bool saveINDXKEYINFO(Signal* signal,
-                       TcIndexOperation* indexOp,
-                       const Uint32 *src,
-                       Uint32 len);
+  int saveINDXKEYINFO(Signal* signal,
+                      TcIndexOperation* indexOp,
+                      const Uint32 *src,
+                      Uint32 len);
   bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
-  bool saveINDXATTRINFO(Signal* signal,
+  int saveINDXATTRINFO(Signal* signal,
                         TcIndexOperation* indexOp,
                         const Uint32 *src,
                         Uint32 len);
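Note: error inserts 8039 and 8051 are driven from test programs through
NdbRestarter. A minimal sketch of that usage (the two API calls are real;
the surrounding function is hypothetical — the testIndex.cpp addition later
in this patch does the same thing inside the NDBT harness):

    #include <NdbRestarter.hpp>

    // Force every data node to fail the next TransactionBufferMemory
    // allocation for an ordered-index lookup (error insert 8039), run the
    // workload, then clear the insert again with code 0.
    int simulate_tc_buffer_exhaustion()
    {
      NdbRestarter restarter;
      if (restarter.insertErrorInAllNodes(8039) != 0)
        return -1;                        // management server unreachable
      // ... index operations issued here should now fail with error 289 ...
      restarter.insertErrorInAllNodes(0); // clear the error insert
      return 0;
    }
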
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index e2df1249661..60024e82978 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -1789,9 +1789,18 @@ start_failure:
   }//switch
 }
 
+static
+inline
+bool
+compare_transid(Uint32* val0, Uint32* val1)
+{
+  Uint32 tmp0 = val0[0] ^ val1[0];
+  Uint32 tmp1 = val0[1] ^ val1[1];
+  return (tmp0 | tmp1) == 0;
+}
+
 void Dbtc::execKEYINFO(Signal* signal)
 {
-  UintR compare_transid1, compare_transid2;
   jamEntry();
   apiConnectptr.i = signal->theData[0];
   tmaxData = 20;
@@ -1801,10 +1810,8 @@ void Dbtc::execKEYINFO(Signal* signal)
   }//if
   ptrAss(apiConnectptr, apiConnectRecord);
   ttransid_ptr = 1;
-  compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
-  compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
-  compare_transid1 = compare_transid1 | compare_transid2;
-  if (compare_transid1 != 0) {
+  if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false)
+  {
     TCKEY_abort(signal, 19);
     return;
   }//if
@@ -2105,7 +2112,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
 
 void Dbtc::execATTRINFO(Signal* signal)
 {
-  UintR compare_transid1, compare_transid2;
   UintR Tdata1 = signal->theData[0];
   UintR Tlength = signal->length();
   UintR TapiConnectFilesize = capiConnectFilesize;
@@ -2120,17 +2126,13 @@ void Dbtc::execATTRINFO(Signal* signal)
     return;
   }//if
 
-  UintR Tdata2 = signal->theData[1];
-  UintR Tdata3 = signal->theData[2];
   ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
-  compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
-  compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
   apiConnectptr.p = regApiPtr;
-  compare_transid1 = compare_transid1 | compare_transid2;
-  if (compare_transid1 != 0) {
+  if (compare_transid(regApiPtr->transid, signal->theData+1) == false)
+  {
+    DEBUG("Drop ATTRINFO, wrong transid, length="<<signal->theData[1]<<", "<<signal->theData[2]);
     TCKEY_abort(signal, 19);
     return;
   }//if
@@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
   }
 }//Dbtc::execTC_COMMITREQ()
 
+/**
+ * TCROLLBACKREQ
+ *
+ * Format is:
+ *
+ * theData[0] = apiconnectptr
+ * theData[1] = transid[0]
+ * theData[2] = transid[1]
+ * OPTIONAL theData[3] = flags
+ *
+ * Flags:
+ *   0x1 = potentiallyBad data from API (try not to assert)
+ */
 void Dbtc::execTCROLLBACKREQ(Signal* signal)
 {
+  bool potentiallyBad= false;
   UintR compare_transid1, compare_transid2;
 
   jamEntry();
+
+  if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
+  {
+    ndbout_c("Trying to roll back potentially bad txn\n");
+    potentiallyBad= true;
+  }
+
   apiConnectptr.i = signal->theData[0];
   if (apiConnectptr.i >= capiConnectFilesize) {
     goto TC_ROLL_warning;
@@ -5547,12 +5570,14 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
 
 TC_ROLL_warning:
   jam();
-  warningHandlerLab(signal, __LINE__);
+  if(likely(potentiallyBad==false))
+    warningHandlerLab(signal, __LINE__);
   return;
 
 TC_ROLL_system_error:
   jam();
-  systemErrorLab(signal, __LINE__);
+  if(likely(potentiallyBad==false))
+    systemErrorLab(signal, __LINE__);
   return;
 }//Dbtc::execTCROLLBACKREQ()
 
@@ -11559,6 +11584,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
     // This is a newly started transaction, clean-up
     releaseAllSeizedIndexOperations(regApiPtr);
 
+    regApiPtr->apiConnectstate = CS_STARTED;
     regApiPtr->transid[0] = tcIndxReq->transId1;
     regApiPtr->transid[1] = tcIndxReq->transId2;
   }//if
@@ -11599,20 +11625,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
   Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
   indexOp->expectedAttrInfo = attrLength;
   Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
-  if (saveINDXKEYINFO(signal,
-                      indexOp,
-                      dataPtr,
-                      includedIndexLength)) {
+
+  int ret;
+  if ((ret = saveINDXKEYINFO(signal,
+                             indexOp,
+                             dataPtr,
+                             includedIndexLength)) == 0)
+  {
     jam();
     // We have received all we need
     readIndexTable(signal, regApiPtr, indexOp);
     return;
   }
+  else if (ret == -1)
+  {
+    jam();
+    return;
+  }
+
   dataPtr += includedIndexLength;
   if (saveINDXATTRINFO(signal,
                        indexOp,
                        dataPtr,
-                       includedAttrLength)) {
+                       includedAttrLength) == 0) {
     jam();
     // We have received all we need
     readIndexTable(signal, regApiPtr, indexOp);
@@ -11715,13 +11750,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
   TcIndexOperationPtr indexOpPtr;
   TcIndexOperation* indexOp;
 
+  if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
+  {
+    TCKEY_abort(signal, 19);
+    return;
+  }
+
+  if (regApiPtr->apiConnectstate == CS_ABORTING)
+  {
+    jam();
+    return;
+  }
+
   if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
   {
     indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXKEYINFO(signal,
                         indexOp,
                         src,
-                        keyInfoLength)) {
+                        keyInfoLength) == 0) {
       jam();
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
@@ -11748,17 +11795,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
   TcIndexOperationPtr indexOpPtr;
   TcIndexOperation* indexOp;
 
+  if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
+  {
+    TCKEY_abort(signal, 19);
+    return;
+  }
+
+  if (regApiPtr->apiConnectstate == CS_ABORTING)
+  {
+    jam();
+    return;
+  }
+
   if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
   {
     indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
     if (saveINDXATTRINFO(signal,
                          indexOp,
                          src,
-                         attrInfoLength)) {
+                         attrInfoLength) == 0) {
       jam();
       // We have received all we need
       readIndexTable(signal, regApiPtr, indexOp);
+      return;
     }
+    return;
   }
 }
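Note: the bool-to-int change gives saveINDXKEYINFO()/saveINDXATTRINFO() three
outcomes instead of two. A sketch of the convention as the callers above use
it (the enum is illustrative; the source uses bare int literals):

    // Return convention after this patch:
    enum SaveIndxResult {
      SAVE_COMPLETE  =  0,  // all KEYINFO/ATTRINFO in; run readIndexTable()
      SAVE_NEED_MORE =  1,  // more INDXKEYINFO/INDXATTRINFO signals expected
      SAVE_ABORTED   = -1   // allocation failed; abort already sent and the
                            // index operation released -- do not touch it
    };

    void dispatch_on_save(int ret)
    {
      if (ret == SAVE_COMPLETE) {
        // readIndexTable(signal, regApiPtr, indexOp);
      } else if (ret == SAVE_ABORTED) {
        // just return; indexOp is gone
      }
      // SAVE_NEED_MORE: fall through and wait for the next signal
    }
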
@@ -11766,12 +11827,13 @@
  * Save signal INDXKEYINFO
  * Return true if we have received all needed data
  */
-bool Dbtc::saveINDXKEYINFO(Signal* signal,
-                           TcIndexOperation* indexOp,
-                           const Uint32 *src,
-                           Uint32 len)
+int
+Dbtc::saveINDXKEYINFO(Signal* signal,
+                      TcIndexOperation* indexOp,
+                      const Uint32 *src,
+                      Uint32 len)
 {
-  if (!indexOp->keyInfo.append(src, len)) {
+  if (ERROR_INSERTED(8039) || !indexOp->keyInfo.append(src, len)) {
     jam();
     // Failed to seize keyInfo, abort transaction
 #ifdef VM_TRACE
@@ -11781,15 +11843,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
     apiConnectptr.i = indexOp->connectionIndex;
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
     releaseIndexOperation(apiConnectptr.p, indexOp);
-    terrorCode = 4000;
+    terrorCode = 289;
+    if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+      apiConnectptr.p->m_exec_flag= 1;
     abortErrorLab(signal);
-    return false;
+    return -1;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
-    return true;
+    return 0;
   }
-  return false;
+  return 1;
 }
 
 bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
@@ -11801,12 +11865,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
  * Save signal INDXATTRINFO
  * Return true if we have received all needed data
  */
-bool Dbtc::saveINDXATTRINFO(Signal* signal,
-                            TcIndexOperation* indexOp,
-                            const Uint32 *src,
-                            Uint32 len)
+int
+Dbtc::saveINDXATTRINFO(Signal* signal,
+                       TcIndexOperation* indexOp,
+                       const Uint32 *src,
+                       Uint32 len)
 {
-  if (!indexOp->attrInfo.append(src, len)) {
+  if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
     jam();
 #ifdef VM_TRACE
     ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
@@ -11814,15 +11879,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
     apiConnectptr.i = indexOp->connectionIndex;
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
     releaseIndexOperation(apiConnectptr.p, indexOp);
-    terrorCode = 4000;
+    terrorCode = 289;
+    if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+      apiConnectptr.p->m_exec_flag= 1;
     abortErrorLab(signal);
-    return false;
+    return -1;
   }
   if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
     jam();
-    return true;
+    return 0;
   }
-  return false;
+  return 1;
 }
 
 bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
@@ -12006,6 +12073,9 @@ void Dbtc::execTCKEYREF(Signal* signal)
     tcIndxRef->transId[0] = tcKeyRef->transId[0];
     tcIndxRef->transId[1] = tcKeyRef->transId[1];
     tcIndxRef->errorCode = tcKeyRef->errorCode;
+
+    releaseIndexOperation(regApiPtr, indexOp);
+
     sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
                TcKeyRef::SignalLength, JBB);
     return;
@@ -12538,7 +12608,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
 bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
                                TcIndexOperationPtr& indexOpPtr)
 {
-  return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
+  if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
+  {
+    ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
+    ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
+    ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
+    ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
+    ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
+    ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
+    return true;
+  }
+
+  return false;
 }
 
 void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
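Note: two related fixes sit in the hunks above — execTCKEYREF() now releases
the index operation it is finished with, and seizeIndexOperation() asserts
that a recycled operation comes back clean. A generic illustration of that
pool invariant (simplified C++, not DBTC code):

    #include <cassert>
    #include <vector>

    struct IndexOpLike {
      unsigned expectedKeyInfo;
      std::vector<unsigned> keyInfo;   // stands in for the section buffers
      IndexOpLike() : expectedKeyInfo(0) {}
      void reset() { expectedKeyInfo= 0; keyInfo.clear(); }
    };

    struct Pool {
      std::vector<IndexOpLike*> free_list;

      IndexOpLike* seize() {
        if (free_list.empty()) return new IndexOpLike();
        IndexOpLike* op= free_list.back(); free_list.pop_back();
        // Mirrors the patched seizeIndexOperation(): a recycled object must
        // be empty; if not, some release path (like the old TCKEYREF one)
        // skipped the cleanup and left stale state behind.
        assert(op->expectedKeyInfo == 0 && op->keyInfo.empty());
        return op;
      }

      void release(IndexOpLike* op) { op->reset(); free_list.push_back(op); }
    };
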
diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp
index f5076ff2020..1ebc5b7ef24 100644
--- a/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/ndb/src/ndbapi/NdbTransaction.cpp
@@ -481,12 +481,27 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
   while (1) {
     int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
     if (noOfComp == 0) {
-      /**
-       * This timeout situation can occur if NDB crashes.
+      /*
+       * Just for fun, this is only one of two places where
+       * we could hit this error... It's quite possible we
+       * hit it in Ndbif.cpp in Ndb::check_send_timeout()
+       *
+       * We behave rather similarly in both places.
+       * Hitting this is certainly a bug though...
        */
-      ndbout << "This timeout should never occur, execute(..)" << endl;
-      theError.code = 4012;
-      setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
+      g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
+                          "response from NDB data nodes. This should NEVER "
+                          "occur. You have likely hit an NDB bug. Please "
+                          "file a bug.");
+      DBUG_PRINT("error",("This timeout should never occur, execute()"));
+      g_eventLogger.error("Forcibly trying to rollback txn (%p"
+                          ") to try to clean up data node resources.",
+                          this);
+      executeNoBlobs(NdbTransaction::Rollback);
+      theError.code = 4012;
+      theError.status= NdbError::PermanentError;
+      theError.classification= NdbError::TimeoutExpired;
+      setOperationErrorCodeAbort(4012); // ndbd timeout
       DBUG_RETURN(-1);
     }//if
@@ -550,7 +565,12 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
    */
   if (theError.code != 0)
     DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
-  theError.code = 0;
+  /**
+   * for timeout (4012) we want sendROLLBACK to behave differently.
+   * Else, normal behaviour of reset errcode
+   */
+  if (theError.code != 4012)
+    theError.code = 0;
   NdbScanOperation* tcOp = m_theFirstScanOperation;
   if (tcOp != 0){
     // Execute any cursor operations
@@ -873,6 +893,12 @@ NdbTransaction::sendROLLBACK()      // Send a TCROLLBACKREQ signal;
   tSignal.setData(theTCConPtr, 1);
   tSignal.setData(tTransId1, 2);
   tSignal.setData(tTransId2, 3);
+  if(theError.code == 4012)
+  {
+    g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
+    tSignal.setLength(tSignal.getLength() + 1); // + flags
+    tSignal.setData(0x1, 4); // potentially bad data
+  }
   tReturnCode = tp->sendSignal(&tSignal,theDBnode);
   if (tReturnCode != -1) {
     theSendStatus = sendTC_ROLLBACK;
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 328b0688857..24ccb1d07c2 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -173,6 +173,8 @@ ErrorBundle ErrorCodes[] = {
   { 4022, TR, "Out of Send Buffer space in NDB API" },
   { 4032, TR, "Out of Send Buffer space in NDB API" },
   { 288,  TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+  { 289,  TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
+
   /**
    * InsufficientSpace
    */
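Note: from the application side, the upgraded 4012 handling above is
observable through NdbError. A hedged sketch of what a client can now rely
on (the NDB API calls are real; the handler function itself is hypothetical):

    #include <NdbApi.hpp>

    // After this change, a data-node timeout surfaces as error 4012,
    // flagged permanent and classified as a timeout, and the API has
    // already attempted a forced rollback on the application's behalf.
    void handle_execute_failure(Ndb* ndb, NdbTransaction* trans)
    {
      const NdbError& err= trans->getNdbError();
      if (err.code == 4012 &&
          err.classification == NdbError::TimeoutExpired)
      {
        // Retrying on this transaction object is pointless: close it and
        // start a new transaction once the cluster is reachable again.
        ndb->closeTransaction(trans);
      }
    }
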
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 78672cd519f..f715db1ef8c 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1297,6 +1297,102 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
   return res;
 }
 
+int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
+{
+  int result= NDBT_OK;
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+  const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
+
+  HugoOperations ops(*ctx->getTab(), idx);
+
+  g_err << "Using INDEX: " << pkIdxName << endl;
+
+  NdbRestarter restarter;
+
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+  const int batchsize = ctx->getProperty("BatchSize", 1);
+
+  for(int bs=1; bs < loops; bs++)
+  {
+    int c= 0;
+    while (c++ < loops)
+    {
+      g_err << "BS " << bs << " LOOP #" << c << endl;
+
+      g_err << "inserting error on op#" << c << endl;
+
+      CHECK(ops.startTransaction(pNdb) == 0);
+      for(int i=1;i<=c;i++)
+      {
+        if(i==c)
+        {
+          if(restarter.insertErrorInAllNodes(inject_err)!=0)
+          {
+            g_err << "**** FAILED to insert error" << endl;
+            result= NDBT_FAILED;
+            break;
+          }
+        }
+        CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0);
+        if(i%bs==0 || i==c)
+        {
+          if(istatus= STATUS_NOT_FOUND;
-    DBUG_RETURN(ndb_err(trans));
+    int err= ndb_err(trans);
+    if(err==HA_ERR_KEY_NOT_FOUND)
+      table->status= STATUS_NOT_FOUND;
+    else
+      table->status= STATUS_GARBAGE;
+
+    DBUG_RETURN(err);
   }
+  // The value has now been fetched from NDB
   unpack_record(buf);
   table->status= 0;

From c1b89b85ce24583d2bd9d48b3a951b956bb2a368 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 13 Aug 2007 11:36:30 +0200
Subject: [PATCH 5/8] bug#30337 DELETE ... WHERE PK IN (..) and AFTER DELETE
 trigger crashes API node: Disable multi_read_range if there are after
 delete/update triggers

---
 sql/ha_ndbcluster.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 03b6bcf3242..c7ad51596cf 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -6464,7 +6464,8 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
   if (uses_blob_value(m_retrieve_all_fields) ||
       (cur_index_type == UNIQUE_INDEX &&
        has_null_in_unique_index(active_index) &&
-       null_value_index_search(ranges, ranges+range_count, buffer)))
+       null_value_index_search(ranges, ranges+range_count, buffer))
+      || m_delete_cannot_batch || m_update_cannot_batch)
   {
     m_disable_multi_read= TRUE;
     DBUG_RETURN(handler::read_multi_range_first(found_range_p,
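Note: for reference, the statement shape that triggered the crash is a
batched primary-key delete whose AFTER DELETE trigger issues its own DML;
the next patch adds this as a regression test. Illustrative SQL (table and
trigger names hypothetical):

    -- two NDB tables, the second maintained by a trigger on the first
    CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=ndbcluster;
    CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=ndbcluster;

    DELIMITER |
    CREATE TRIGGER kaboom AFTER DELETE ON t1
    FOR EACH ROW BEGIN
      DELETE FROM t2 WHERE id=OLD.id;
    END|
    DELIMITER ;

    -- multi-range PK delete: previously batched, firing the trigger
    -- against rows the handler had already deleted as a batch
    DELETE FROM t1 WHERE id IN (1,2);
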
From a40202e6965d8c508defa223e6f1a13fd39051d4 Mon Sep 17 00:00:00 2001
From: unknown
Date: Mon, 13 Aug 2007 15:59:08 +0200
Subject: [PATCH 6/8] bug#30337 DELETE ... WHERE PK IN (..) and AFTER DELETE
 trigger crashes API node: Added testcase

---
 mysql-test/r/ndb_read_multi_range.result | 19 +++++++++++++++++++
 mysql-test/t/ndb_read_multi_range.test   | 22 ++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/mysql-test/r/ndb_read_multi_range.result b/mysql-test/r/ndb_read_multi_range.result
index 64a6749bed1..d18f4c1e65a 100644
--- a/mysql-test/r/ndb_read_multi_range.result
+++ b/mysql-test/r/ndb_read_multi_range.result
@@ -405,3 +405,22 @@ a b
 1 1
 10 10
 drop table t2;
+create table t1 (id int primary key) engine ndb;
+insert into t1 values (1), (2), (3);
+create table t2 (id int primary key) engine ndb;
+insert into t2 select id from t1;
+create trigger kaboom after delete on t1
+for each row begin
+delete from t2 where id=old.id;
+end|
+select * from t1 order by id;
+id
+1
+2
+3
+delete from t1 where id in (1,2);
+select * from t2 order by id;
+id
+3
+drop trigger kaboom;
+drop table t1;
diff --git a/mysql-test/t/ndb_read_multi_range.test b/mysql-test/t/ndb_read_multi_range.test
index e1f1dfc1150..1d1d5f26552 100644
--- a/mysql-test/t/ndb_read_multi_range.test
+++ b/mysql-test/t/ndb_read_multi_range.test
@@ -291,3 +291,25 @@
 insert into t2 values (1,1), (10,10);
 select * from t2 use index (ab) where a in(1,10) order by a;
 drop table t2;
+
+#bug#30337
+
+create table t1 (id int primary key) engine ndb;
+insert into t1 values (1), (2), (3);
+
+create table t2 (id int primary key) engine ndb;
+insert into t2 select id from t1;
+
+delimiter |;
+create trigger kaboom after delete on t1
+for each row begin
+  delete from t2 where id=old.id;
+end|
+delimiter ;|
+
+select * from t1 order by id;
+delete from t1 where id in (1,2);
+select * from t2 order by id;
+
+drop trigger kaboom;
+drop table t1;

From 6656d39c7216cc07b457f090d5432168e668582d Mon Sep 17 00:00:00 2001
From: unknown
Date: Tue, 14 Aug 2007 15:07:17 +1000
Subject: [PATCH 7/8] Backport Magnus' fix from 5.1

ChangeSet@1.2575, 2007-08-07 19:16:06+02:00, msvensson@pilot.(none) +2 -0
  Bug#26793 mysqld crashes when doing specific query on information_schema
  - Drop the newly created user user1@localhost
  - Cleanup testcase

mysql-test/r/ndb_bug26793.result:
  mysql-test/r/ndb_bug26793.result@1.3, 2007-08-07 19:16:04+02:00, msvensson@pilot.(none) +1 -6
  Update test result
mysql-test/t/ndb_bug26793.test:
  mysql-test/t/ndb_bug26793.test@1.3, 2007-08-07 19:16:04+02:00, msvensson@pilot.(none) +8 -11
  - Remove the drop/restore of anonymous users - there are no such users
    by default anymore (if there were, they would probably be in mysql.user)
  - Switch back to default connection before cleanup
  - Drop user1@localhost as part of cleanup
---
 mysql-test/r/ndb_bug26793.result |  7 +------
 mysql-test/t/ndb_bug26793.test   | 21 +++++++++------------
 2 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/mysql-test/r/ndb_bug26793.result b/mysql-test/r/ndb_bug26793.result
index 31f9763dd6b..a9a8a798546 100644
--- a/mysql-test/r/ndb_bug26793.result
+++ b/mysql-test/r/ndb_bug26793.result
@@ -3,11 +3,6 @@ CREATE TABLE `test` (
 `id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY ,
 `t` VARCHAR( 10 ) NOT NULL
 ) ENGINE = ndbcluster;
-create table test.db_temp as select * from mysql.db where user='';
-delete from mysql.db where user='';
-flush privileges;
 GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
 DROP TABLE `test`.`test`;
-insert into mysql.db select * from test.db_temp;
-drop table db_temp;
-flush privileges;
+drop user user1@localhost;
diff --git a/mysql-test/t/ndb_bug26793.test b/mysql-test/t/ndb_bug26793.test
index 4f5a78fdca4..f35d8808c1a 100644
--- a/mysql-test/t/ndb_bug26793.test
+++ b/mysql-test/t/ndb_bug26793.test
@@ -9,15 +9,12 @@ CREATE TABLE `test` (
 `t` VARCHAR( 10 ) NOT NULL
 ) ENGINE = ndbcluster;
 
-create table test.db_temp as select * from mysql.db where user='';
-delete from mysql.db where user='';
-
-flush privileges;
-
+# Add user1@localhost with a specific password
+# and connect as that user
 GRANT USAGE ON *.* TO user1@localhost IDENTIFIED BY 'pass';
-
 connect (user1,localhost,user1,pass,*NO-ONE*);
 
+# Run the query 100 times
 disable_query_log;
 disable_result_log;
 let $i= 100;
 while ($i)
 {
@@ -29,10 +26,10 @@ dec $i;
 }
 enable_query_log;
 enable_result_log;
 
-connect (root,localhost,root,,test);
-connection root;
-DROP TABLE `test`.`test`;
-insert into mysql.db select * from test.db_temp;
-drop table db_temp;
-flush privileges;
+disconnect user1;
+
+# Switch back to the default connection and cleanup
+connection default;
+DROP TABLE `test`.`test`;
+drop user user1@localhost;
From cc0750aceeb29fae7fd6544a5e46ed5b9c61d1d2 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 29 Aug 2007 21:45:53 +0300
Subject: [PATCH 8/8] ndb - bug#29102 : use locked read even for blob parts

ndb/src/ndbapi/NdbBlob.cpp:
  race condition: s/committedRead/readTuple/ when reading parts, since
  TUP commits tuples separately
---
 ndb/src/ndbapi/NdbBlob.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index 7ab9c2132d8..611d0396f96 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -892,7 +892,12 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
   while (n < count) {
     NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
     if (tOp == NULL ||
-        tOp->committedRead() == -1 ||
+        /*
+         * This was committedRead() before. However lock on main
+         * table tuple does not fully protect blob parts since DBTUP
+         * commits each tuple separately.
+         */
+        tOp->readTuple() == -1 ||
         setPartKeyValue(tOp, part + n) == -1 ||
         tOp->getValue((Uint32)3, buf) == NULL) {
       setErrorCode(tOp);
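Note: the one-line change in patch 8 rests on the locking difference between
the two read modes. A hedged sketch of that difference (committedRead() and
readTuple() are the real NdbOperation calls; the surrounding function is
illustrative — readParts() itself also sets the part key and fetches
column 3, the part data):

    #include <NdbApi.hpp>

    int read_part_locked(NdbTransaction* trans,
                         const NdbDictionary::Table* partsTab)
    {
      NdbOperation* op= trans->getNdbOperation(partsTab);
      if (op == NULL)
        return -1;
      // committedRead(): lock-free read of the last committed tuple.
      // Because DBTUP commits each blob-part tuple separately, it can
      // observe a mix of old and new parts while the main-table row is
      // being updated, even though the main row itself is locked.
      // readTuple(): shared-lock read; the parts stay consistent with
      // the main-table row for the life of the transaction.
      if (op->readTuple() == -1)
        return -1;
      return 0;
    }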