From 750de61d2357f682f32f540c38681a85d98bcdfb Mon Sep 17 00:00:00 2001
From: "joerg@mysql.com" <>
Date: Wed, 30 Mar 2005 17:45:03 +0200
Subject: [PATCH 01/65] As of 5.0.4, all builds will use '--with-big-tables'.
 Let development builds follow.

---
 BUILD/SETUP.sh | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index fbb0936c4b5..f84dfd4b22d 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -48,13 +48,13 @@ global_warnings="-Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wch
 c_warnings="$global_warnings -Wunused"
 cxx_warnings="$global_warnings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor"
 
-base_max_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-raid --with-openssl --with-raid"
-max_leave_isam_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-federated-storage-engine --with-raid --with-openssl --with-raid --with-embedded-server"
+base_max_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-raid --with-openssl --with-raid --with-big-tables"
+max_leave_isam_configs="--with-innodb --with-berkeley-db --with-ndbcluster --with-archive-storage-engine --with-federated-storage-engine --with-raid --with-openssl --with-raid --with-embedded-server --with-big-tables"
 max_no_es_configs="$max_leave_isam_configs --without-isam"
 max_configs="$max_no_es_configs --with-embedded-server"
 
 alpha_cflags="-mcpu=ev6 -Wa,-mev6"	# Not used yet
-amd64_cflags="-DBIG_TABLES"
+amd64_cflags=""	# If dropping '--with-big-tables', add here "-DBIG_TABLES"
 pentium_cflags="-mcpu=pentiumpro"
 pentium64_cflags="-mcpu=nocona -m64"
 ppc_cflags="-mpowerpc -mcpu=powerpc"
@@ -70,9 +70,9 @@ debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS -DSAFEMA
 debug_extra_cflags="-O1 -Wuninitialized"
 
 base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
-amd64_cxxflags="-DBIG_TABLES"
+amd64_cxxflags=""	# If dropping '--with-big-tables', add here "-DBIG_TABLES"
 
-base_configs="$prefix_configs --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-readline"
+base_configs="$prefix_configs --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-readline --with-big-tables"
 static_link="--with-mysqld-ldflags=-all-static --with-client-ldflags=-all-static"
 amd64_configs=""
 alpha_configs=""	# Not used yet
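For reference, the equivalent manual build outside BUILD/SETUP.sh — a minimal sketch assuming a prepared autotools source tree; the flags simply mirror the new base_configs value above:

    # Hedged sketch: what the script now effectively passes for all
    # development builds.
    ./configure --with-big-tables \
                --enable-assembler \
                --with-extra-charsets=complex \
                --enable-thread-safe-client \
                --with-readline
    make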
From 39e750d949b203e2ebd282ac9e6ff3145761b5cf Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Tue, 12 Apr 2005 17:54:34 +0200
Subject: [PATCH 02/65] bug#9749 - ndb lock upgrade handle more cases...

---
 ndb/src/kernel/blocks/dbacc/Dbacc.hpp     |   2 +
 ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 143 +++++++++++++++++++++-
 ndb/test/ndbapi/testOperations.cpp        |  72 +++++++++--
 3 files changed, 204 insertions(+), 13 deletions(-)

diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index 64b947b5462..aaa4aca7b00 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -1100,6 +1100,8 @@ private:
   Uint32 executeNextOperation(Signal* signal);
   void releaselock(Signal* signal);
   void takeOutFragWaitQue(Signal* signal);
+  void check_lock_upgrade(Signal* signal, OperationrecPtr lock_owner,
+                          OperationrecPtr release_op);
   void allocOverflowPage(Signal* signal);
   bool getrootfragmentrec(Signal* signal, RootfragmentrecPtr&, Uint32 fragId);
   void insertLockOwnersList(Signal* signal, const OperationrecPtr&);
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 17c5a31cbed..28956de198c 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -5802,9 +5802,148 @@ void Dbacc::commitOperation(Signal* signal)
     ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
     tolqTmpPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
   }//if
-  }//if
+
+    /**
+     * Check possible lock upgrade
+     * 1) Find lock owner
+     * 2) Count transactions in parallel que
+     * 3) If count == 1 and TRANSID(next serial) == TRANSID(lock owner)
+     *      upgrade next serial
+     */
+    if(operationRecPtr.p->lockMode)
+    {
+      jam();
+      /**
+       * Committing a non shared operation can't lead to lock upgrade
+       */
+      return;
+    }
+
+    OperationrecPtr lock_owner;
+    lock_owner.i = operationRecPtr.p->prevParallelQue;
+    ptrCheckGuard(lock_owner, coprecsize, operationrec);
+    Uint32 transid[2] = { lock_owner.p->transId1,
+                          lock_owner.p->transId2 };
+
+
+    while(lock_owner.p->prevParallelQue != RNIL)
+    {
+      lock_owner.i = lock_owner.p->prevParallelQue;
+      ptrCheckGuard(lock_owner, coprecsize, operationrec);
+
+      if(lock_owner.p->transId1 != transid[0] ||
+         lock_owner.p->transId2 != transid[1])
+      {
+        jam();
+        /**
+         * If more than 1 trans in lock queue -> no lock upgrade
+         */
+        return;
+      }
+    }
+
+    check_lock_upgrade(signal, lock_owner, operationRecPtr);
+  }
 }//Dbacc::commitOperation()
 
+void
+Dbacc::check_lock_upgrade(Signal* signal,
+                          OperationrecPtr lock_owner,
+                          OperationrecPtr release_op)
+{
+  if((lock_owner.p->transId1 == release_op.p->transId1 &&
+      lock_owner.p->transId2 == release_op.p->transId2) ||
+     release_op.p->lockMode ||
+     lock_owner.p->nextSerialQue == RNIL)
+  {
+    jam();
+    /**
+     * No lock upgrade if same trans or lock owner has no serial queue
+     * or releasing non shared op
+     */
+    return;
+  }
+
+  OperationrecPtr next;
+  next.i = lock_owner.p->nextSerialQue;
+  ptrCheckGuard(next, coprecsize, operationrec);
+
+  if(lock_owner.p->transId1 != next.p->transId1 ||
+     lock_owner.p->transId2 != next.p->transId2)
+  {
+    jam();
+    /**
+     * No lock upgrad if !same trans in serial queue
+     */
+    return;
+  }
+
+  tgnptMainOpPtr = lock_owner;
+  getNoParallelTransaction(signal);
+  if (tgnptNrTransaction > 1)
+  {
+    jam();
+    /**
+     * No lock upgrade if more than 1 transaction in parallell queue
+     */
+    return;
+  }
+
+  OperationrecPtr tmp;
+  tmp.i = lock_owner.p->nextSerialQue = next.p->nextSerialQue;
+  if(tmp.i != RNIL)
+  {
+    ptrCheckGuard(tmp, coprecsize, operationrec);
+    ndbassert(tmp.p->prevSerialQue == next.i);
+    tmp.p->prevSerialQue = lock_owner.i;
+  }
+  next.p->nextSerialQue = next.p->prevSerialQue = RNIL;
+
+  // Find end of parallell que
+  tmp = lock_owner;
+  tmp.p->lockMode= 1;
+  while(tmp.p->nextParallelQue != RNIL)
+  {
+    jam();
+    tmp.i = tmp.p->nextParallelQue;
+    ptrCheckGuard(tmp, coprecsize, operationrec);
+    tmp.p->lockMode= 1;
+  }
+
+  next.p->prevParallelQue = tmp.i;
+  tmp.p->nextParallelQue = next.i;
+
+  OperationrecPtr save = operationRecPtr;
+
+  Uint32 TelementIsDisappeared = 0; // lock upgrade = all reads
+  Uint32 ThashValue = lock_owner.p->hashValue;
+  Uint32 localdata[2];
+  localdata[0] = lock_owner.p->localdata[0];
+  localdata[1] = lock_owner.p->localdata[1];
+  do {
+    next.p->elementIsDisappeared = TelementIsDisappeared;
+    next.p->hashValue = ThashValue;
+    next.p->localdata[0] = localdata[0];
+    next.p->localdata[1] = localdata[1];
+
+    operationRecPtr = next;
+    ndbassert(next.p->lockMode);
+    TelementIsDisappeared = executeNextOperation(signal);
+    if (next.p->nextParallelQue != RNIL)
+    {
+      jam();
+      next.i = next.p->nextParallelQue;
+      ptrCheckGuard(next, coprecsize, operationrec);
+    } else {
+      jam();
+      break;
+    }//if
+  } while (1);
+
+  operationRecPtr = save;
+
+}
+
 /* ------------------------------------------------------------------------- */
 /*       RELEASELOCK                                                          */
 /*       RESETS LOCK OF AN ELEMENT.                                           */
@@ -5841,6 +5980,8 @@ void Dbacc::releaselock(Signal* signal)
     ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
     trlTmpOperPtr.p->prevSerialQue = trlOperPtr.i;
   }//if
+
+  check_lock_upgrade(signal, copyInOperPtr, operationRecPtr);
 /* --------------------------------------------------------------------------------- */
 /*       SINCE THERE ARE STILL ITEMS IN THE PARALLEL QUEUE WE NEED NOT WORRY ABOUT    */
 /*       STARTING QUEUED OPERATIONS. THUS WE CAN END HERE.                            */
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index 9f1d5ee1191..773511a0475 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -547,21 +547,64 @@ runLockUpgrade1(NDBT_Context* ctx, NDBT_Step* step){
   do {
     CHECK(hugoOps.startTransaction(pNdb) == 0);
-    CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0);
-    CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+    if(ctx->getProperty("LOCK_UPGRADE", 1) == 1)
+    {
+      CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0);
+      CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
 
-    ctx->setProperty("READ_DONE", 1);
-    ctx->broadcast();
-    ndbout_c("wait 2");
-    ctx->getPropertyWait("READ_DONE", 2);
-    ndbout_c("wait 2 - done");
+      ctx->setProperty("READ_DONE", 1);
+      ctx->broadcast();
+      ndbout_c("wait 2");
+      ctx->getPropertyWait("READ_DONE", 2);
+      ndbout_c("wait 2 - done");
+    }
+    else
+    {
+      ctx->setProperty("READ_DONE", 1);
+      ctx->broadcast();
+      ctx->getPropertyWait("READ_DONE", 2);
+      ndbout_c("wait 2 - done");
+      CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0);
+      CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
+    }
+    if(ctx->getProperty("LU_OP", o_INS) == o_INS)
+    {
+      CHECK(hugoOps.pkDeleteRecord(pNdb, 0, 1) == 0);
+      CHECK(hugoOps.pkInsertRecord(pNdb, 0, 1, 2) == 0);
+    }
+    else if(ctx->getProperty("LU_OP", o_UPD) == o_UPD)
+    {
+      CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, 2) == 0);
+    }
+    else
+    {
+      CHECK(hugoOps.pkDeleteRecord(pNdb, 0, 1) == 0);
+    }
     ctx->setProperty("READ_DONE", 3);
     ctx->broadcast();
     ndbout_c("before update");
-    CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, 2) == 0);
     ndbout_c("wait update");
-    CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
-    CHECK(hugoOps.closeTransaction(pNdb));
+    CHECK(hugoOps.execute_Commit(pNdb) == 0);
+    CHECK(hugoOps.closeTransaction(pNdb) == 0);
+
+    CHECK(hugoOps.startTransaction(pNdb) == 0);
+    CHECK(hugoOps.pkReadRecord(pNdb, 0, 1) == 0);
+    int res= hugoOps.execute_Commit(pNdb);
+    if(ctx->getProperty("LU_OP", o_INS) == o_INS)
+    {
+      CHECK(res == 0);
+      CHECK(hugoOps.verifyUpdatesValue(2) == 0);
+    }
+    else if(ctx->getProperty("LU_OP", o_UPD) == o_UPD)
+    {
+      CHECK(res == 0);
+      CHECK(hugoOps.verifyUpdatesValue(2) == 0);
+    }
+    else
+    {
+      CHECK(res == 626);
+    }
+
   } while(0);
 
   return result;
@@ -592,10 +635,10 @@ runLockUpgrade2(NDBT_Context* ctx, NDBT_Step* step){
     ndbout_c("wait 3 - done");
 
     NdbSleep_MilliSleep(200);
-    CHECK(hugoOps.execute_Commit(pNdb) == 0);  
+    CHECK(hugoOps.execute_Commit(pNdb) == 0);
   } while(0);
 
-  return NDBT_FAILED;
+  return result;
 }
 
 int
@@ -607,11 +650,16 @@ main(int argc, const char** argv){
 
   NDBT_TestSuite ts("testOperations");
 
+  for(Uint32 i = 0; i<6; i++)
   {
     BaseString name("bug_9749");
+    name.appfmt("_%d", i);
     NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts,
                                                     name.c_str(), "");
+    pt->setProperty("LOCK_UPGRADE", 1 + (i & 1));
+    pt->setProperty("LU_OP", 1 + (i >> 1));
+
     pt->addInitializer(new NDBT_Initializer(pt,
                                             "runClearTable",
                                             runClearTable));
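The precondition that this patch (and the follow-up below) enforces is easier to see outside the block code. A standalone sketch with hypothetical, simplified types — not the real DBACC records or queue layout: the serial-queue head may only be upgraded when every operation in the lock owner's parallel queue belongs to one transaction, and the serial head belongs to that same transaction.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for a DBACC operation record.
    struct Op {
      uint32_t transId1, transId2;
    };

    // True when all parallel-queue operations share one transaction id
    // (the "count == 1" test done by getNoParallelTransaction()).
    static bool singleTransaction(const std::vector<Op>& parallelQueue) {
      for (const Op& op : parallelQueue)
        if (op.transId1 != parallelQueue[0].transId1 ||
            op.transId2 != parallelQueue[0].transId2)
          return false;
      return true;
    }

    // The upgrade precondition sketched by the patch: one transaction in
    // the parallel queue, and the first serial waiter is that transaction.
    static bool mayUpgrade(const std::vector<Op>& parallelQueue,
                           const Op& serialHead) {
      return !parallelQueue.empty() &&
             singleTransaction(parallelQueue) &&
             serialHead.transId1 == parallelQueue[0].transId1 &&
             serialHead.transId2 == parallelQueue[0].transId2;
    }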
From 45a07db5c01937b37c4d62b7bdcd024740d87e6d Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Wed, 13 Apr 2005 09:54:40 +0200
Subject: [PATCH 03/65] BUG#9749 - ndb lock upgrade - more fixes...

1) Make getNoParall into function instead of a procedure
2) Check for multiple transactions in "upgrade's" parallel queue
3) Set lock mode according to lock_owner's lockMode

NOTE: Still does not handle lock upgrade correctly in case of aborts
---
 ndb/src/kernel/blocks/dbacc/Dbacc.hpp     |  4 +-
 ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 69 +++++++++++------------
 2 files changed, 34 insertions(+), 39 deletions(-)

diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index aaa4aca7b00..246afc5ceb8 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -1022,7 +1022,7 @@ private:
   Uint32 placeReadInLockQueue(Signal* signal);
   void placeSerialQueueRead(Signal* signal);
   void checkOnlyReadEntry(Signal* signal);
-  void getNoParallelTransaction(Signal* signal);
+  Uint32 getNoParallelTransaction(const Operationrec*);
   void moveLastParallelQueue(Signal* signal);
   void moveLastParallelQueueWrite(Signal* signal);
   Uint32 placeWriteInLockQueue(Signal* signal);
@@ -1265,7 +1265,6 @@ private:
   OperationrecPtr mlpqOperPtr;
   OperationrecPtr queOperPtr;
   OperationrecPtr readWriteOpPtr;
-  OperationrecPtr tgnptMainOpPtr;
   Uint32 cfreeopRec;
   Uint32 coprecsize;
 /* --------------------------------------------------------------------------------- */
@@ -1516,7 +1515,6 @@ private:
   Uint32 turlIndex;
   Uint32 tlfrTmp1;
   Uint32 tlfrTmp2;
-  Uint32 tgnptNrTransaction;
   Uint32 tudqeIndex;
   Uint32 tscanTrid1;
   Uint32 tscanTrid2;
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 28956de198c..cdb9091da42 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -1936,9 +1936,7 @@ void Dbacc::insertelementLab(Signal* signal)
 /* --------------------------------------------------------------------------------- */
 Uint32 Dbacc::placeReadInLockQueue(Signal* signal)
 {
-  tgnptMainOpPtr = queOperPtr;
-  getNoParallelTransaction(signal);
-  if (tgnptNrTransaction == 1) {
+  if (getNoParallelTransaction(queOperPtr.p) == 1) {
     if ((queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
         (queOperPtr.p->transId2 == operationRecPtr.p->transId2)) {
 /* --------------------------------------------------------------------------------- */
@@ -2021,9 +2019,7 @@ void Dbacc::placeSerialQueueRead(Signal* signal)
     checkOnlyReadEntry(signal);
     return;
   }//if
-  tgnptMainOpPtr = readWriteOpPtr;
-  getNoParallelTransaction(signal);
-  if (tgnptNrTransaction == 1) {
+  if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
     jam();
 /* --------------------------------------------------------------------------------- */
 /*       THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
 /*       TRANSACTION WE CAN STILL GET HOLD OF THE LOCK.                                */
@@ -2104,24 +2100,23 @@ void Dbacc::checkOnlyReadEntry(Signal* signal)
 /* --------------------------------------------------------------------------------- */
 /*       GET_NO_PARALLEL_TRANSACTION                                                  */
 /* --------------------------------------------------------------------------------- */
-void Dbacc::getNoParallelTransaction(Signal* signal)
+Uint32
+Dbacc::getNoParallelTransaction(const Operationrec * op)
 {
-  OperationrecPtr tnptOpPtr;
-
-  tgnptNrTransaction = 1;
-  tnptOpPtr.i = tgnptMainOpPtr.p->nextParallelQue;
-  while ((tnptOpPtr.i != RNIL) &&
-         (tgnptNrTransaction == 1)) {
+  OperationrecPtr tmp;
+
+  tmp.i= op->nextParallelQue;
+  Uint32 transId[2] = { op->transId1, op->transId2 };
+  while (tmp.i != RNIL)
+  {
     jam();
-    ptrCheckGuard(tnptOpPtr, coprecsize, operationrec);
-    if ((tnptOpPtr.p->transId1 == tgnptMainOpPtr.p->transId1) &&
-        (tnptOpPtr.p->transId2 == tgnptMainOpPtr.p->transId2)) {
-      tnptOpPtr.i = tnptOpPtr.p->nextParallelQue;
-    } else {
-      jam();
-      tgnptNrTransaction++;
-    }//if
-  }//while
+    ptrCheckGuard(tmp, coprecsize, operationrec);
+    if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1])
+      tmp.i = tmp.p->nextParallelQue;
+    else
+      return 2;
+  }
+  return 1;
 }//Dbacc::getNoParallelTransaction()
 
 void Dbacc::moveLastParallelQueue(Signal* signal)
@@ -2162,9 +2157,7 @@ void Dbacc::moveLastParallelQueueWrite(Signal* signal)
 /* --------------------------------------------------------------------------------- */
 Uint32 Dbacc::placeWriteInLockQueue(Signal* signal)
 {
-  tgnptMainOpPtr = queOperPtr;
-  getNoParallelTransaction(signal);
-  if (!((tgnptNrTransaction == 1) &&
+  if (!((getNoParallelTransaction(queOperPtr.p) == 1) &&
         (queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
         (queOperPtr.p->transId2 == operationRecPtr.p->transId2))) {
     jam();
@@ -2215,9 +2208,7 @@ void Dbacc::placeSerialQueueWrite(Signal* signal)
   }//if
   readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
   ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
-  tgnptMainOpPtr = readWriteOpPtr;
-  getNoParallelTransaction(signal);
-  if (tgnptNrTransaction == 1) {
+  if (getNoParallelTransaction(readWriteOpPtr.p) == 1) {
 /* --------------------------------------------------------------------------------- */
 /*       THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
 /*       TRANSACTION WE CAN STILL GET HOLD OF THE LOCK.                                */
@@ -5878,9 +5869,7 @@ Dbacc::check_lock_upgrade(Signal* signal,
     return;
   }
 
-  tgnptMainOpPtr = lock_owner;
-  getNoParallelTransaction(signal);
-  if (tgnptNrTransaction > 1)
+  if (getNoParallelTransaction(lock_owner.p) > 1)
   {
     jam();
     /**
@@ -5888,6 +5877,15 @@ Dbacc::check_lock_upgrade(Signal* signal,
      */
     return;
   }
+
+  if (getNoParallelTransaction(next.p) > 1)
+  {
+    jam();
+    /**
+     * No lock upgrade if more than 1 transaction in next's parallell queue
+     */
+    return;
+  }
 
   OperationrecPtr tmp;
   tmp.i = lock_owner.p->nextSerialQue = next.p->nextSerialQue;
@@ -5901,20 +5899,19 @@ Dbacc::check_lock_upgrade(Signal* signal,
 
   // Find end of parallell que
   tmp = lock_owner;
-  tmp.p->lockMode= 1;
   while(tmp.p->nextParallelQue != RNIL)
   {
     jam();
    tmp.i = tmp.p->nextParallelQue;
     ptrCheckGuard(tmp, coprecsize, operationrec);
-    tmp.p->lockMode= 1;
   }
 
   next.p->prevParallelQue = tmp.i;
   tmp.p->nextParallelQue = next.i;
 
   OperationrecPtr save = operationRecPtr;
-  
+  Uint32 lockMode = lock_owner.p->lockMode;
+
   Uint32 TelementIsDisappeared = 0; // lock upgrade = all reads
   Uint32 ThashValue = lock_owner.p->hashValue;
   Uint32 localdata[2];
@@ -5927,7 +5924,7 @@ Dbacc::check_lock_upgrade(Signal* signal,
     next.p->localdata[1] = localdata[1];
 
     operationRecPtr = next;
-    ndbassert(next.p->lockMode);
+    next.p->lockMode = lockMode;
     TelementIsDisappeared = executeNextOperation(signal);
     if (next.p->nextParallelQue != RNIL)
     {
@@ -5941,7 +5938,7 @@ Dbacc::check_lock_upgrade(Signal* signal,
   } while (1);
 
   operationRecPtr = save;
-  
+
 }
 
 /* ------------------------------------------------------------------------- */
From 27f7a6c41bb9bd786d16e2b24fce66b16a9d99b5 Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Thu, 14 Apr 2005 13:43:07 +0200
Subject: [PATCH 04/65] BUG#9891 - ndb lcp

Crash if ACC_CONTOPREQ was sent while ACC_LCPCONF was in the job buffer.
If ACC_LCPCONF had arrived earlier (before TUP_LCPSTARTED), operations
could lock up, but would be restarted on the next LCP.

-- LQH
1) Better check for LCP started that will also return true if ACC or TUP
   has already completed
2) Remove incorrect if statement that prevented operations from being
   started if ACC had completed
-- ACC
Make sure all ACC_CONTOPCONF are sent before releasing the lcp record,
i.e. use noOfLcpConf == 4 (2 ACC_LCPCONF + 2 ACC_CONTOPCONF).
Check for == 4 also when sending ACC_CONTOPCONF.
---
 ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 20 ++++++-
 ndb/src/kernel/blocks/dblqh/Dblqh.hpp     |  3 +-
 ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 66 +++++++++--------------
 3 files changed, 44 insertions(+), 45 deletions(-)

diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index cdb9091da42..d566639489c 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -8486,7 +8486,7 @@ void Dbacc::checkSendLcpConfLab(Signal* signal)
     break;
   }//switch
   lcpConnectptr.p->noOfLcpConf++;
-  ndbrequire(lcpConnectptr.p->noOfLcpConf <= 2);
+  ndbrequire(lcpConnectptr.p->noOfLcpConf <= 4);
   fragrecptr.p->fragState = ACTIVEFRAG;
   rlpPageptr.i = fragrecptr.p->zeroPagePtr;
   ptrCheckGuard(rlpPageptr, cpagesize, page8);
@@ -8504,7 +8504,7 @@ void Dbacc::checkSendLcpConfLab(Signal* signal)
   }//for
   signal->theData[0] = fragrecptr.p->lcpLqhPtr;
   sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPCONF, signal, 1, JBB);
-  if (lcpConnectptr.p->noOfLcpConf == 2) {
+  if (lcpConnectptr.p->noOfLcpConf == 4) {
     jam();
     releaseLcpConnectRec(signal);
     rootfragrecptr.i = fragrecptr.p->myroot;
@@ -8535,6 +8535,13 @@ void Dbacc::execACC_CONTOPREQ(Signal* signal)
   /* LOCAL FRAG ID    */
   tresult = 0;
   ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+  if(ERROR_INSERTED(3002) && lcpConnectptr.p->noOfLcpConf < 2)
+  {
+    sendSignalWithDelay(cownBlockref, GSN_ACC_CONTOPREQ, signal, 300,
+                        signal->getLength());
+    return;
+  }
+
   ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
   rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
   ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
@@ -8568,6 +8575,15 @@ void Dbacc::execACC_CONTOPREQ(Signal* signal)
   }//while
   signal->theData[0] = fragrecptr.p->lcpLqhPtr;
   sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_CONTOPCONF, signal, 1, JBA);
+
+  lcpConnectptr.p->noOfLcpConf++;
+  if (lcpConnectptr.p->noOfLcpConf == 4) {
+    jam();
+    releaseLcpConnectRec(signal);
+    rootfragrecptr.i = fragrecptr.p->myroot;
+    ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+    rootfragrecptr.p->rootState = ACTIVEROOT;
+  }//if
   return;
   /* ALL QUEUED OPERATION ARE RESTARTED IF NEEDED. */
 }//Dbacc::execACC_CONTOPREQ()
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 0c63cb5fe17..19e055a3011 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -968,7 +968,6 @@ public:
   enum LcpState {
     LCP_IDLE = 0,
-    LCP_STARTED = 1,
     LCP_COMPLETED = 2,
     LCP_WAIT_FRAGID = 3,
     LCP_WAIT_TUP_PREPLCP = 4,
@@ -2266,7 +2265,7 @@ private:
   void sendCopyActiveConf(Signal* signal,Uint32 tableId);
   void checkLcpCompleted(Signal* signal);
   void checkLcpHoldop(Signal* signal);
-  void checkLcpStarted(Signal* signal);
+  bool checkLcpStarted(Signal* signal);
   void checkLcpTupprep(Signal* signal);
   void getNextFragForLcp(Signal* signal);
   void initLcpLocAcc(Signal* signal, Uint32 fragId);
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index c79f4dfc6c7..27f995750b6 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -10351,8 +10351,8 @@ void Dblqh::execTUP_LCPSTARTED(Signal* signal)
 void Dblqh::lcpStartedLab(Signal* signal)
 {
-  checkLcpStarted(signal);
-  if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
+  if (checkLcpStarted(signal))
+  {
     jam();
     /* ----------------------------------------------------------------------
      *    THE LOCAL CHECKPOINT HAS BEEN STARTED. IT IS NOW TIME TO
@@ -10432,26 +10432,7 @@ void Dblqh::execLQH_RESTART_OP(Signal* signal)
   lcpPtr.i = signal->theData[1];
   ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
   ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
-  if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
-    jam();
-    /***********************************************************************/
-    /*       THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND
-     *       THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE
-     *       REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED.
-     ***********************************************************************/
-    restartOperationsLab(signal);
-  } else if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
-    jam();
-    /*******************************************************************>
-     *       THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP
-     *       ALL OPERATIONS AGAIN.
-     *       WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT
-     *       FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS.
-     *******************************************************************> */
-    restartOperationsLab(signal);
-  } else {
-    ndbrequire(false);
-  }
+  restartOperationsLab(signal);
 }//Dblqh::execLQH_RESTART_OP()
 
 void Dblqh::restartOperationsLab(Signal* signal)
@@ -11000,7 +10981,8 @@ void Dblqh::checkLcpHoldop(Signal* signal)
  *
  *       SUBROUTINE SHORT NAME = CLS
  * ========================================================================== */
-void Dblqh::checkLcpStarted(Signal* signal)
+bool
+Dblqh::checkLcpStarted(Signal* signal)
 {
   LcpLocRecordPtr clsLcpLocptr;
 
@@ -11010,7 +10992,7 @@ void Dblqh::checkLcpStarted(Signal* signal)
   do {
     ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
     if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){
-      return;
+      return false;
     }//if
     clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
     i++;
@@ -11021,12 +11003,13 @@ void Dblqh::checkLcpStarted(Signal* signal)
   do {
     ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
    if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){
-      return;
+      return false;
     }//if
     clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
     i++;
   } while (clsLcpLocptr.i != RNIL);
-  lcpPtr.p->lcpState = LcpRecord::LCP_STARTED;
+
+  return true;
 }//Dblqh::checkLcpStarted()
 
 /* ==========================================================================
@@ -11187,20 +11170,12 @@ void Dblqh::sendAccContOp(Signal* signal)
   do {
     ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
     sacLcpLocptr.p->accContCounter = 0;
-    if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED){
-      /* ------------------------------------------------------------------- */
-      /*SEND START OPERATIONS TO ACC AGAIN                                    */
-      /* ------------------------------------------------------------------- */
-      signal->theData[0] = lcpPtr.p->lcpAccptr;
-      signal->theData[1] = sacLcpLocptr.p->locFragid;
-      sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
-      count++;
-    } else if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED){
-      signal->theData[0] = sacLcpLocptr.i;
-      sendSignal(reference(), GSN_ACC_CONTOPCONF, signal, 1, JBB);
-    } else {
-      ndbrequire(false);
-    }
+    /* ------------------------------------------------------------------- */
+    /*SEND START OPERATIONS TO ACC AGAIN                                    */
+    /* ------------------------------------------------------------------- */
+    signal->theData[0] = lcpPtr.p->lcpAccptr;
+    signal->theData[1] = sacLcpLocptr.p->locFragid;
+    sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
 
     sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc;
   } while (sacLcpLocptr.i != RNIL);
@@ -11236,9 +11211,18 @@ void Dblqh::sendStartLcp(Signal* signal)
     signal->theData[0] = stlLcpLocptr.i;
     signal->theData[1] = cownref;
     signal->theData[2] = stlLcpLocptr.p->tupRef;
-    sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA);
+    if(ERROR_INSERTED(5077))
+      sendSignalWithDelay(fragptr.p->tupBlockref, GSN_TUP_LCPREQ,
+                          signal, 5000, 3);
+    else
+      sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA);
     stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
   } while (stlLcpLocptr.i != RNIL);
+
+  if(ERROR_INSERTED(5077))
+  {
+    ndbout_c("Delayed TUP_LCPREQ with 5 sec");
+  }
 }//Dblqh::sendStartLcp()
 
 /* ------------------------------------------------------------------------- */
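The subtle part of the ACC change is the confirmation arithmetic: the lcp connect record may only be released after four signals (2 ACC_LCPCONF + 2 ACC_CONTOPCONF), whichever of checkSendLcpConfLab() or execACC_CONTOPREQ() happens to see the fourth one. A toy sketch of that counting pattern — hypothetical names, not the block's real state machine:

    // Hypothetical sketch of "wait for all confirmations before cleanup".
    struct LcpConnect {
      unsigned noOfLcpConf;               // incremented per confirmation
      static const unsigned expected = 4; // 2 ACC_LCPCONF + 2 ACC_CONTOPCONF
    };

    void onConfirmation(LcpConnect& rec) {
      rec.noOfLcpConf++;
      // Releasing earlier risks a signal arriving for a freed record,
      // which is the crash described in bug#9891.
      if (rec.noOfLcpConf == LcpConnect::expected) {
        /* releaseLcpConnectRec(...) */
      }
    }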
From 6b0b4734d90efd3c98d7f7ad9584f0aa404ba52b Mon Sep 17 00:00:00 2001
From: "dlenev@brandersnatch.localdomain" <>
Date: Fri, 15 Apr 2005 20:31:47 +0400
Subject: [PATCH 05/65] Fix for bug #9486 "Can't perform multi-update in
 stored procedure".

A new, more SP-locking-friendly approach to handling locks in multi-update.
Now we mark all tables of a multi-update as needing a write lock at the
parsing stage and, if possible, downgrade the lock at the execution stage
(for its work the SP-locking mechanism needs to know all lock types right
after the parsing stage).
---
 mysql-test/r/sp-threads.result | 17 +++++++++++++++++
 mysql-test/t/sp-threads.test   | 31 +++++++++++++++++++++++++++++++
 sql/sp_head.cc                 | 13 ++++++++++---
 sql/sql_lex.h                  |  2 +-
 sql/sql_prepare.cc             |  5 -----
 sql/sql_update.cc              | 19 ++++++++-----------
 sql/sql_yacc.yy                | 11 ++++++-----
 7 files changed, 73 insertions(+), 25 deletions(-)

diff --git a/mysql-test/r/sp-threads.result b/mysql-test/r/sp-threads.result
index 0bb5c3423e2..a081e520496 100644
--- a/mysql-test/r/sp-threads.result
+++ b/mysql-test/r/sp-threads.result
@@ -23,3 +23,20 @@ select * from t1;
 s1 s2 s3
 drop table t1;
 drop procedure bug4934;
+drop procedure if exists bug9486;
+drop table if exists t1, t2;
+create table t1 (id1 int, val int);
+create table t2 (id2 int);
+create procedure bug9486()
+update t1, t2 set val= 1 where id1=id2;
+call bug9486();
+lock tables t2 write;
+call bug9486();
+show processlist;
+Id User Host db Command Time State Info
+# root localhost test Sleep # NULL
+# root localhost test Query # Locked call bug9486()
+# root localhost test Query # NULL show processlist
+unlock tables;
+drop procedure bug9486;
+drop table t1, t2;
diff --git a/mysql-test/t/sp-threads.test b/mysql-test/t/sp-threads.test
index 0ced60a610f..608ac3e2ee7 100644
--- a/mysql-test/t/sp-threads.test
+++ b/mysql-test/t/sp-threads.test
@@ -54,6 +54,37 @@ drop table t1;
 drop procedure bug4934;
 
 
+#
+# BUG #9486 "Can't perform multi-update in stored procedure"
+#
+--disable_warnings
+drop procedure if exists bug9486;
+drop table if exists t1, t2;
+--enable_warnings
+create table t1 (id1 int, val int);
+create table t2 (id2 int);
+
+create procedure bug9486()
+  update t1, t2 set val= 1 where id1=id2;
+call bug9486();
+# Let us check that SP invocation requires write lock for t2.
+connection con2root;
+lock tables t2 write;
+connection con1root;
+send call bug9486();
+connection con2root;
+--sleep 2
+# There should be call statement in locked state.
+--replace_column 1 # 6 #
+show processlist;
+unlock tables;
+connection con1root;
+reap;
+
+drop procedure bug9486;
+drop table t1, t2;
+
+
 #
 # BUG#NNNN: New bug synopsis
 #
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 63f67959f33..01ac7a877a1 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -2026,6 +2026,12 @@ typedef struct st_sp_table
   LEX_STRING qname;
   bool temp;
   TABLE_LIST *table;
+  /*
+    We can't use table->lock_type as lock type for table
+    in multi-set since it can be changed by statement during
+    its execution (e.g. as this happens for multi-update).
+  */
+  thr_lock_type lock_type;
   uint lock_count;
   uint query_lock_count;
 } SP_TABLE;
@@ -2097,8 +2103,8 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
     */
     if ((tab= (SP_TABLE *)hash_search(&m_sptabs, (byte *)tname, tlen)))
     {
-      if (tab->table->lock_type < table->lock_type)
-        tab->table= table; // Use the table with the highest lock type
+      if (tab->lock_type < table->lock_type)
+        tab->lock_type= table->lock_type; // Use the table with the highest lock type
       tab->query_lock_count++;
      if (tab->query_lock_count > tab->lock_count)
         tab->lock_count++;
@@ -2116,6 +2122,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
           lex_for_tmp_check->create_info.options & HA_LEX_CREATE_TMP_TABLE)
         tab->temp= TRUE;
       tab->table= table;
+      tab->lock_type= table->lock_type;
       tab->lock_count= tab->query_lock_count= 1;
       my_hash_insert(&m_sptabs, (byte *)tab);
     }
@@ -2188,7 +2195,7 @@ sp_head::add_used_tables_to_table_list(THD *thd,
       table->alias= otable->alias;
       table->table_name= otable->table_name;
       table->table_name_length= otable->table_name_length;
-      table->lock_type= otable->lock_type;
+      table->lock_type= stab->lock_type;
       table->cacheable_table= 1;
       table->prelocking_placeholder= 1;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index d8c83434423..94f1a8e0df4 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -732,7 +732,7 @@ typedef struct st_lex
   USER_RESOURCES mqh;
   ulong type;
   enum_sql_command sql_command, orig_sql_command;
-  thr_lock_type lock_option, multi_lock_option;
+  thr_lock_type lock_option;
   enum SSL_type ssl_type;			/* defined in violite.h */
   enum my_lex_states next_state;
   enum enum_duplicates duplicates;
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 416105c6523..b4660c91641 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1012,11 +1012,6 @@ static int mysql_test_update(Prepared_statement *stmt,
     DBUG_PRINT("info", ("Switch to multi-update"));
     /* pass counter value */
     thd->lex->table_count= table_count;
-    /*
-      give correct value to multi_lock_option, because it will be used
-      in multiupdate
-    */
-    thd->lex->multi_lock_option= table_list->lock_type;
     /* convert to multiupdate */
     return 2;
   }
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 8b8dd32b22d..86aa0bf9890 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -145,11 +145,6 @@ int mysql_update(THD *thd,
       DBUG_PRINT("info", ("Switch to multi-update"));
       /* pass counter value */
       thd->lex->table_count= table_count;
-      /*
-        give correct value to multi_lock_option, because it will be used
-        in multiupdate
-      */
-      thd->lex->multi_lock_option= table_list->lock_type;
       /* convert to multiupdate */
       return 2;
     }
@@ -692,8 +687,10 @@ bool mysql_multi_update_prepare(THD *thd)
       }
       DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
-      tl->lock_type= lex->multi_lock_option;
-      tl->updating= 1;
+      /*
+        If table will be updated we should not downgrade lock for it and
+        leave it as is.
+      */
     }
     else
     {
@@ -705,15 +702,15 @@ bool mysql_multi_update_prepare(THD *thd)
       */
       tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
       tl->updating= 0;
+      /* Update TABLE::lock_type accordingly. */
+      if (!tl->placeholder() && !tl->schema_table && !using_lock_tables)
+        tl->table->reginfo.lock_type= tl->lock_type;
     }
     /* Check access privileges for table */
     if (!tl->derived && !tl->belong_to_view)
     {
       uint want_privilege= tl->updating ? UPDATE_ACL : SELECT_ACL;
-      if (!using_lock_tables)
-        tl->table->reginfo.lock_type= tl->lock_type;
-
       if (check_access(thd, want_privilege, tl->db,
                        &tl->grant.privilege, 0, 0) ||
           (grant_option && check_grant(thd, want_privilege, tl, 0, 1, 0)))
@@ -847,7 +844,7 @@ bool mysql_multi_update(THD *thd,
                       result, unit, select_lex);
   delete result;
   thd->abort_on_warning= 0;
-  DBUG_RETURN(TRUE);
+  DBUG_RETURN(FALSE);
 }
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 5af0cbbb00b..8b5778b5a67 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -5969,10 +5969,7 @@ update:
 	{
 	  LEX *lex= Lex;
 	  if (lex->select_lex.table_list.elements > 1)
-	  {
 	    lex->sql_command= SQLCOM_UPDATE_MULTI;
-	    lex->multi_lock_option= $3;
-	  }
 	  else if (lex->select_lex.get_table_list()->derived)
 	  {
 	    /* it is single table update and it is update of derived table */
@@ -5980,8 +5977,12 @@ update:
 			     lex->select_lex.get_table_list()->alias, "UPDATE");
 	    YYABORT;
 	  }
-	  else
-	    Select->set_lock_for_tables($3);
+	  /*
+	    In case of multi-update setting write lock for all tables may
+	    be too pessimistic. We will decrease lock level if possible in
+	    mysql_multi_update().
+	  */
+	  Select->set_lock_for_tables($3);
 	}
 	where_clause opt_order_clause delete_limit_clause {}
 	;
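A concrete illustration of the new behaviour — a hedged example using the test's own tables, not part of the test suite itself: in the statement below both tables are marked for write at parse time (which is what the SP prelocking machinery needs), and mysql_multi_update_prepare() then downgrades t2, which is only read, at execution time.

    -- t1 is written, t2 is only read. Parse stage: both get a write-lock
    -- request; execution stage: t2 is downgraded to TL_READ /
    -- TL_READ_NO_INSERT.
    UPDATE t1, t2 SET t1.val = 1 WHERE t1.id1 = t2.id2;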
From a76ecc5bc78365a2ecdf1f96d9c238d5ecbe30c4 Mon Sep 17 00:00:00 2001
From: "sergefp@mysql.com" <>
Date: Mon, 18 Apr 2005 05:21:44 +0400
Subject: [PATCH 06/65] Fix for BUG#9103: Don't produce data truncation
 warnings from within cp_buffer_from_ref().

This function is only used to make index search tuples, and any data
truncation that occurs here has no relation to truncated values being
saved into tables.
---
 mysql-test/r/update.result | 14 ++++++++++++++
 mysql-test/t/update.test   | 12 ++++++++++++
 sql/opt_range.cc           |  2 +-
 sql/sql_select.cc          | 19 +++++++++++++------
 sql/sql_select.h           |  2 +-
 5 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result
index 4a9e95fb89e..d83952e118b 100644
--- a/mysql-test/r/update.result
+++ b/mysql-test/r/update.result
@@ -226,3 +226,17 @@ select * from t1;
 a b
 0 2
 drop table t1;
+create table t1 (a int, b varchar(10), key b(b(5))) engine=myisam;
+create table t2 (a int, b varchar(10)) engine=myisam;
+insert into t1 values ( 1, 'abcd1e');
+insert into t1 values ( 2, 'abcd2e');
+insert into t2 values ( 1, 'abcd1e');
+insert into t2 values ( 2, 'abcd2e');
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+update t1, t2 set t1.a = t2.a where t2.b = t1.b;
+show warnings;
+Level Code Message
+drop table t1, t2;
diff --git a/mysql-test/t/update.test b/mysql-test/t/update.test
index 8eb3a924ee3..6a90fb95760 100644
--- a/mysql-test/t/update.test
+++ b/mysql-test/t/update.test
@@ -189,3 +189,15 @@ insert into t1 values (0, '1');
 update t1 set b = b + 1 where a = 0;
 select * from t1;
 drop table t1;
+
+# BUG#9103 "Erroneous data truncation warnings on multi-table updates"
+create table t1 (a int, b varchar(10), key b(b(5))) engine=myisam;
+create table t2 (a int, b varchar(10)) engine=myisam;
+insert into t1 values ( 1, 'abcd1e');
+insert into t1 values ( 2, 'abcd2e');
+insert into t2 values ( 1, 'abcd1e');
+insert into t2 values ( 2, 'abcd2e');
+analyze table t1,t2;
+update t1, t2 set t1.a = t2.a where t2.b = t1.b;
+show warnings;
+drop table t1, t2;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index e2cae0598a0..33223b83894 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2574,7 +2574,7 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                        TABLE_REF *ref)
   if (!quick)
     return 0;			/* no ranges found */
-  if (cp_buffer_from_ref(ref))
+  if (cp_buffer_from_ref(thd, ref))
   {
     if (thd->is_fatal_error)
       goto err;				// out of memory
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 3f133a473ac..14dc8463e38 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -6185,7 +6185,7 @@ join_read_const(JOIN_TAB *tab)
   TABLE *table= tab->table;
   if (table->status & STATUS_GARBAGE)		// If first read
   {
-    if (cp_buffer_from_ref(&tab->ref))
+    if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
       error=HA_ERR_KEY_NOT_FOUND;
     else
     {
@@ -6248,7 +6248,7 @@ join_read_always_key(JOIN_TAB *tab)
 
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key);
-  if (cp_buffer_from_ref(&tab->ref))
+  if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
     return -1;
   if ((error=table->file->index_read(table->record[0],
                                      tab->ref.key_buff,
@@ -6275,7 +6275,7 @@ join_read_last_key(JOIN_TAB *tab)
 
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key);
-  if (cp_buffer_from_ref(&tab->ref))
+  if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
     return -1;
   if ((error=table->file->index_read_last(table->record[0],
                                           tab->ref.key_buff,
@@ -6449,7 +6449,7 @@ join_ft_read_first(JOIN_TAB *tab)
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key);
 #if NOT_USED_YET
-  if (cp_buffer_from_ref(&tab->ref))       // as ft-key doesn't use store_key's
+  if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
     return -1;                             // see also FT_SELECT::init()
 #endif
   table->file->ft_init();
@@ -8168,7 +8168,8 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
   {
     memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length);
   }
-  if ((tab->ref.key_err=cp_buffer_from_ref(&tab->ref)) || diff)
+  if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, &tab->ref)) ||
+      diff)
     return 1;
   return memcmp(tab->ref.key_buff2, tab->ref.key_buff,
 		tab->ref.key_length) != 0;
@@ -8176,11 +8177,17 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
 
 
 bool
-cp_buffer_from_ref(TABLE_REF *ref)
+cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
 {
+  enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
+  thd->count_cuted_fields= CHECK_FIELD_IGNORE;
   for (store_key **copy=ref->key_copy ; *copy ; copy++)
     if ((*copy)->copy())
+    {
+      thd->count_cuted_fields= save_count_cuted_fields;
      return 1;					// Something went wrong
+    }
+  thd->count_cuted_fields= save_count_cuted_fields;
   return 0;
 }
diff --git a/sql/sql_select.h b/sql/sql_select.h
index ab3b442ef74..caf4574fbec 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -445,7 +445,7 @@ public:
   const char *name() const { return "const"; }
 };
 
-bool cp_buffer_from_ref(TABLE_REF *ref);
+bool cp_buffer_from_ref(THD *thd, TABLE_REF *ref);
 bool error_if_full_join(JOIN *join);
 int report_error(TABLE *table, int error);
 int safe_index_read(JOIN_TAB *tab);
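cp_buffer_from_ref() now saves thd->count_cuted_fields, forces CHECK_FIELD_IGNORE for the duration of the copy loop, and restores the old value on every exit path. The same save/restore idea can be written as a scope guard — a hypothetical sketch, not code from the tree:

    // Hypothetical guard for the pattern above: suppress truncation
    // counting while building an index search tuple, restore on exit.
    class Suppress_cuted_fields
    {
      THD *thd;
      enum enum_check_fields saved;
    public:
      Suppress_cuted_fields(THD *thd_arg)
        : thd(thd_arg), saved(thd_arg->count_cuted_fields)
      { thd->count_cuted_fields= CHECK_FIELD_IGNORE; }
      ~Suppress_cuted_fields() { thd->count_cuted_fields= saved; }
    };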
From 384efc449398925cca78d5fe98d3ac53e3b6aa6d Mon Sep 17 00:00:00 2001
From: "jan@hundin.mysql.fi" <>
Date: Mon, 18 Apr 2005 12:17:32 +0300
Subject: [PATCH 07/65] Fixed a bug: deadlock without any locking, simple
 select and update (Bug #7975). Backported from 5.0.3.

---
 innobase/row/row0ins.c | 41 +++++++++++++++++++++++------------------
 sql/ha_innodb.cc       | 19 +++++++++++++++----
 2 files changed, 38 insertions(+), 22 deletions(-)

diff --git a/innobase/row/row0ins.c b/innobase/row/row0ins.c
index 15ffabf70cc..5ca1ee51cbd 100644
--- a/innobase/row/row0ins.c
+++ b/innobase/row/row0ins.c
@@ -51,14 +51,19 @@ innobase_invalidate_query_cache(
 					chars count */
 
 /**********************************************************************
-This function returns true if SQL-query in the current thread
+This function returns true if
+
+1) SQL-query in the current thread
 is either REPLACE or LOAD DATA INFILE REPLACE.
+
+2) SQL-query in the current thread
+is INSERT ON DUPLICATE KEY UPDATE.
+
 NOTE that /mysql/innobase/row/row0ins.c must contain the prototype for this
 function ! */

 ibool
-innobase_query_is_replace(void);
-/*===========================*/
+innobase_query_is_update(void);
 
 /*************************************************************************
 Creates an insert node struct. */
@@ -1562,12 +1567,12 @@ row_ins_scan_sec_index_for_duplicate(
 	trx = thr_get_trx(thr);
 	ut_ad(trx);
 
-	if (innobase_query_is_replace()) {
+	if (innobase_query_is_update()) {
 
-		/* The manual defines the REPLACE semantics that it
-		is either an INSERT or DELETE(s) for duplicate key
-		+ INSERT. Therefore, we should take X-lock for
-		duplicates */
+		/* If the SQL-query will update or replace
+		duplicate key we will take X-lock for
+		duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+		INSERT ON DUPLICATE KEY UPDATE). */
 
 		err = row_ins_set_exclusive_rec_lock(
 			LOCK_ORDINARY,rec,index,thr);
@@ -1675,12 +1680,12 @@ row_ins_duplicate_error_in_clust(
 		sure that in roll-forward we get the same duplicate
 		errors as in original execution */
 
-		if (innobase_query_is_replace()) {
+		if (innobase_query_is_update()) {
 
-			/* The manual defines the REPLACE semantics
-			that it is either an INSERT or DELETE(s)
-			for duplicate key + INSERT. Therefore, we
-			should take X-lock for duplicates */
+			/* If the SQL-query will update or replace
+			duplicate key we will take X-lock for
+			duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+			INSERT ON DUPLICATE KEY UPDATE). */
 
 			err = row_ins_set_exclusive_rec_lock(
 				LOCK_REC_NOT_GAP,rec,cursor->index,
@@ -1713,12 +1718,12 @@ row_ins_duplicate_error_in_clust(
 
 		if (rec != page_get_supremum_rec(page)) {
 
-			/* The manual defines the REPLACE semantics that it
-			is either an INSERT or DELETE(s) for duplicate key
-			+ INSERT. Therefore, we should take X-lock for
-			duplicates. */
+			if (innobase_query_is_update()) {
 
-			if (innobase_query_is_replace()) {
+				/* If the SQL-query will update or replace
+				duplicate key we will take X-lock for
+				duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+				INSERT ON DUPLICATE KEY UPDATE). */
 
 				err = row_ins_set_exclusive_rec_lock(
 						LOCK_REC_NOT_GAP,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 6cb35fb392d..06d9bf24c13 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -5653,13 +5653,19 @@ innobase_get_at_most_n_mbchars(
 extern "C" {
 /**********************************************************************
-This function returns true if SQL-query in the current thread
+This function returns true if
+
+1) SQL-query in the current thread
 is either REPLACE or LOAD DATA INFILE REPLACE.
+
+2) SQL-query in the current thread
+is INSERT ON DUPLICATE KEY UPDATE.
+
 NOTE that /mysql/innobase/row/row0ins.c must contain the prototype for this
 function ! */
 
 ibool
-innobase_query_is_replace(void)
+innobase_query_is_update(void)
 /*===========================*/
 {
 	THD*	thd;
@@ -5671,9 +5677,14 @@ innobase_query_is_replace(void)
 	    thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
 	    ( thd->lex->sql_command == SQLCOM_LOAD &&
 	      thd->lex->duplicates == DUP_REPLACE )) {
 		return true;
-	} else {
-		return false;
 	}
+
+	if ( thd->lex->sql_command == SQLCOM_INSERT &&
+	     thd->lex->duplicates == DUP_UPDATE ) {
+		return true;
+	}
+
+	return false;
 }
 }
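Behaviourally, the renamed predicate widens the set of statements for which InnoDB locks duplicate-key rows exclusively rather than shared. A hedged summary in SQL terms — table, column, and file names below are placeholders:

    -- innobase_query_is_update() returns TRUE for all three, so duplicates
    -- are X-locked instead of S-locked (avoiding the bug #7975 deadlock):
    REPLACE INTO t VALUES (1, 'a');                               -- as before
    LOAD DATA INFILE 'rows.txt' REPLACE INTO TABLE t;             -- as before
    INSERT INTO t VALUES (1, 'a') ON DUPLICATE KEY UPDATE b='a';  -- new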
From f931466f36c0038181c897e6a6e70d046e9d216e Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Mon, 18 Apr 2005 12:41:12 +0200
Subject: [PATCH 08/65] bug#9892 Make BUILDINDX RF_LOCAL aware

---
 ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 37 +++++++++++++++++++------
 ndb/src/ndbapi/Ndbif.cpp                | 10 +++----
 ndb/test/ndbapi/testIndex.cpp           |  2 +-
 3 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 7247b7e2b9c..184db794057 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -9811,11 +9811,20 @@ Dbdict::execBUILDINDXREQ(Signal* signal)
       requestType == BuildIndxReq::RT_ALTER_INDEX ||
       requestType == BuildIndxReq::RT_SYSTEMRESTART) {
     jam();
+
+    const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+    NdbNodeBitmask receiverNodes = c_aliveNodes;
+    if (isLocal) {
+      receiverNodes.clear();
+      receiverNodes.set(getOwnNodeId());
+    }
+
     if (signal->getLength() == BuildIndxReq::SignalLength) {
       jam();
-      if (getOwnNodeId() != c_masterNodeId) {
+
+      if (!isLocal && getOwnNodeId() != c_masterNodeId) {
         jam();
-
+
         releaseSections(signal);
         OpBuildIndex opBad;
         opPtr.p = &opBad;
@@ -9828,9 +9837,9 @@ Dbdict::execBUILDINDXREQ(Signal* signal)
       }
       // forward initial request plus operation key to all
      req->setOpKey(++c_opRecordSequence);
-      NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+      NodeReceiverGroup rg(DBDICT, receiverNodes);
       sendSignal(rg, GSN_BUILDINDXREQ,
-		 signal, BuildIndxReq::SignalLength + 1, JBB);
+                 signal, BuildIndxReq::SignalLength + 1, JBB);
       return;
     }
     // seize operation record
@@ -9853,7 +9862,7 @@ Dbdict::execBUILDINDXREQ(Signal* signal)
     }
     c_opBuildIndex.add(opPtr);
     // master expects to hear from all
-    opPtr.p->m_signalCounter = c_aliveNodes;
+    opPtr.p->m_signalCounter = receiverNodes;
     buildIndex_sendReply(signal, opPtr, false);
     return;
   }
@@ -10208,10 +10217,20 @@ Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr)
   req->setConnectionPtr(opPtr.p->key);
   req->setRequestType(opPtr.p->m_requestType);
   req->addRequestFlag(opPtr.p->m_requestFlag);
-  opPtr.p->m_signalCounter = c_aliveNodes;
-  NodeReceiverGroup rg(DBDICT, c_aliveNodes);
-  sendSignal(rg, GSN_BUILDINDXREQ,
-	     signal, BuildIndxReq::SignalLength, JBB);
+  if(opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+  {
+    opPtr.p->m_signalCounter.clearWaitingFor();
+    opPtr.p->m_signalCounter.setWaitingFor(getOwnNodeId());
+    sendSignal(reference(), GSN_BUILDINDXREQ,
+	       signal, BuildIndxReq::SignalLength, JBB);
+  }
+  else
+  {
+    opPtr.p->m_signalCounter = c_aliveNodes;
+    NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+    sendSignal(rg, GSN_BUILDINDXREQ,
+	       signal, BuildIndxReq::SignalLength, JBB);
+  }
 }
 
 void
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp
index a4f233709c4..1caebe436ef 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/ndb/src/ndbapi/Ndbif.cpp
@@ -453,7 +453,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
       tFirstDataPtr = int2void(tFirstData);
       if(tFirstDataPtr != 0){
 	tOp = void2rec_op(tFirstDataPtr);
-	if (tOp->checkMagicNumber() == 0) {
+	if (tOp->checkMagicNumber(false) == 0) {
 	  tCon = tOp->theNdbCon;
 	  if (tCon != NULL) {
 	    if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
@@ -466,11 +466,11 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
 	    }//if
 	  }//if
 	}//if
-      } else {
-#ifdef VM_TRACE
-	ndbout_c("Recevied TCKEY_FAILREF wo/ operation");
-#endif
       }
+#ifdef VM_TRACE
+      ndbout_c("Recevied TCKEY_FAILREF wo/ operation");
+#endif
+      return;
       break;
     }
     case GSN_TCKEYREF:
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 6623ad35a7f..d359f83257f 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1329,7 +1329,7 @@ TESTCASE("NFNR2_O",
   INITIALIZER(runLoadTable);
   STEP(runRestarts);
   STEP(runTransactions2);
-  STEP(runTransactions2);
+  //STEP(runTransactions2);
   FINALIZER(runVerifyIndex);
   FINALIZER(createRandomIndex_Drop);
   FINALIZER(createPkIndex_Drop);

From c0248b218688f81ccc4540278bfb01266f8dd875 Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Mon, 18 Apr 2005 12:46:35 +0200
Subject: [PATCH 09/65] bug#9892 - ndb index activation 4.1->5.0 merge fix

---
 ndb/test/ndbapi/testIndex.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index bb3c41d0474..5785db232c4 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1323,7 +1323,7 @@ TESTCASE("NFNR2_O",
	 "Test that indexes are correctly maintained during node fail and node restart"){
   TC_PROPERTY("OrderedIndex", 1);
   TC_PROPERTY("LoggedIndexes", (unsigned)0);
-  TC_PROPERTY("PauseThreads", 2);
+  TC_PROPERTY("PauseThreads", 1);
   INITIALIZER(runClearTable);
   INITIALIZER(createRandomIndex);
   INITIALIZER(createPkIndex);

From 1ef7bbc74b934db6f59f28b8953d0b0569878fa6 Mon Sep 17 00:00:00 2001
From: "acurtis@xiphis.org" <>
Date: Mon, 18 Apr 2005 22:01:18 +0100
Subject: [PATCH 10/65] Update for VARCHAR and remove 4.1-specific actions.
 by Philip Antoniades

---
 scripts/mysql_fix_privilege_tables.sql | 46 ++++----------------------
 1 file changed, 7 insertions(+), 39 deletions(-)

diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql
index 00fe5c053d0..d18536e1c81 100644
--- a/scripts/mysql_fix_privilege_tables.sql
+++ b/scripts/mysql_fix_privilege_tables.sql
@@ -9,7 +9,7 @@
 -- this sql script.
 -- On windows you should do 'mysql --force mysql < mysql_fix_privilege_tables.sql'
 
-set table_type=MyISAM;
+set storage_engine=MyISAM;
 
 CREATE TABLE IF NOT EXISTS func (
   name char(64) binary DEFAULT '' NOT NULL,
@@ -64,7 +64,7 @@ CREATE TABLE IF NOT EXISTS tables_priv (
 ALTER TABLE tables_priv
   modify Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') COLLATE utf8_general_ci DEFAULT '' NOT NULL,
   modify Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL;
 
-ALTER TABLE procs_priv type=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE procs_priv ENGINE=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
 ALTER TABLE procs_priv
   modify Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL;
@@ -321,19 +321,19 @@ KEY Grantor (Grantor)
 
 CREATE TABLE IF NOT EXISTS help_topic (
 help_topic_id int unsigned not null,
-name char(64) not null,
+name varchar(64) not null,
 help_category_id smallint unsigned not null,
 description text not null,
 example text not null,
-url char(128) not null,
+url varchar(128) not null,
 primary key (help_topic_id),
 unique index (name)
 ) CHARACTER SET utf8 comment='help topics';
 
 CREATE TABLE IF NOT EXISTS help_category (
 help_category_id smallint unsigned not null,
-name char(64) not null,
+name varchar(64) not null,
 parent_category_id smallint unsigned null,
-url char(128) not null,
+url varchar(128) not null,
 primary key (help_category_id),
 unique index (name)
 ) CHARACTER SET utf8 comment='help categories';
@@ -346,7 +346,7 @@ primary key (help_keyword_id, help_topic_id)
 
 CREATE TABLE IF NOT EXISTS help_keyword (
 help_keyword_id int unsigned not null,
-name char(64) not null,
+name varchar(64) not null,
 primary key (help_keyword_id),
 unique index (name)
 ) CHARACTER SET utf8 comment='help keywords';
@@ -493,35 +493,3 @@ ALTER TABLE proc MODIFY name char(64) DEFAULT '' NOT NULL,
                                   'NO_AUTO_CREATE_USER',
                                   'HIGH_NOT_PRECEDENCE'
                                   ) DEFAULT 0 NOT NULL;
-
-#
-# Change all varchar fields in privilege tables to CHAR, to ensure that
-# we can use the privilege tables in MySQL 4.1
-# Note that for this hack to work, we must change all CHAR() columns at
-# the same time
-#
-
-ALTER TABLE mysql.user
-modify Host char(60) binary DEFAULT '' NOT NULL,
-modify User char(16) binary DEFAULT '' NOT NULL,
-modify Password char(41) binary DEFAULT '' NOT NULL;
-
-ALTER TABLE mysql.db
-modify Host char(60) binary DEFAULT '' NOT NULL,
-modify Db char(64) binary DEFAULT '' NOT NULL,
-modify User char(16) binary DEFAULT '' NOT NULL;
-
-ALTER TABLE mysql.host
-modify Host char(60) binary DEFAULT '' NOT NULL,
-modify Db char(64) binary DEFAULT '' NOT NULL;
-
-ALTER TABLE help_topic
-modify name char(64) not null,
-modify url char(128) not null;
-
-ALTER TABLE help_category
-modify name char(64) not null,
-modify url char(128) not null;
-
-ALTER TABLE help_keyword
-modify name char(64) not null;
From 1f994ec920a47f4bbe34b8847cc6a6c2fb36f9fd Mon Sep 17 00:00:00 2001
From: "jan@hundin.mysql.fi" <>
Date: Tue, 19 Apr 2005 08:23:03 +0300
Subject: [PATCH 11/65] Style change. Use 1 and 0 instead of true and false.

---
 sql/ha_innodb.cc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 06d9bf24c13..83c72594dfb 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -5666,7 +5666,7 @@ prototype for this function !
 */
 
 ibool
 innobase_query_is_update(void)
-/*===========================*/
+/*==========================*/
 {
 	THD*	thd;
 
@@ -5676,15 +5676,15 @@ innobase_query_is_update(void)
 	    thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
 	    ( thd->lex->sql_command == SQLCOM_LOAD &&
 	      thd->lex->duplicates == DUP_REPLACE )) {
-		return true;
+		return(1);
 	}
 
 	if ( thd->lex->sql_command == SQLCOM_INSERT &&
 	     thd->lex->duplicates == DUP_UPDATE ) {
-		return true;
+		return(1);
 	}
 
-	return false;
+	return(0);
 }
 }

From f638e5cb8f9956b92c0f72fc48d6a297d7fc393e Mon Sep 17 00:00:00 2001
From: "acurtis@xiphis.org" <>
Date: Tue, 19 Apr 2005 09:09:25 +0100
Subject: [PATCH 12/65] Bug#9102 - Stored procedures: function which returns
 blob causes crash

Initialization of fields for the SP return type was not complete.
---
 mysql-test/r/sp.result |  6 ++++
 mysql-test/t/sp.test   |  9 ++++++
 sql/mysql_priv.h       |  1 +
 sql/sp_head.cc         | 26 +++++++++++++++-
 sql/sql_table.cc       | 71 ++++++++++++++++++++++++++++++++++++++++++
 sql/sql_yacc.yy        | 14 +++++++--
 6 files changed, 124 insertions(+), 3 deletions(-)

diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index 4bb1640f0eb..4cc59679ce5 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -2958,4 +2958,10 @@ select @x|
 set global query_cache_size = @qcs1|
 delete from t1|
 drop function bug9902|
+drop function if exists bug9102|
+create function bug9102() returns blob return 'a'|
+select bug9102();
+drop function bug9102|
+bug9102()
+a
 drop table t1,t2;
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index 4101a7a4bfa..3934e8ad17a 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -3627,6 +3627,15 @@ set global query_cache_size = @qcs1|
 delete from t1|
 drop function bug9902|
 
+#
+# BUG#9102: New bug synopsis
+#
+--disable_warnings
+drop function if exists bug9102|
+--enable_warnings
+create function bug9102() returns blob return 'a'|
+select bug9102();
+drop function bug9102|
 
 #
 # BUG#NNNN: New bug synopsis
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index a854f8c45d3..56fbd993aed 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -647,6 +647,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
                         Item ***copy_func, Field **from_field,
                         bool group, bool modify_item,
                         uint convert_blob_length);
+void sp_prepare_create_field(THD *thd, create_field *sql_field);
 int prepare_create_field(create_field *sql_field,
 			 uint *blob_columns,
 			 int *timestamps, int *timestamps_with_niladic,
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 0fe9c449540..d1486cb234e 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -370,6 +370,7 @@ TYPELIB *
 sp_head::create_typelib(List<String> *src)
 {
   TYPELIB *result= NULL;
+  CHARSET_INFO *cs= m_returns_cs;
   DBUG_ENTER("sp_head::clone_typelib");
   if (src->elements)
   {
@@ -380,8 +381,31 @@ sp_head::create_typelib(List<String> *src)
           alloc_root(mem_root,sizeof(char *)*(result->count+1))))
       return 0;
     List_iterator<String> it(*src);
+    String conv, *tmp;
+    uint32 dummy;
     for (uint i=0; i<result->count; i++)
-      result->type_names[i]= strdup_root(mem_root, (it++)->c_ptr());
+    {
+      tmp = it++;
+      if (String::needs_conversion(tmp->length(), tmp->charset(),
+      				   cs, &dummy))
+      {
+        uint cnv_errs;
+        conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
+        char *buf= (char*) alloc_root(mem_root,conv.length()+1);
+        memcpy(buf, conv.ptr(), conv.length());
+        buf[conv.length()]= '\0';
+        result->type_names[i]= buf;
+        result->type_lengths[i]= conv.length();
+      }
+      else
+        result->type_names[i]= strdup_root(mem_root, tmp->c_ptr());
+
+      // Strip trailing spaces.
+      uint lengthsp= cs->cset->lengthsp(cs, result->type_names[i],
+                                        result->type_lengths[i]);
+      result->type_lengths[i]= lengthsp;
+      ((uchar *)result->type_names[i])[lengthsp]= '\0';
+    }
     result->type_names[result->count]= 0;
   }
   return result;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 8753f62ab89..18c90d549ec 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1351,6 +1351,77 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
 }
 
 
+/*
+  Preparation of create_field for SP function return values.
+  Based on code used in the inner loop of mysql_prepare_table() above
+
+  SYNOPSIS
+    sp_prepare_create_field()
+    thd			Thread object
+    sql_field		Field to prepare
+
+  DESCRIPTION
+    Prepares the field structures for field creation.
+
+*/
+
+void sp_prepare_create_field(THD *thd, create_field *sql_field)
+{
+  if (sql_field->sql_type == FIELD_TYPE_SET ||
+      sql_field->sql_type == FIELD_TYPE_ENUM)
+  {
+    uint32 field_length, dummy;
+    if (sql_field->sql_type == FIELD_TYPE_SET)
+    {
+      calculate_interval_lengths(sql_field->charset,
+                                 sql_field->interval, &dummy,
+                                 &field_length);
+      sql_field->length= field_length +
+                         (sql_field->interval->count - 1);
+    }
+    else /* FIELD_TYPE_ENUM */
+    {
+      calculate_interval_lengths(sql_field->charset,
+                                 sql_field->interval,
+                                 &field_length, &dummy);
+      sql_field->length= field_length;
+    }
+    set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
+  }
+
+  if (sql_field->sql_type == FIELD_TYPE_BIT)
+  {
+    sql_field->pack_flag= FIELDFLAG_NUMBER |
+                          FIELDFLAG_TREAT_BIT_AS_CHAR;
+  }
+  sql_field->create_length_to_internal_length();
+
+  if (sql_field->length > MAX_FIELD_VARCHARLENGTH &&
+      !(sql_field->flags & BLOB_FLAG))
+  {
+    /* Convert long VARCHAR columns to TEXT or BLOB */
+    char warn_buff[MYSQL_ERRMSG_SIZE];
+
+    sql_field->sql_type= FIELD_TYPE_BLOB;
+    sql_field->flags|= BLOB_FLAG;
+    sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name,
+            "VARCHAR",
+            (sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
+    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
+                 warn_buff);
+  }
+
+  if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
+  {
+    if (sql_field->sql_type == FIELD_TYPE_BLOB)
+    {
+      /* The user has given a length to the blob column */
+      sql_field->sql_type= get_blob_type_from_length(sql_field->length);
+      sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
+    }
+    sql_field->length= 0;			// Probably from an item
+  }
+}
 
 /*
   Create a table
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 9aa5d7fb4fc..40529312493 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1407,6 +1407,16 @@ create_function_tail:
 					       lex->uint_geom_type)))
 	      YYABORT;
 
+	    sp->m_returns_cs= new_field->charset;
+
+	    if (new_field->sql_type == FIELD_TYPE_SET ||
+	        new_field->sql_type == FIELD_TYPE_ENUM)
+	    {
+	      new_field->interval=
+	        sp->create_typelib(&new_field->interval_list);
+	    }
+	    sp_prepare_create_field(YYTHD, new_field);
+
 	    if (prepare_create_field(new_field, &unused1, &unused2,
 	                             &unused2, 0))
 	      YYABORT;
@@ -1415,8 +1425,8 @@ create_function_tail:
 	    sp->m_returns_cs= new_field->charset;
 	    sp->m_returns_len= new_field->length;
 	    sp->m_returns_pack= new_field->pack_flag;
-	    sp->m_returns_typelib=
-	      sp->create_typelib(&new_field->interval_list);
+	    sp->m_returns_typelib= new_field->interval;
+	    new_field->interval= NULL;
 
 	    bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
 	  }
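The blob case is what the test above covers; the same initialization path also matters for ENUM/SET return types, where create_typelib() now converts the type names to the function's return charset. An illustrative companion — hypothetical, not from sp.test, using the same '|' delimiter convention:

    --disable_warnings
    drop function if exists f9102_enum|
    --enable_warnings
    create function f9102_enum() returns enum('a','b') return 'a'|
    select f9102_enum()|
    drop function f9102_enum|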
"BLOB" : "TEXT"); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, + warn_buff); + } + + if ((sql_field->flags & BLOB_FLAG) && sql_field->length) + { + if (sql_field->sql_type == FIELD_TYPE_BLOB) + { + /* The user has given a length to the blob column */ + sql_field->sql_type= get_blob_type_from_length(sql_field->length); + sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0); + } + sql_field->length= 0; // Probably from an item + } +} /* Create a table diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 9aa5d7fb4fc..40529312493 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1407,6 +1407,16 @@ create_function_tail: lex->uint_geom_type))) YYABORT; + sp->m_returns_cs= new_field->charset; + + if (new_field->sql_type == FIELD_TYPE_SET || + new_field->sql_type == FIELD_TYPE_ENUM) + { + new_field->interval= + sp->create_typelib(&new_field->interval_list); + } + sp_prepare_create_field(YYTHD, new_field); + if (prepare_create_field(new_field, &unused1, &unused2, &unused2, 0)) YYABORT; @@ -1415,8 +1425,8 @@ create_function_tail: sp->m_returns_cs= new_field->charset; sp->m_returns_len= new_field->length; sp->m_returns_pack= new_field->pack_flag; - sp->m_returns_typelib= - sp->create_typelib(&new_field->interval_list); + sp->m_returns_typelib= new_field->interval; + new_field->interval= NULL; bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } From f8bf8697c1815459ead7e7b8463ff0d8d970534f Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Tue, 19 Apr 2005 11:15:36 +0200 Subject: [PATCH 13/65] - removed COPYING from txt_files of Docs/Makefile.am (it's already in EXTRA_DIST of the toplevel Makefile.am) --- Docs/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/Makefile.am b/Docs/Makefile.am index 4d6aa30a30f..a0d278285b4 100644 --- a/Docs/Makefile.am +++ b/Docs/Makefile.am @@ -15,7 +15,7 @@ EXTRA_DIST = $(noinst_SCRIPTS) mysql.info INSTALL-BINARY all: txt_files -txt_files: ../INSTALL-SOURCE ../COPYING ../EXCEPTIONS-CLIENT \ +txt_files: ../INSTALL-SOURCE ../EXCEPTIONS-CLIENT \ INSTALL-BINARY ../support-files/MacOSX/ReadMe.txt CLEAN_FILES: $(txt_files) From 2df2c4b8957a583e213e69849e6cab8a1f3cce15 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Tue, 19 Apr 2005 11:17:32 +0200 Subject: [PATCH 14/65] CSC5149 - ndb test programs Fix src distributions for benchmark prg --- ndb/test/ndbapi/Makefile.am | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am index 0c84db8c068..6f04ac3fce2 100644 --- a/ndb/test/ndbapi/Makefile.am +++ b/ndb/test/ndbapi/Makefile.am @@ -70,8 +70,8 @@ test_event_SOURCES = test_event.cpp ndbapi_slow_select_SOURCES = slow_select.cpp testReadPerf_SOURCES = testReadPerf.cpp testLcp_SOURCES = testLcp.cpp -DbCreate_SOURCES= bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp -DbAsyncGenerator_SOURCES= bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp +DbCreate_SOURCES= bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp +DbAsyncGenerator_SOURCES= bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel From 
4193a314fb264f257b15d2d0d36eddae0c5e4d5b Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Tue, 19 Apr 2005 11:21:26 +0200 Subject: [PATCH 15/65] Fix for Bug #9691 UPDATE fails on attempt to update primary key --- mysql-test/r/ndb_update.result | 30 +++++++++++++++++++++----- mysql-test/t/ndb_update.test | 15 +++++++++++-- sql/ha_ndbcluster.cc | 39 +++++++++++++++++++++------------- 3 files changed, 62 insertions(+), 22 deletions(-) diff --git a/mysql-test/r/ndb_update.result b/mysql-test/r/ndb_update.result index 5df5c861cfb..c2247564e65 100644 --- a/mysql-test/r/ndb_update.result +++ b/mysql-test/r/ndb_update.result @@ -2,12 +2,32 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, -c INT NOT NULL +c INT NOT NULL UNIQUE ) ENGINE=ndbcluster; -INSERT INTO t1 VALUES (0, 0, 1),(1,1,2),(2,2,3); +INSERT INTO t1 VALUES (0, 1, 0),(1,2,1),(2,3,2); UPDATE t1 set b = c; select * from t1 order by pk1; pk1 b c -0 1 1 -1 2 2 -2 3 3 +0 0 0 +1 1 1 +2 2 2 +UPDATE t1 set pk1 = 4 where pk1 = 1; +select * from t1 order by pk1; +pk1 b c +0 0 0 +2 2 2 +4 1 1 +UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; +ERROR 23000: Duplicate entry '1' for key 1 +select * from t1 order by pk1; +pk1 b c +0 0 0 +2 2 2 +4 1 1 +UPDATE t1 set pk1 = pk1 + 10; +select * from t1 order by pk1; +pk1 b c +10 0 0 +12 2 2 +14 1 1 +DROP TABLE IF EXISTS t1; diff --git a/mysql-test/t/ndb_update.test b/mysql-test/t/ndb_update.test index 3b0e84e2344..fc489ec4697 100644 --- a/mysql-test/t/ndb_update.test +++ b/mysql-test/t/ndb_update.test @@ -14,9 +14,20 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, - c INT NOT NULL + c INT NOT NULL UNIQUE ) ENGINE=ndbcluster; -INSERT INTO t1 VALUES (0, 0, 1),(1,1,2),(2,2,3); +INSERT INTO t1 VALUES (0, 1, 0),(1,2,1),(2,3,2); UPDATE t1 set b = c; select * from t1 order by pk1; +UPDATE t1 set pk1 = 4 where pk1 = 1; +select * from t1 order by pk1; +-- error 1062 +UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; +select * from t1 order by pk1; +UPDATE t1 set pk1 = pk1 + 10; +select * from t1 order by pk1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d14d5f6f5c3..a6b4f928e72 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1863,8 +1863,10 @@ int ha_ndbcluster::write_row(byte *record) m_skip_auto_increment= !auto_increment_column_changed; } - if ((res= set_primary_key(op))) - return res; + if ((res= (m_primary_key_update ? 
+ set_primary_key_from_old_data(op, record) + : set_primary_key(op)))) + return res; } // Set non-key attribute(s) @@ -2001,7 +2003,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) { int read_res, insert_res, delete_res; - DBUG_PRINT("info", ("primary key update, doing pk read+insert+delete")); + DBUG_PRINT("info", ("primary key update, doing pk read+delete+insert")); // Get all old fields, since we optimize away fields not in query read_res= complemented_pk_read(old_data, new_data); if (read_res) @@ -2009,15 +2011,7 @@ { DBUG_PRINT("info", ("pk read failed")); DBUG_RETURN(read_res); } - // Insert new row - insert_res= write_row(new_data); - if (insert_res) - { - DBUG_PRINT("info", ("insert failed")); - DBUG_RETURN(insert_res); - } // Delete old row - DBUG_PRINT("info", ("insert succeded")); m_primary_key_update= TRUE; delete_res= delete_row(old_data); m_primary_key_update= FALSE; @@ -2025,9 +2019,23 @@ { DBUG_PRINT("info", ("delete failed")); // Undo write_row(new_data) - DBUG_RETURN(delete_row(new_data)); + DBUG_RETURN(delete_res); } - DBUG_PRINT("info", ("insert+delete succeeded")); + // Insert new row + DBUG_PRINT("info", ("delete succeeded")); + insert_res= write_row(new_data); + if (insert_res) + { + DBUG_PRINT("info", ("insert failed")); + if (trans->commitStatus() == NdbConnection::Started) + { + m_primary_key_update= TRUE; + insert_res= write_row((byte *)old_data); + m_primary_key_update= FALSE; + } + DBUG_RETURN(insert_res); + } + DBUG_PRINT("info", ("delete+insert succeeded")); DBUG_RETURN(0); } @@ -2125,8 +2133,9 @@ int ha_ndbcluster::delete_row(const byte *record) no_uncommitted_rows_update(-1); - // If deleting from cursor, NoCommit will be handled in next_result - DBUG_RETURN(0); + if (!m_primary_key_update) + // If deleting from cursor, NoCommit will be handled in next_result + DBUG_RETURN(0); } else { From 77a9429c134ddf6eaf3936286280e216b2be03af Mon Sep 17 00:00:00 2001 From: "acurtis@xiphis.org" <> Date: Tue, 19 Apr 2005 10:51:11 +0100 Subject: [PATCH 16/65] Bug#7648 - Stored procedure crash when invoking a function that returns a bit Bugfix 9102 corrected the crashing; this corrects the result.
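The wrong result came from val_int() deriving the BIT value through a double round-trip. A minimal standalone sketch of the corrected pattern follows; ResultItem and StoredFuncItem are simplified stand-ins for illustration, not the server's real Item hierarchy:

  #include <cstdio>

  /* Stand-in for the item produced by executing the stored function. */
  struct ResultItem
  {
    long long value;
    bool null_value;
    long long val_int() const { return value; }
  };

  struct StoredFuncItem
  {
    ResultItem result;
    bool null_value;

    StoredFuncItem() : result{0x61, false}, null_value(false) {}

    /* Pretend this runs the stored function; false means success. */
    bool execute(ResultItem **it) { *it= &result; return false; }

    /* The fixed pattern: evaluate once, then take the integer value
       from the result item instead of casting a double. */
    long long val_int()
    {
      ResultItem *it;
      if (execute(&it))
      {
        null_value= true;
        return 0;
      }
      null_value= it->null_value;
      return it->val_int();
    }
  };

  int main()
  {
    StoredFuncItem f;
    std::printf("%lld\n", f.val_int());   /* prints 97, i.e. 'a' in BIT(8) */
    return 0;
  }

Delegating to the result item keeps values such as 0x61 ('a') intact and propagates NULL correctly, which a double conversion could not guarantee.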
--- mysql-test/r/sp.result | 6 ++++++ mysql-test/t/sp.test | 17 +++++++++++++-- sql/item_func.h | 12 +++++++++++- 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 4cc59679ce5..a4a3968672f 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -2964,4 +2964,10 @@ select bug9102(); drop function bug9102| bug9102() a +drop procedure if exists bug7648| +create function bug7648() returns bit(8) return 'a'| +select bug7648()| +bug7648() +a +drop function bug7648| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 3934e8ad17a..6c833b14482 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -3627,16 +3627,29 @@ set global query_cache_size = @qcs1| delete from t1| drop function bug9902| + # -# BUG#9102: New bug synopsis +# BUG#9102: Stored procedures: function which returns blob causes crash # --disable_warnings drop function if exists bug9102| --enable_warnings create function bug9102() returns blob return 'a'| -select bug9102(); +select bug9102()| drop function bug9102| + +# +# BUG#7648: Stored procedure crash when invoking a function that returns a bit +# +--disable_warnings +drop procedure if exists bug7648| +--enable_warnings +create function bug7648() returns bit(8) return 'a'| +select bug7648()| +drop function bug7648| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/item_func.h b/sql/item_func.h index 9bf21fa1aa3..b39786e5544 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1308,7 +1308,17 @@ public: longlong val_int() { - return (longlong)Item_func_sp::val_real(); + Item *it; + longlong l; + + if (execute(&it)) + { + null_value= 1; + return 0LL; + } + l= it->val_int(); + null_value= it->null_value; + return l; } double val_real() From 4a4c9017add2b693c64526b5f46181c128459d5b Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Tue, 19 Apr 2005 13:12:19 +0200 Subject: [PATCH 17/65] BUG#9568 mysql segfaults from CTRL-R - Segfault because of passing a 64-bit pointer to a 32-bit integer. - Add new include config_readline.h which will calculate some new defines based on what is found in config.h - This file was originally included in readline 4.3 as config.h.in, only the part that makes new defines has been moved to this file.
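The new config_readline.h centralizes what every readline source file previously repeated: include the autoconf-generated config.h when available, then derive feature macros from it in one place. A compilable single-file sketch of that wrapper pattern, where the demo main() and its output strings are illustrative additions, not part of the patch:

  /* Include the real autoconf output only when the build provides it. */
  #if defined (HAVE_CONFIG_H)
  # include <config.h>
  #endif

  /* Derive a feature macro once, instead of repeating the test per file. */
  #if defined (__STDC__) && defined (HAVE_STDARG_H)
  # define PREFER_STDARG
  # define USE_VARARGS
  #endif

  #include <cstdio>

  int main()
  {
  #if defined (PREFER_STDARG)
    std::puts("stdarg selected");
  #else
    std::puts("varargs/fallback selected");
  #endif
    return 0;
  }

Each .c file then needs only a single #include "config_readline.h" instead of its own guard block, which is exactly the substitution the hunks below perform.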
--- cmd-line-utils/readline/bind.c | 4 +--- cmd-line-utils/readline/callback.c | 4 +--- cmd-line-utils/readline/complete.c | 4 +--- cmd-line-utils/readline/config_readline.h | 26 +++++++++++++++++++++++ cmd-line-utils/readline/display.c | 4 +--- cmd-line-utils/readline/funmap.c | 4 +--- cmd-line-utils/readline/histexpand.c | 4 +--- cmd-line-utils/readline/histfile.c | 4 +--- cmd-line-utils/readline/history.c | 4 +--- cmd-line-utils/readline/histsearch.c | 4 +--- cmd-line-utils/readline/input.c | 4 +--- cmd-line-utils/readline/isearch.c | 4 +--- cmd-line-utils/readline/keymaps.c | 4 +--- cmd-line-utils/readline/kill.c | 4 +--- cmd-line-utils/readline/macro.c | 4 +--- cmd-line-utils/readline/mbutil.c | 4 +--- cmd-line-utils/readline/misc.c | 4 +--- cmd-line-utils/readline/nls.c | 4 +--- cmd-line-utils/readline/parens.c | 4 +--- cmd-line-utils/readline/readline.c | 4 +--- cmd-line-utils/readline/rltty.c | 4 +--- cmd-line-utils/readline/search.c | 4 +--- cmd-line-utils/readline/shell.c | 4 +--- cmd-line-utils/readline/signals.c | 4 +--- cmd-line-utils/readline/terminal.c | 4 +--- cmd-line-utils/readline/text.c | 4 +--- cmd-line-utils/readline/tilde.c | 4 +--- cmd-line-utils/readline/undo.c | 4 +--- cmd-line-utils/readline/util.c | 4 +--- cmd-line-utils/readline/vi_mode.c | 4 +--- cmd-line-utils/readline/xmalloc.c | 4 +--- 31 files changed, 56 insertions(+), 90 deletions(-) create mode 100644 cmd-line-utils/readline/config_readline.h diff --git a/cmd-line-utils/readline/bind.c b/cmd-line-utils/readline/bind.c index fd01049f09f..0e8efc5c636 100644 --- a/cmd-line-utils/readline/bind.c +++ b/cmd-line-utils/readline/bind.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/callback.c b/cmd-line-utils/readline/callback.c index a8f4323c929..737f483eed0 100644 --- a/cmd-line-utils/readline/callback.c +++ b/cmd-line-utils/readline/callback.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include "rlconf.h" diff --git a/cmd-line-utils/readline/complete.c b/cmd-line-utils/readline/complete.c index 693550c9945..749875e0e5e 100644 --- a/cmd-line-utils/readline/complete.c +++ b/cmd-line-utils/readline/complete.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/config_readline.h b/cmd-line-utils/readline/config_readline.h new file mode 100644 index 00000000000..99da4445228 --- /dev/null +++ b/cmd-line-utils/readline/config_readline.h @@ -0,0 +1,26 @@ +/* config-readline.h Maintained by hand. 
Contains the readline specific + parts from config.h.in in readline 4.3 */ + +#if defined (HAVE_CONFIG_H) +# include <config.h> +#endif + +/* Ultrix botches type-ahead when switching from canonical to + non-canonical mode, at least through version 4.3 */ +#if !defined (HAVE_TERMIOS_H) || !defined (HAVE_TCGETATTR) || defined (ultrix) +# define TERMIOS_MISSING +#endif + +#if defined (STRCOLL_BROKEN) +# undef HAVE_STRCOLL +#endif + +#if defined (__STDC__) && defined (HAVE_STDARG_H) +# define PREFER_STDARG +# define USE_VARARGS +#else +# if defined (HAVE_VARARGS_H) +# define PREFER_VARARGS +# define USE_VARARGS +# endif +#endif diff --git a/cmd-line-utils/readline/display.c b/cmd-line-utils/readline/display.c index f393e7e8516..7c393f1c8a5 100644 --- a/cmd-line-utils/readline/display.c +++ b/cmd-line-utils/readline/display.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/funmap.c b/cmd-line-utils/readline/funmap.c index fe9a1da43d7..53fd22754ab 100644 --- a/cmd-line-utils/readline/funmap.c +++ b/cmd-line-utils/readline/funmap.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #if !defined (BUFSIZ) #include diff --git a/cmd-line-utils/readline/histexpand.c b/cmd-line-utils/readline/histexpand.c index f01d54c5b1d..eed8d5a365e 100644 --- a/cmd-line-utils/readline/histexpand.c +++ b/cmd-line-utils/readline/histexpand.c @@ -22,9 +22,7 @@ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/histfile.c b/cmd-line-utils/readline/histfile.c index 60a91251b7a..77f757eac1d 100644 --- a/cmd-line-utils/readline/histfile.c +++ b/cmd-line-utils/readline/histfile.c @@ -25,9 +25,7 @@ you can call. I think I have done that. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/history.c b/cmd-line-utils/readline/history.c index 4242f33efe1..759ff9e0de9 100644 --- a/cmd-line-utils/readline/history.c +++ b/cmd-line-utils/readline/history.c @@ -25,9 +25,7 @@ you can call. I think I have done that. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/histsearch.c b/cmd-line-utils/readline/histsearch.c index d94fd6cd9c6..ffc97d720db 100644 --- a/cmd-line-utils/readline/histsearch.c +++ b/cmd-line-utils/readline/histsearch.c @@ -22,9 +22,7 @@ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include #if defined (HAVE_STDLIB_H) diff --git a/cmd-line-utils/readline/input.c b/cmd-line-utils/readline/input.c index 1442c5ef155..d9c52dfcec8 100644 --- a/cmd-line-utils/readline/input.c +++ b/cmd-line-utils/readline/input.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include <config.h> -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/isearch.c b/cmd-line-utils/readline/isearch.c index 137842a841f..1de16c6a56c 100644 --- a/cmd-line-utils/readline/isearch.c +++ b/cmd-line-utils/readline/isearch.c @@ -26,9 +26,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA.
*/ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/keymaps.c b/cmd-line-utils/readline/keymaps.c index 12506d3aab2..9972d83e4f1 100644 --- a/cmd-line-utils/readline/keymaps.c +++ b/cmd-line-utils/readline/keymaps.c @@ -20,9 +20,7 @@ Software Foundation, 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #if defined (HAVE_STDLIB_H) # include diff --git a/cmd-line-utils/readline/kill.c b/cmd-line-utils/readline/kill.c index f8c6961bbd3..32a661f076f 100644 --- a/cmd-line-utils/readline/kill.c +++ b/cmd-line-utils/readline/kill.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/macro.c b/cmd-line-utils/readline/macro.c index 7ab4b6ca657..7f5c39f7d86 100644 --- a/cmd-line-utils/readline/macro.c +++ b/cmd-line-utils/readline/macro.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/mbutil.c b/cmd-line-utils/readline/mbutil.c index debad6320ce..3113b7b0538 100644 --- a/cmd-line-utils/readline/mbutil.c +++ b/cmd-line-utils/readline/mbutil.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/misc.c b/cmd-line-utils/readline/misc.c index 94ad433473b..858d09dbe90 100644 --- a/cmd-line-utils/readline/misc.c +++ b/cmd-line-utils/readline/misc.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #if defined (HAVE_UNISTD_H) # include diff --git a/cmd-line-utils/readline/nls.c b/cmd-line-utils/readline/nls.c index 706c8195c10..6555c50c22b 100644 --- a/cmd-line-utils/readline/nls.c +++ b/cmd-line-utils/readline/nls.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/parens.c b/cmd-line-utils/readline/parens.c index 54ef1f3695f..5d4a08a0ce8 100644 --- a/cmd-line-utils/readline/parens.c +++ b/cmd-line-utils/readline/parens.c @@ -23,9 +23,7 @@ #include "rlconf.h" -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/readline.c b/cmd-line-utils/readline/readline.c index 28801f19dfc..2c0bb499b7b 100644 --- a/cmd-line-utils/readline/readline.c +++ b/cmd-line-utils/readline/readline.c @@ -22,9 +22,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include "posixstat.h" diff --git a/cmd-line-utils/readline/rltty.c b/cmd-line-utils/readline/rltty.c index 09702e9e755..9a2cef4b279 100644 --- a/cmd-line-utils/readline/rltty.c +++ b/cmd-line-utils/readline/rltty.c @@ -22,9 +22,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. 
*/ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/search.c b/cmd-line-utils/readline/search.c index ac47596a3f8..637534924f1 100644 --- a/cmd-line-utils/readline/search.c +++ b/cmd-line-utils/readline/search.c @@ -22,9 +22,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/shell.c b/cmd-line-utils/readline/shell.c index ad27cc14884..fd6a2816309 100644 --- a/cmd-line-utils/readline/shell.c +++ b/cmd-line-utils/readline/shell.c @@ -22,9 +22,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/signals.c b/cmd-line-utils/readline/signals.c index 0a1468b6b2a..4609598ff98 100644 --- a/cmd-line-utils/readline/signals.c +++ b/cmd-line-utils/readline/signals.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include /* Just for NULL. Yuck. */ #include diff --git a/cmd-line-utils/readline/terminal.c b/cmd-line-utils/readline/terminal.c index 397b10a1d46..a506fa6de09 100644 --- a/cmd-line-utils/readline/terminal.c +++ b/cmd-line-utils/readline/terminal.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include "posixstat.h" diff --git a/cmd-line-utils/readline/text.c b/cmd-line-utils/readline/text.c index 81a468fdbda..d98b266edfe 100644 --- a/cmd-line-utils/readline/text.c +++ b/cmd-line-utils/readline/text.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #if defined (HAVE_UNISTD_H) # include diff --git a/cmd-line-utils/readline/tilde.c b/cmd-line-utils/readline/tilde.c index fab4aab65ad..456a6bcb357 100644 --- a/cmd-line-utils/readline/tilde.c +++ b/cmd-line-utils/readline/tilde.c @@ -19,9 +19,7 @@ along with Readline; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #if defined (HAVE_UNISTD_H) # ifdef _MINIX diff --git a/cmd-line-utils/readline/undo.c b/cmd-line-utils/readline/undo.c index df913195fad..947da3d00d0 100644 --- a/cmd-line-utils/readline/undo.c +++ b/cmd-line-utils/readline/undo.c @@ -22,9 +22,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/util.c b/cmd-line-utils/readline/util.c index 2a6e4e3398a..403b3d544d9 100644 --- a/cmd-line-utils/readline/util.c +++ b/cmd-line-utils/readline/util.c @@ -21,9 +21,7 @@ 59 Temple Place, Suite 330, Boston, MA 02111 USA. 
*/ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include #include diff --git a/cmd-line-utils/readline/vi_mode.c b/cmd-line-utils/readline/vi_mode.c index 01df589f625..e8ad05d866f 100644 --- a/cmd-line-utils/readline/vi_mode.c +++ b/cmd-line-utils/readline/vi_mode.c @@ -31,9 +31,7 @@ #if defined (VI_MODE) -#if defined (HAVE_CONFIG_H) -# include -#endif +#include "config_readline.h" #include diff --git a/cmd-line-utils/readline/xmalloc.c b/cmd-line-utils/readline/xmalloc.c index 8985d340d39..698807addf9 100644 --- a/cmd-line-utils/readline/xmalloc.c +++ b/cmd-line-utils/readline/xmalloc.c @@ -20,9 +20,7 @@ Software Foundation, 59 Temple Place, Suite 330, Boston, MA 02111 USA. */ #define READLINE_LIBRARY -#if defined (HAVE_CONFIG_H) -#include -#endif +#include "config_readline.h" #include From 04a4680b8bafee1b2cdf5281f10aa10eb9e1f675 Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Tue, 19 Apr 2005 13:33:38 +0200 Subject: [PATCH 18/65] Updated Docs/Makefile.am: - fixed copyright header and removed obsolete comments about how to build the manual using texinfo - added an "install-data-hook" to install the mysql.info info page. This seems to be the only way to install info pages, if they are not built from texinfo sources directly. --- Docs/Makefile.am | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/Docs/Makefile.am b/Docs/Makefile.am index a0d278285b4..8577fee52cb 100644 --- a/Docs/Makefile.am +++ b/Docs/Makefile.am @@ -1,13 +1,18 @@ -# Normally you do not need to remake the files here. But if you want -# to you will need the GNU TeX-info utilities. To make a Postscript -# files you also need TeX and dvips. To make the PDF file you will -# need pdftex. We use the teTeX distribution for all of these. - -# We avoid default automake rules because of problems with .dvi file -# and BSD makes - -# If you know how to fix any of this more elegantly please mail -# docs@mysql.com +# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA noinst_SCRIPTS = Support/generate-text-files.pl @@ -18,6 +23,13 @@ all: txt_files txt_files: ../INSTALL-SOURCE ../EXCEPTIONS-CLIENT \ INSTALL-BINARY ../support-files/MacOSX/ReadMe.txt +# make sure that "make install" installs the info page, too +# automake only seems to take care of this automatically, +# if we're building the info page from texi directly. 
+install-data-hook: mysql.info + $(mkinstalldirs) $(DESTDIR)$(infodir) + $(INSTALL_DATA) $(srcdir)/mysql.info $(DESTDIR)$(infodir) + CLEAN_FILES: $(txt_files) touch $(txt_files) From 792b816b1be0f9a4cb736b23d6797c27e405532d Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Tue, 19 Apr 2005 15:12:32 +0200 Subject: [PATCH 19/65] Bug#7806 - insert on duplicate key and auto-update of timestamp Modified the check for the timestamp field so that the flags for automatic setting on insert and on update are cleared independently. --- mysql-test/r/type_timestamp.result | 47 +++++++++++++++ mysql-test/t/type_timestamp.test | 21 +++++++ sql/mysql_priv.h | 2 - sql/sql_insert.cc | 94 ++++++++++++++++++++++++++---- sql/table.h | 4 ++ 5 files changed, 154 insertions(+), 14 deletions(-) diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result index 6c46d308e7e..c0396e4640d 100644 --- a/mysql-test/r/type_timestamp.result +++ b/mysql-test/r/type_timestamp.result @@ -432,3 +432,50 @@ t1 CREATE TABLE "t1" ( ) set sql_mode=''; drop table t1; +create table t1 (a int auto_increment primary key, b int, c timestamp); +insert into t1 (a, b, c) values (1, 0, '2001-01-01 01:01:01'), +(2, 0, '2002-02-02 02:02:02'), (3, 0, '2003-03-03 03:03:03'); +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 0 2002-02-02 02:02:02 +3 0 2003-03-03 03:03:03 +update t1 set b = 2, c = c where a = 2; +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 2 2002-02-02 02:02:02 +3 0 2003-03-03 03:03:03 +insert into t1 (a) values (4); +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 2 2002-02-02 02:02:02 +3 0 2003-03-03 03:03:03 +4 NULL 2001-09-09 04:46:59 +update t1 set c = '2004-04-04 04:04:04' where a = 4; +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 2 2002-02-02 02:02:02 +3 0 2003-03-03 03:03:03 +4 NULL 2004-04-04 04:04:04 +insert into t1 (a) values (3), (5) on duplicate key update b = 3, c = c; +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 2 2002-02-02 02:02:02 +3 3 2003-03-03 03:03:03 +4 NULL 2004-04-04 04:04:04 +5 NULL 2001-09-09 04:46:59 +insert into t1 (a, c) values (4, '2004-04-04 00:00:00'), +(6, '2006-06-06 06:06:06') on duplicate key update b = 4; +select * from t1; +a b c +1 0 2001-01-01 01:01:01 +2 2 2002-02-02 02:02:02 +3 3 2003-03-03 03:03:03 +4 4 2001-09-09 04:46:59 +5 NULL 2001-09-09 04:46:59 +6 NULL 2006-06-06 06:06:06 +drop table t1; diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test index 783e310f02d..3c7f07dfbce 100644 --- a/mysql-test/t/type_timestamp.test +++ b/mysql-test/t/type_timestamp.test @@ -298,3 +298,24 @@ show create table t1; # restore default mode set sql_mode=''; drop table t1; + +# +# Bug#7806 - insert on duplicate key and auto-update of timestamp +# +create table t1 (a int auto_increment primary key, b int, c timestamp); +insert into t1 (a, b, c) values (1, 0, '2001-01-01 01:01:01'), + (2, 0, '2002-02-02 02:02:02'), (3, 0, '2003-03-03 03:03:03'); +select * from t1; +update t1 set b = 2, c = c where a = 2; +select * from t1; +insert into t1 (a) values (4); +select * from t1; +update t1 set c = '2004-04-04 04:04:04' where a = 4; +select * from t1; +insert into t1 (a) values (3), (5) on duplicate key update b = 3, c = c; +select * from t1; +insert into t1 (a, c) values (4, '2004-04-04 00:00:00'), + (6, '2006-06-06 06:06:06') on duplicate key update b = 4; +select * from t1; +drop table t1; + diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 2b13b2053d7..e75a6f25957 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h
@@ -668,8 +668,6 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name); void mysql_stmt_free(THD *thd, char *packet); void mysql_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); -int check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter); /* sql_error.cc */ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint code, diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 1f190a450de..bb115b9d548 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -42,15 +42,29 @@ static void unlink_blobs(register TABLE *table); #define DELAYED_LOG_UPDATE 1 #define DELAYED_LOG_BIN 2 + /* Check if insert fields are correct. - Sets table->timestamp_field_type to TIMESTAMP_NO_AUTO_SET or leaves it - as is, depending on if timestamp should be updated or not. + + SYNOPSIS + check_insert_fields() + thd The current thread. + table The table for insert. + fields The insert fields. + values The insert values. + + NOTE + Clears TIMESTAMP_AUTO_SET_ON_INSERT from table->timestamp_field_type + or leaves it as is, depending on if timestamp should be updated or + not. + + RETURN + 0 OK + -1 Error */ -int -check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter) +static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, + List<Item> &values) { if (fields.elements == 0 && values.elements != 0) { @@ -58,7 +72,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, { my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + MYF(0), 1); return -1; } #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -66,7 +80,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, check_grant_all_columns(thd,INSERT_ACL,table)) return -1; #endif - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } else { // Part field list @@ -74,7 +88,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, { my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + MYF(0), 1); return -1; } TABLE_LIST table_list; @@ -96,7 +110,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, } if (table->timestamp_field && // Don't set timestamp if used table->timestamp_field->query_id == thd->query_id) - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -106,6 +120,62 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, } + +/* + Check update fields for the timestamp field. + + SYNOPSIS + check_update_fields() + thd The current thread. + insert_table_list The insert table list. + table The table for update. + update_fields The update fields. + + NOTE + If the update fields include the timestamp field, + remove TIMESTAMP_AUTO_SET_ON_UPDATE from table->timestamp_field_type. + + RETURN + 0 OK + -1 Error +*/ + +static int check_update_fields(THD *thd, TABLE *table, + TABLE_LIST *insert_table_list, + List<Item> &update_fields) +{ + ulong timestamp_query_id; + LINT_INIT(timestamp_query_id); + + /* + Change the query_id for the timestamp column so that we can + check if this is modified directly. + */ + if (table->timestamp_field) + { + timestamp_query_id= table->timestamp_field->query_id; + table->timestamp_field->query_id= thd->query_id-1; + } + + /* + Check the fields we are going to modify.
This will set the query_id + of all used fields to the thread's query_id. + */ + if (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0)) + return -1; + + if (table->timestamp_field) + { + /* Don't set timestamp column if this is modified. */ + if (table->timestamp_field->query_id == thd->query_id) + (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; + else + table->timestamp_field->query_id= timestamp_query_id; + } + + return 0; +} + + int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, List<Item> &values_list, @@ -450,11 +520,11 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, if (!table->insert_values) DBUG_RETURN(-1); } - if ((values && check_insert_fields(thd, table, fields, *values, 1)) || + if ((values && check_insert_fields(thd, table, fields, *values)) || setup_tables(insert_table_list) || (values && setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) || (duplic == DUP_UPDATE && - (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0) || + (check_update_fields(thd, table, insert_table_list, update_fields) || setup_fields(thd, 0, insert_table_list, update_values, 1, 0, 0)))) DBUG_RETURN(-1); if (values && find_real_table_in_list(table_list->next, table_list->db, @@ -1457,7 +1527,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) DBUG_ENTER("select_insert::prepare"); unit= u; - if (check_insert_fields(thd,table,*fields,values,1)) + if (check_insert_fields(thd, table, *fields, values)) DBUG_RETURN(1); restore_record(table,default_values); // Get empty record diff --git a/sql/table.h b/sql/table.h index 054f24267b7..e822d68531e 100644 --- a/sql/table.h +++ b/sql/table.h @@ -60,6 +60,10 @@ typedef struct st_filesort_info /* Values in this enum are used to indicate during which operations value of TIMESTAMP field should be set to current timestamp. + WARNING: The values are used for bit operations. If you change the enum, + you must keep the bitwise relation of the values. For example: + (int) TIMESTAMP_AUTO_SET_ON_BOTH == + (int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE. */ enum timestamp_auto_set_type { From a42c7d442e8d9894de3f043703b77e22b30d0358 Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Tue, 19 Apr 2005 17:00:05 +0200 Subject: [PATCH 20/65] - added missing copyright headers to several Makefile.am's (noticed while testing changes to the mysql-copyright scripts) --- libmysqld/examples/Makefile.am | 16 ++++++++++++++++ sql/share/Makefile.am | 16 ++++++++++++++++ tools/Makefile.am | 16 ++++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/libmysqld/examples/Makefile.am b/libmysqld/examples/Makefile.am index 61f54b88b2e..83164bc946f 100644 --- a/libmysqld/examples/Makefile.am +++ b/libmysqld/examples/Makefile.am @@ -1,3 +1,19 @@ +# Copyright (C) 2000 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + noinst_PROGRAMS = mysqltest mysql client_sources = $(mysqltest_SOURCES) $(mysql_SOURCES) diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am index c70ac9ccf57..5ca3dce4e04 100644 --- a/sql/share/Makefile.am +++ b/sql/share/Makefile.am @@ -1,3 +1,19 @@ +# Copyright (C) 2000 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + ## Process this file with automake to create Makefile.in dist-hook: diff --git a/tools/Makefile.am b/tools/Makefile.am index 3c786dc9281..e182e5a87f8 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -1,3 +1,19 @@ +# Copyright (C) 2000 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + INCLUDES = @MT_INCLUDES@ -I$(srcdir)/../include \ $(openssl_includes) -I../include LDADD= @CLIENT_EXTRA_LDFLAGS@ ../libmysql_r/libmysqlclient_r.la @openssl_libs@ From dbd035818b5d311c8ada47bdb10cc6c51c797616 Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Tue, 19 Apr 2005 17:23:49 +0200 Subject: [PATCH 21/65] Disabled ndb tests for embedded --- mysql-test/t/ndb_alter_table.test | 1 + mysql-test/t/ndb_autodiscover.test | 1 + mysql-test/t/ndb_autodiscover2.test | 1 + mysql-test/t/ndb_basic.test | 1 + mysql-test/t/ndb_blob.test | 1 + mysql-test/t/ndb_cache.test | 1 + mysql-test/t/ndb_charset.test | 1 + mysql-test/t/ndb_index.test | 1 + mysql-test/t/ndb_index_ordered.test | 1 + mysql-test/t/ndb_index_unique.test | 1 + mysql-test/t/ndb_insert.test | 1 + mysql-test/t/ndb_limit.test | 1 + mysql-test/t/ndb_lock.test | 1 + mysql-test/t/ndb_minmax.test | 1 + mysql-test/t/ndb_multi.test | 1 + mysql-test/t/ndb_replace.test | 1 + mysql-test/t/ndb_restore.test | 1 + mysql-test/t/ndb_subquery.test | 1 + mysql-test/t/ndb_transaction.test | 1 + mysql-test/t/ndb_truncate.test | 1 + mysql-test/t/ndb_types.test | 1 + mysql-test/t/ndb_update.test | 1 + 22 files changed, 22 insertions(+) diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test index 3ff2e735cb5..9cc1426554f 100644 --- a/mysql-test/t/ndb_alter_table.test +++ b/mysql-test/t/ndb_alter_table.test @@ -1,5 +1,6 @@ -- source include/have_ndb.inc -- source include/have_multi_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1; diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test index 037115f5e82..bd73a36fcab 100644 --- a/mysql-test/t/ndb_autodiscover.test +++ b/mysql-test/t/ndb_autodiscover.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; diff --git a/mysql-test/t/ndb_autodiscover2.test b/mysql-test/t/ndb_autodiscover2.test index 11e1cc204f7..76baa31a2a9 100644 --- a/mysql-test/t/ndb_autodiscover2.test +++ b/mysql-test/t/ndb_autodiscover2.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc # # Simple test to show use of discover when the server has been restarted diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 66300f61fc3..24baf8d9fb4 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; diff --git a/mysql-test/t/ndb_blob.test b/mysql-test/t/ndb_blob.test index 96e38bfb58e..b265809b75f 100644 --- a/mysql-test/t/ndb_blob.test +++ b/mysql-test/t/ndb_blob.test @@ -1,4 +1,5 @@ --source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1; diff --git a/mysql-test/t/ndb_cache.test b/mysql-test/t/ndb_cache.test index abd09424f64..5ba42f9b23c 100644 --- a/mysql-test/t/ndb_cache.test +++ b/mysql-test/t/ndb_cache.test @@ -1,5 +1,6 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc +-- source include/not_embedded.inc set GLOBAL query_cache_size=1355776; reset query cache; diff --git a/mysql-test/t/ndb_charset.test b/mysql-test/t/ndb_charset.test 
index 1b9e7e8bfcc..242f9192948 100644 --- a/mysql-test/t/ndb_charset.test +++ b/mysql-test/t/ndb_charset.test @@ -1,4 +1,5 @@ --source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1; diff --git a/mysql-test/t/ndb_index.test b/mysql-test/t/ndb_index.test index e65b24a9b20..93085dea587 100644 --- a/mysql-test/t/ndb_index.test +++ b/mysql-test/t/ndb_index.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1; diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index 010060a694d..4bc2021d45e 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, test1, test2; diff --git a/mysql-test/t/ndb_index_unique.test b/mysql-test/t/ndb_index_unique.test index 9bbea75028b..67cf6cb4537 100644 --- a/mysql-test/t/ndb_index_unique.test +++ b/mysql-test/t/ndb_index_unique.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2, t3, t4, t5, t6, t7; diff --git a/mysql-test/t/ndb_insert.test b/mysql-test/t/ndb_insert.test index 611df3d84e9..f88d33f22f4 100644 --- a/mysql-test/t/ndb_insert.test +++ b/mysql-test/t/ndb_insert.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1; diff --git a/mysql-test/t/ndb_limit.test b/mysql-test/t/ndb_limit.test index c2d7a0ecfec..0df3b2f7566 100644 --- a/mysql-test/t/ndb_limit.test +++ b/mysql-test/t/ndb_limit.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t2; diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test index 39a8655b972..b93abbd564b 100644 --- a/mysql-test/t/ndb_lock.test +++ b/mysql-test/t/ndb_lock.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc connect (con1,localhost,root,,); connect (con2,localhost,root,,); diff --git a/mysql-test/t/ndb_minmax.test b/mysql-test/t/ndb_minmax.test index 3be193ce602..97ea84f98ef 100644 --- a/mysql-test/t/ndb_minmax.test +++ b/mysql-test/t/ndb_minmax.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 27ddd6508e9..24651913a79 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -1,5 +1,6 @@ -- source include/have_ndb.inc -- source include/have_multi_ndb.inc +-- source include/not_embedded.inc --disable_warnings diff --git a/mysql-test/t/ndb_replace.test b/mysql-test/t/ndb_replace.test index 59454b5a9fa..1c06a9a6633 100644 --- a/mysql-test/t/ndb_replace.test +++ b/mysql-test/t/ndb_replace.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc # # Test of REPLACE with NDB diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index d413453fb0e..ee47f7da6bc 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings use test; diff --git a/mysql-test/t/ndb_subquery.test b/mysql-test/t/ndb_subquery.test index cebc1920eaa..9d3a256a263 100644 --- a/mysql-test/t/ndb_subquery.test 
+++ b/mysql-test/t/ndb_subquery.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1; diff --git a/mysql-test/t/ndb_transaction.test b/mysql-test/t/ndb_transaction.test index f8ed22207ea..ae02059786d 100644 --- a/mysql-test/t/ndb_transaction.test +++ b/mysql-test/t/ndb_transaction.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; diff --git a/mysql-test/t/ndb_truncate.test b/mysql-test/t/ndb_truncate.test index 63bb8cbefb6..7c0f79bcc59 100644 --- a/mysql-test/t/ndb_truncate.test +++ b/mysql-test/t/ndb_truncate.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t2; diff --git a/mysql-test/t/ndb_types.test b/mysql-test/t/ndb_types.test index d66718ca4e4..4276fa147eb 100644 --- a/mysql-test/t/ndb_types.test +++ b/mysql-test/t/ndb_types.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1; diff --git a/mysql-test/t/ndb_update.test b/mysql-test/t/ndb_update.test index fc489ec4697..5453e41f937 100644 --- a/mysql-test/t/ndb_update.test +++ b/mysql-test/t/ndb_update.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1; From 25c396628a5db12756e46e4e45708583bf1a0a81 Mon Sep 17 00:00:00 2001 From: "igor@rurik.mysql.com" <> Date: Tue, 19 Apr 2005 16:54:30 -0700 Subject: [PATCH 22/65] sql_select.cc: Fixed bug #9681. The bug happened with queries using derived tables specified by a SELECT with ROLLUP, such as: SELECT * FROM (SELECT a, SUM(a) FROM t1 GROUP BY a WITH ROLLUP) t2, if column a of table t1 is declared as NOT NULL. This was due to the fact that the first column of the temporary table created to contain the derived table erroneously inherited the NOT NULL attribute from column a. olap.result, olap.test: Added a test case for bug #9681.
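The fix below has two halves: create_tmp_field_from_field() now also honours the item's maybe_null flag, and JOIN::rollup_init() marks each select-list item that is also a group column as nullable, since ROLLUP writes NULL into the super-aggregate rows. A self-contained sketch of that marking step, using cut-down stand-ins for Item and the group list rather than the server's real types:

  #include <string>
  #include <vector>

  /* Cut-down stand-ins for the server's Item and GROUP BY list. */
  struct Item
  {
    std::string name;
    bool maybe_null;
  };

  /* Any select-list item that is also a group column must be marked
     nullable before the tmp table for the derived table is built,
     because ROLLUP emits NULL for it in super-aggregate rows. */
  static void mark_rollup_nullable(std::vector<Item*> &fields,
                                   const std::vector<Item*> &group_list)
  {
    for (Item *item : fields)
      for (const Item *group : group_list)
        if (group->name == item->name)
          item->maybe_null= true;
  }

  int main()
  {
    Item a{"a", false}, sum{"SUM(a)", false};
    std::vector<Item*> fields{&a, &sum};
    std::vector<Item*> group_list{&a};
    mark_rollup_nullable(fields, group_list);
    return a.maybe_null ? 0 : 1;   /* 'a' is now nullable */
  }

With the flag set, the derived table's first column no longer inherits NOT NULL, so the rollup row (NULL, 3) survives the SELECT * in the test case.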
--- mysql-test/r/olap.result | 13 +++++++++++++ mysql-test/t/olap.test | 13 +++++++++++++ sql/sql_select.cc | 14 +++++++++++++- 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/olap.result b/mysql-test/r/olap.result index 6500edf478f..341d51033f2 100644 --- a/mysql-test/r/olap.result +++ b/mysql-test/r/olap.result @@ -392,3 +392,16 @@ SELECT SQL_CALC_FOUND_ROWS a, SUM(b) FROM t1 GROUP BY a WITH ROLLUP LIMIT 1; a SUM(b) 1 4 DROP TABLE t1; +CREATE TABLE t1 (a int(11) NOT NULL); +INSERT INTO t1 VALUES (1),(2); +SELECT a, SUM(a) m FROM t1 GROUP BY a WITH ROLLUP; +a m +1 1 +2 2 +NULL 3 +SELECT * FROM ( SELECT a, SUM(a) m FROM t1 GROUP BY a WITH ROLLUP ) t2; +a m +1 1 +2 2 +NULL 3 +DROP TABLE t1; diff --git a/mysql-test/t/olap.test b/mysql-test/t/olap.test index 3aac0f45ead..4f3b0f51286 100644 --- a/mysql-test/t/olap.test +++ b/mysql-test/t/olap.test @@ -171,3 +171,16 @@ SELECT a, SUM(b) FROM t1 GROUP BY a WITH ROLLUP LIMIT 1; SELECT SQL_CALC_FOUND_ROWS a, SUM(b) FROM t1 GROUP BY a WITH ROLLUP LIMIT 1; DROP TABLE t1; + +# +# Tests for bug #9681: ROLLUP in subquery for derived table with +# a group by field declared as NOT NULL +# + +CREATE TABLE t1 (a int(11) NOT NULL); +INSERT INTO t1 VALUES (1),(2); + +SELECT a, SUM(a) m FROM t1 GROUP BY a WITH ROLLUP; +SELECT * FROM ( SELECT a, SUM(a) m FROM t1 GROUP BY a WITH ROLLUP ) t2; + +DROP TABLE t1; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3f133a473ac..39698d6f1f7 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -4779,7 +4779,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, item->result_field= new_field; else new_field->field_name= name; - if (org_field->maybe_null()) + if (org_field->maybe_null() || (item && item->maybe_null)) new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join if (org_field->type() == FIELD_TYPE_VAR_STRING) table->db_create_options|= HA_OPTION_PACK_RECORD; @@ -9192,7 +9192,19 @@ bool JOIN::rollup_init() for (j=0 ; j < fields_list.elements ; j++) rollup.fields[i].push_back(rollup.null_items[i]); } + List_iterator_fast<Item> it(fields_list); + Item *item; + while ((item= it++)) + { + ORDER *group_tmp; + for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next) + { + if (*group_tmp->item == item) + item->maybe_null= 1; + } + } return 0; + } From cf152b156d7ee9fb99daf27ab6a04c4421a20b76 Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Wed, 20 Apr 2005 08:39:08 +0200 Subject: [PATCH 23/65] - Enabled the "blackhole" storage engine for the Max RPM subpackage --- support-files/mysql.spec.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index df21ad63377..1de315b6458 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -331,6 +331,7 @@ BuildMySQL "--enable-shared \ --with-archive \ --with-csv-storage-engine \ --with-example-storage-engine \ + --with-blackhole-storage-engine \ --with-embedded-server \ --with-comment=\"MySQL Community Edition - Max (GPL)\" \ --with-server-suffix='-Max'" @@ -688,6 +689,10 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Wed Apr 20 2005 Lenz Grimmer + +- Enabled the "blackhole" storage engine for the Max RPM + * Wed Apr 13 2005 Lenz Grimmer - removed the MySQL manual files (html/ps/texi) - they have been removed From ff665c9513658c383f2b5cc0261eaf998c1b158d Mon Sep 17 00:00:00 2001 From: "jani@a193-229-222-105.elisa-laajakaista.fi" <> Date: Wed, 20 Apr 2005 10:37:03 +0300
Subject: [PATCH 24/65] Fixed Bug#8046 --mysqld of mysqld_safe must contain substring "mysqld" --- configure.in | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/configure.in b/configure.in index ff4527ee09f..f4ced66a06d 100644 --- a/configure.in +++ b/configure.in @@ -451,33 +451,33 @@ PS=$ac_cv_path_PS # Linux style if $PS p $$ 2> /dev/null | grep $0 > /dev/null then - FIND_PROC="$PS p \$\$PID | grep mysqld > /dev/null" + FIND_PROC="$PS p \$\$PID | grep \$\$MYSQLD > /dev/null" # Solaris elif $PS -fp $$ 2> /dev/null | grep $0 > /dev/null then - FIND_PROC="$PS -p \$\$PID | grep mysqld > /dev/null" + FIND_PROC="$PS -p \$\$PID | grep \$\$MYSQLD > /dev/null" # BSD style elif $PS -uaxww 2> /dev/null | grep $0 > /dev/null then - FIND_PROC="$PS -uaxww | grep mysqld | grep \" \$\$PID \" > /dev/null" + FIND_PROC="$PS -uaxww | grep \$\$MYSQLD | grep \" \$\$PID \" > /dev/null" # SysV style elif $PS -ef 2> /dev/null | grep $0 > /dev/null then - FIND_PROC="$PS -ef | grep mysqld | grep \" \$\$PID \" > /dev/null" + FIND_PROC="$PS -ef | grep \$\$MYSQLD | grep \" \$\$PID \" > /dev/null" # Do anybody use this? elif $PS $$ 2> /dev/null | grep $0 > /dev/null then - FIND_PROC="$PS \$\$PID | grep mysqld > /dev/null" + FIND_PROC="$PS \$\$PID | grep \$\$MYSQLD > /dev/null" else case $SYSTEM_TYPE in *freebsd*) - FIND_PROC="$PS p \$\$PID | grep mysqld > /dev/null" + FIND_PROC="$PS p \$\$PID | grep \$\$MYSQLD > /dev/null" ;; *darwin*) - FIND_PROC="$PS -uaxww | grep mysqld | grep \" \$\$PID \" > /dev/null" + FIND_PROC="$PS -uaxww | grep \$\$MYSQLD | grep \" \$\$PID \" > /dev/null" ;; *cygwin*) - FIND_PROC="$PS -e | grep mysqld | grep \" \$\$PID \" > /dev/null" + FIND_PROC="$PS -e | grep \$\$MYSQLD | grep \" \$\$PID \" > /dev/null" ;; *netware*) FIND_PROC= From c9e68705bb1b75b8c940d65d4e5b1c050ba05170 Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Wed, 20 Apr 2005 11:08:53 +0200 Subject: [PATCH 25/65] Removed forgotten abort() call, bug#10001 --- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 03a0590b6e2..efd4c2a46d0 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -1838,7 +1838,6 @@ void Dbtc::execKEYINFO(Signal* signal) do { if (cfirstfreeDatabuf == RNIL) { jam(); - abort(); seizeDatabuferrorLab(signal); return; }//if From b26d29a081026bdcd9ab3441566beb04a758d42e Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Wed, 20 Apr 2005 12:02:07 +0200 Subject: [PATCH 26/65] After review fix --- cmd-line-utils/readline/config_readline.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd-line-utils/readline/config_readline.h b/cmd-line-utils/readline/config_readline.h index 99da4445228..141989ec3c9 100644 --- a/cmd-line-utils/readline/config_readline.h +++ b/cmd-line-utils/readline/config_readline.h @@ -1,12 +1,16 @@ -/* config-readline.h Maintained by hand. Contains the readline specific - parts from config.h.in in readline 4.3 */ +/* + config-readline.h Maintained by hand. 
Contains the readline specific + parts from config.h.in in readline 4.3 +*/ #if defined (HAVE_CONFIG_H) # include #endif -/* Ultrix botches type-ahead when switching from canonical to - non-canonical mode, at least through version 4.3 */ +/* + Ultrix botches type-ahead when switching from canonical to + non-canonical mode, at least through version 4.3 +*/ #if !defined (HAVE_TERMIOS_H) || !defined (HAVE_TCGETATTR) || defined (ultrix) # define TERMIOS_MISSING #endif From 189d6b502bbdf3b9e295177dc4f61ed28ca72db6 Mon Sep 17 00:00:00 2001 From: "pem@mysql.comhem.se" <> Date: Wed, 20 Apr 2005 15:37:07 +0200 Subject: [PATCH 27/65] Fixed BUG#6898: Stored procedure crash if GOTO statements exist Bug in the optimizer caused an infinite loop for weird code. --- mysql-test/r/sp.result | 9 +++++++++ mysql-test/t/sp.test | 16 ++++++++++++++++ sql/sp_head.cc | 2 +- 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 4bb1640f0eb..d911fc68b7c 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -2958,4 +2958,13 @@ select @x| set global query_cache_size = @qcs1| delete from t1| drop function bug9902| +drop procedure if exists bug6898| +create procedure bug6898() +begin +goto label1; +label label1; +begin end; +goto label1; +end| +drop procedure bug6898| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 4101a7a4bfa..d5298645f76 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -3628,6 +3628,22 @@ delete from t1| drop function bug9902| +# +# BUG#6898: Stored procedure crash if GOTO statements exist +# +--disable_warnings +drop procedure if exists bug6898| +--enable_warnings +create procedure bug6898() +begin + goto label1; + label label1; + begin end; + goto label1; +end| +drop procedure bug6898| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 0fe9c449540..c505ef05b57 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1499,7 +1499,7 @@ sp_instr_jump::opt_shortcut_jump(sp_head *sp, sp_instr *start) { uint ndest; - if (start == i) + if (start == i || this == i) break; ndest= i->opt_shortcut_jump(sp, start); if (ndest == dest) From 737a866faae0aacdfa231a4c3546bcffa844bf74 Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Wed, 20 Apr 2005 16:09:00 +0200 Subject: [PATCH 28/65] ndb - dbug aid for api signal log --- ndb/src/ndbapi/TransporterFacade.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index 5582143be44..93cec59ada6 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -160,6 +160,10 @@ setSignalLog(){ } else if(tmp !=0){ if (strcmp(tmp, "-") == 0) signalLogger.setOutputStream(stdout); +#ifndef DBUG_OFF + else if (strcmp(tmp, "+") == 0) + signalLogger.setOutputStream(DBUG_FILE); +#endif else signalLogger.setOutputStream(fopen(tmp, "w")); apiSignalLog = tmp; From 54dfe4a66a5bc2a17098aa0597b3a1a299cb61f0 Mon Sep 17 00:00:00 2001 From: "pem@mysql.comhem.se" <> Date: Wed, 20 Apr 2005 17:59:28 +0200 Subject: [PATCH 29/65] Fixed BUG#7047: Stored procedure crash if alter procedure by simply disallowing alter procedure/function in an SP (as for drop). 
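The guard is the same early check the DROP path already performs: while a routine body is being parsed, lex->sphead is non-null, and ALTER PROCEDURE/FUNCTION is refused outright. A hedged standalone sketch of the idea, where Lex is a simplified stand-in for the real parser state:

  #include <cstdio>

  struct Lex { const void *sphead; };  /* non-null inside a routine body */

  /* Mirror of the DROP guard: refuse ALTER PROCEDURE/FUNCTION when the
     statement is being parsed inside another stored routine. */
  static bool alter_routine_allowed(const Lex &lex, const char *kind)
  {
    if (lex.sphead)
    {
      std::fprintf(stderr,
          "Can't drop or alter a %s from within another stored routine\n",
          kind);
      return false;
    }
    return true;
  }

  int main()
  {
    Lex outside{nullptr};
    Lex inside{&outside};
    alter_routine_allowed(inside, "PROCEDURE");            /* rejected */
    return alter_routine_allowed(outside, "PROCEDURE") ? 0 : 1;
  }

Reusing the ER_SP_NO_DROP_SP error (with its text widened to "drop or alter") keeps the two restrictions consistent, as the errmsg.txt hunk below shows.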
--- mysql-test/r/sp-error.result | 13 +++++++++++-- mysql-test/t/sp-error.test | 14 ++++++++++++++ sql/share/errmsg.txt | 2 +- sql/sql_yacc.yy | 10 ++++++++++ 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result index 3f224d790f7..5ee1d46f3c9 100644 --- a/mysql-test/r/sp-error.result +++ b/mysql-test/r/sp-error.result @@ -457,9 +457,9 @@ ERROR 42S22: Unknown column 'aa' in 'order clause' drop procedure bug2653_1| drop procedure bug2653_2| create procedure bug4344() drop procedure bug4344| -ERROR HY000: Can't drop a PROCEDURE from within another stored routine +ERROR HY000: Can't drop or alter a PROCEDURE from within another stored routine create procedure bug4344() drop function bug4344| -ERROR HY000: Can't drop a FUNCTION from within another stored routine +ERROR HY000: Can't drop or alter a FUNCTION from within another stored routine drop procedure if exists bug3294| create procedure bug3294() begin @@ -585,4 +585,13 @@ end; end; end| drop procedure bug9073| +create procedure bug7047() +alter procedure bug7047| +ERROR HY000: Can't drop or alter a PROCEDURE from within another stored routine +create function bug7047() returns int +begin +alter function bug7047; +return 0; +end| +ERROR HY000: Can't drop or alter a FUNCTION from within another stored routine drop table t1| diff --git a/mysql-test/t/sp-error.test b/mysql-test/t/sp-error.test index ecbc98f86e9..cb4ebf080f4 100644 --- a/mysql-test/t/sp-error.test +++ b/mysql-test/t/sp-error.test @@ -817,6 +817,20 @@ end| drop procedure bug9073| +# +# BUG#7047: Stored procedure crash if alter procedure +# +--error ER_SP_NO_DROP_SP +create procedure bug7047() + alter procedure bug7047| +--error ER_SP_NO_DROP_SP +create function bug7047() returns int +begin + alter function bug7047; + return 0; +end| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index e93229a4a3e..8d7a1fe0093 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5213,7 +5213,7 @@ ER_VIEW_INVALID eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s)" rus "View '%-.64s.%-.64s' ÓÓÙÌÁÅÔÓÑ ÎÁ ÎÅÓÕÝÅÓÔ×ÕÀÝÉÅ ÔÁÂÌÉÃÙ ÉÌÉ ÓÔÏÌÂÃÙ ÉÌÉ ÆÕÎËÃÉÉ" ER_SP_NO_DROP_SP - eng "Can't drop a %s from within another stored routine" + eng "Can't drop or alter a %s from within another stored routine" ER_SP_GOTO_IN_HNDLR eng "GOTO is not allowed in a stored procedure handler" ER_TRG_ALREADY_EXISTS diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 9aa5d7fb4fc..7f5255aa764 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -3305,6 +3305,11 @@ alter: { LEX *lex= Lex; + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); + YYABORT; + } bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } sp_a_chistics @@ -3318,6 +3323,11 @@ alter: { LEX *lex= Lex; + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); + YYABORT; + } bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } sp_a_chistics From 8689083aca0c97457701551b776b1a9c9743c10d Mon Sep 17 00:00:00 2001 From: "acurtis@xiphis.org" <> Date: Wed, 20 Apr 2005 18:08:42 +0100 Subject: [PATCH 30/65] Bug#9775 - Stored procedures: crash if create function that returns enum or set Fix bug and implement return type casting. 
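The return type cast works by no longer handing the routine's result Item back to the
caller directly; instead the value is saved into a Field created from the declared
RETURNS type, so ENUM/SET coercion runs through the ordinary field-store path. A
simplified sketch of that pattern (the real code is Item_func_sp::execute(Field **)
in the item_func.cc hunk below; the helper name here is invented):

    /*
      Sketch only: result_item is the value the routine body produced,
      return_field is a Field built from the RETURNS clause.
    */
    int save_result(Item *result_item, Field *return_field)
    {
      /* save_in_field() applies the target type's conversion rules,
         e.g. mapping integer 1 to 'a' for ENUM('a','b') */
      result_item->save_in_field(return_field, 1);
      return return_field->is_null();  /* report whether the result is NULL */
    }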
--- mysql-test/r/sp.result | 27 ++++++++++++++++-- mysql-test/t/sp.test | 23 +++++++++++++++- sql/item_func.cc | 62 ++++++++++++++++++++++++++++++++---------- sql/item_func.h | 58 ++++++++++++--------------------------- sql/sp_head.cc | 8 ++++-- 5 files changed, 117 insertions(+), 61 deletions(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index a4a3968672f..5d93f252716 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -2960,14 +2960,35 @@ delete from t1| drop function bug9902| drop function if exists bug9102| create function bug9102() returns blob return 'a'| -select bug9102(); -drop function bug9102| +select bug9102()| bug9102() a -drop procedure if exists bug7648| +drop function bug9102| +drop function if exists bug7648| create function bug7648() returns bit(8) return 'a'| select bug7648()| bug7648() a drop function bug7648| +drop function if exists bug9775| +create function bug9775(v1 char(1)) returns enum('a','b') return v1| +select bug9775('a'),bug9775('b'),bug9775('c')| +bug9775('a') bug9775('b') bug9775('c') +a b +drop function bug9775| +create function bug9775(v1 int) returns enum('a','b') return v1| +select bug9775(1),bug9775(2),bug9775(3)| +bug9775(1) bug9775(2) bug9775(3) +a b +drop function bug9775| +create function bug9775(v1 char(1)) returns set('a','b') return v1| +select bug9775('a'),bug9775('b'),bug9775('a,b'),bug9775('c')| +bug9775('a') bug9775('b') bug9775('a,b') bug9775('c') +a b a,b +drop function bug9775| +create function bug9775(v1 int) returns set('a','b') return v1| +select bug9775(1),bug9775(2),bug9775(3),bug9775(4)| +bug9775(1) bug9775(2) bug9775(3) bug9775(4) +a b a,b +drop function bug9775| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 6c833b14482..9f76f7ec0e4 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -3643,13 +3643,34 @@ drop function bug9102| # BUG#7648: Stored procedure crash when invoking a function that returns a bit # --disable_warnings -drop procedure if exists bug7648| +drop function if exists bug7648| --enable_warnings create function bug7648() returns bit(8) return 'a'| select bug7648()| drop function bug7648| +# +# BUG#9775: crash if create function that returns enum or set +# +--disable_warnings +drop function if exists bug9775| +--enable_warnings +create function bug9775(v1 char(1)) returns enum('a','b') return v1| +select bug9775('a'),bug9775('b'),bug9775('c')| +drop function bug9775| +create function bug9775(v1 int) returns enum('a','b') return v1| +select bug9775(1),bug9775(2),bug9775(3)| +drop function bug9775| + +create function bug9775(v1 char(1)) returns set('a','b') return v1| +select bug9775('a'),bug9775('b'),bug9775('a,b'),bug9775('c')| +drop function bug9775| +create function bug9775(v1 int) returns set('a','b') return v1| +select bug9775(1),bug9775(2),bug9775(3),bug9775(4)| +drop function bug9775| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/item_func.cc b/sql/item_func.cc index c607efa0797..fb21551e22f 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -4461,7 +4461,7 @@ longlong Item_func_row_count::val_int() Item_func_sp::Item_func_sp(sp_name *name) - :Item_func(), m_name(name), m_sp(NULL) + :Item_func(), m_name(name), m_sp(NULL), result_field(NULL) { maybe_null= 1; m_name->init_qname(current_thd); @@ -4470,7 +4470,7 @@ Item_func_sp::Item_func_sp(sp_name *name) Item_func_sp::Item_func_sp(sp_name *name, List &list) - :Item_func(list), m_name(name), m_sp(NULL) + :Item_func(list), m_name(name), m_sp(NULL), result_field(NULL) { 
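  /*
    result_field caches a Field built from the routine's RETURNS type;
    it starts out NULL here, is allocated lazily on the first
    execute(Field **) call, and is reset to NULL again in cleanup().
  */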
maybe_null= 1; m_name->init_qname(current_thd); @@ -4526,6 +4526,29 @@ Item_func_sp::sp_result_field(void) const } +int +Item_func_sp::execute(Field **flp) +{ + Item *it; + Field *f; + if (execute(&it)) + { + null_value= 1; + return 1; + } + if (!(f= *flp)) + { + *flp= f= sp_result_field(); + f->move_field((f->pack_length() > sizeof(result_buf)) ? + sql_alloc(f->pack_length()) : result_buf); + f->null_ptr= (uchar *)&null_value; + f->null_bit= 1; + } + it->save_in_field(f, 1); + return f->is_null(); +} + + int Item_func_sp::execute(Item **itp) { @@ -4601,6 +4624,8 @@ Item_func_sp::field_type() const Field *field= 0; DBUG_ENTER("Item_func_sp::field_type"); + if (result_field) + DBUG_RETURN(result_field->type()); if (! m_sp) m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only if ((field= sp_result_field())) @@ -4621,6 +4646,8 @@ Item_func_sp::result_type() const DBUG_ENTER("Item_func_sp::result_type"); DBUG_PRINT("info", ("m_sp = %p", m_sp)); + if (result_field) + DBUG_RETURN(result_field->result_type()); if (! m_sp) m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only if ((field= sp_result_field())) @@ -4636,8 +4663,16 @@ Item_func_sp::result_type() const void Item_func_sp::fix_length_and_dec() { + Field *field= result_field; DBUG_ENTER("Item_func_sp::fix_length_and_dec"); + if (result_field) + { + decimals= result_field->decimals(); + max_length= result_field->representation_length(); + DBUG_VOID_RETURN; + } + if (! m_sp) m_sp= sp_find_function(current_thd, m_name, TRUE); // cache only if (! m_sp) @@ -4646,29 +4681,28 @@ Item_func_sp::fix_length_and_dec() } else { - switch (result_type()) { + if (!field) + field= sp_result_field(); + + decimals= field->decimals(); + max_length= field->representation_length(); + + switch (field->result_type()) { case STRING_RESULT: maybe_null= 1; - max_length= MAX_BLOB_WIDTH; - break; case REAL_RESULT: - decimals= NOT_FIXED_DEC; - max_length= float_length(decimals); - break; case INT_RESULT: - decimals= 0; - max_length= 21; - break; case DECIMAL_RESULT: - // TODO: where to find real precision and scale? 
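      /* the TODO above is now resolved: precision and scale are read off
         the RETURNS-type Field via field->decimals() and
         field->representation_length(), so the hardcoded per-type values
         below can be removed */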
- decimals= min(DECIMAL_MAX_LENGTH / 2, NOT_FIXED_DEC - 1); - max_length= DECIMAL_MAX_LENGTH; + break; case ROW_RESULT: default: // This case should never be chosen DBUG_ASSERT(0); break; } + + if (field != result_field) + delete field; } DBUG_VOID_RETURN; } diff --git a/sql/item_func.h b/sql/item_func.h index b39786e5544..ba5a6101e4c 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1283,8 +1283,11 @@ private: sp_name *m_name; mutable sp_head *m_sp; TABLE *dummy_table; + Field *result_field; + char result_buf[64]; int execute(Item **itp); + int execute(Field **flp); Field *sp_result_field(void) const; public: @@ -1296,6 +1299,12 @@ public: virtual ~Item_func_sp() {} + void cleanup() + { + Item_func::cleanup(); + result_field= NULL; + } + const char *func_name() const; enum enum_field_types field_type() const; @@ -1308,63 +1317,30 @@ public: longlong val_int() { - Item *it; - longlong l; - - if (execute(&it)) - { - null_value= 1; + if (execute(&result_field)) return 0LL; - } - l= it->val_int(); - null_value= it->null_value; - return l; + return result_field->val_int(); } double val_real() { - Item *it; - double d; - - if (execute(&it)) - { - null_value= 1; + if (execute(&result_field)) return 0.0; - } - d= it->val_real(); - null_value= it->null_value; - return d; + return result_field->val_real(); } my_decimal *val_decimal(my_decimal *dec_buf) { - Item *it; - my_decimal *result; - - if (execute(&it)) - { - null_value= 1; + if (execute(&result_field)) return NULL; - } - result= it->val_decimal(dec_buf); - null_value= it->null_value; - return result; + return result_field->val_decimal(dec_buf); } - String *val_str(String *str) { - Item *it; - String *s; - - if (execute(&it)) - { - null_value= 1; + if (execute(&result_field)) return NULL; - } - s= it->val_str(str); - null_value= it->null_value; - return s; + return result_field->val_str(str); } void fix_length_and_dec(); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index d1486cb234e..4ae76e4c6a5 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -378,8 +378,9 @@ sp_head::create_typelib(List *src) result->count= src->elements; result->name= ""; if (!(result->type_names=(const char **) - alloc_root(mem_root,sizeof(char *)*(result->count+1)))) + alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1)))) return 0; + result->type_lengths= (unsigned int *)(result->type_names + result->count+1); List_iterator it(*src); String conv, *tmp; uint32 dummy; @@ -397,8 +398,10 @@ sp_head::create_typelib(List *src) result->type_names[i]= buf; result->type_lengths[i]= conv.length(); } - else + else { result->type_names[i]= strdup_root(mem_root, tmp->c_ptr()); + result->type_lengths[i]= tmp->length(); + } // Strip trailing spaces. uint lengthsp= cs->cset->lengthsp(cs, result->type_names[i], @@ -407,6 +410,7 @@ sp_head::create_typelib(List *src) ((uchar *)result->type_names[i])[lengthsp]= '\0'; } result->type_names[result->count]= 0; + result->type_lengths[result->count]= 0; } return result; } From c2959178ca9c3eacbd59d854899ecdcdb7ff72de Mon Sep 17 00:00:00 2001 From: "sergefp@mysql.com" <> Date: Thu, 21 Apr 2005 01:55:33 +0400 Subject: [PATCH 31/65] Fix for BUG#10037 * Add 0.01 to cost of 'range'+'using index' scan to avoid optimizer choice races with 'index' scan. 
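The race being avoided is a floating-point tie: when a covering 'range' scan and a full
'index' scan are estimated at exactly the same cost, the winning plan depends on the
order in which the optimizer happens to compare candidates, which made EXPLAIN output
flap between runs. Nudging the 'range' cost by a small epsilon makes every such tie
deterministic. Illustrative arithmetic (the numbers are invented; only the comparison
matters):

    #include <cstdio>
    int main()
    {
      double index_scan_cost = 25.00;  /* full 'index' scan            */
      double range_scan_cost = 25.00;  /* covering 'range' scan, tied  */
      range_scan_cost += 0.01;         /* the patch's tie-breaker      */
      /* 25.00 < 25.01: 'index' now wins every exact tie, on every run */
      printf("%s\n", index_scan_cost < range_scan_cost ? "index" : "range");
      return 0;
    }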
--- mysql-test/r/range.result | 4 ++-- sql/opt_range.cc | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 256c4ee6431..fea5754d704 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -256,12 +256,12 @@ INSERT INTO t2 VALUES (0),(0),(1),(1),(2),(2); explain select * from t1, t2 where (t1.key1 table->file->ref_length) + 1); read_time=((double) (records+keys_per_block-1)/ (double) keys_per_block); - return read_time; + /* Add 0.01 to avoid cost races between 'range' and 'index' */ + return read_time + 0.01; } @@ -7912,6 +7913,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void) file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */ result= file->ha_index_init(index); result= file->index_last(record); + if (result == HA_ERR_END_OF_FILE) + DBUG_RETURN(0); if (result) DBUG_RETURN(result); if (quick_prefix_select && quick_prefix_select->reset()) From c6464cab0d1ca92950497344b7587e55d5ae4769 Mon Sep 17 00:00:00 2001 From: "hf@deer.(none)" <> Date: Thu, 21 Apr 2005 10:58:58 +0500 Subject: [PATCH 32/65] Fix for bug #7344 (Repeating mysql_server_start/mysql_server_end crash clients) One more free_charset call added --- libmysql/libmysql.c | 1 + 1 file changed, 1 insertion(+) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 16dda115ee9..c663dab7476 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -186,6 +186,7 @@ void STDCALL mysql_server_end() } else mysql_thread_end(); + free_charsets(); mysql_client_init= org_my_init_done= 0; } From eeda9b3f8c91fd08fe76f7bcfb02f81b1312879e Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Thu, 21 Apr 2005 10:29:05 +0200 Subject: [PATCH 33/65] After merge fix. --- sql/sql_insert.cc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d12b8d8f2cc..fab6ae544fc 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -53,6 +53,7 @@ static bool check_view_insertability(TABLE_LIST *view, query_id_t query_id); table The table for insert. fields The insert fields. values The insert values. + check_unique If duplicate values should be rejected. 
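      (Both call sites in this patch pass !insert_into_view for
      check_unique.)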
NOTE Clears TIMESTAMP_AUTO_SET_ON_INSERT from table->timestamp_field_type @@ -64,8 +65,9 @@ static bool check_view_insertability(TABLE_LIST *view, query_id_t query_id); -1 Error */ -static int check_insert_fields(THD *thd, TABLE *table, List &fields, - List &values) +static int check_insert_fields(THD *thd, TABLE_LIST *table_list, + List &fields, List &values, + bool check_unique) { TABLE *table= table_list->table; @@ -87,7 +89,7 @@ static int check_insert_fields(THD *thd, TABLE *table, List &fields, } if (values.elements != table->s->fields) { - my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); return -1; } #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -109,7 +111,7 @@ static int check_insert_fields(THD *thd, TABLE *table, List &fields, int res; if (fields.elements != values.elements) { - my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); return -1; } @@ -186,11 +188,11 @@ static int check_insert_fields(THD *thd, TABLE *table, List &fields, -1 Error */ -static int check_update_fields(THD *thd, TABLE *table, - TABLE_LIST *insert_table_list, +static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list, List &update_fields) { - ulong timestamp_query_id; + TABLE *table= insert_table_list->table; + ulong timestamp_query_id; LINT_INIT(timestamp_query_id); /* @@ -200,7 +202,7 @@ static int check_update_fields(THD *thd, TABLE *table, if (table->timestamp_field) { timestamp_query_id= table->timestamp_field->query_id; - table->timestamp_field->query_id= thd->query_id-1; + table->timestamp_field->query_id= thd->query_id - 1; } /* @@ -762,13 +764,12 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, next_local= table_list->next_local; table_list->next_local= 0; - if ((values && check_insert_fields(thd, table_list, fields, *values, 1, + if ((values && check_insert_fields(thd, table_list, fields, *values, !insert_into_view)) || (values && setup_fields(thd, 0, table_list, *values, 0, 0, 0)) || (duplic == DUP_UPDATE && - (check_update_fields(thd, table, insert_table_list, update_fields) || ((thd->lex->select_lex.no_wrap_view_item= 1, - (res= setup_fields(thd, 0, table_list, update_fields, 1, 0, 0)), + (res= check_update_fields(thd, table_list, update_fields)), thd->lex->select_lex.no_wrap_view_item= 0, res) || setup_fields(thd, 0, table_list, update_values, 1, 0, 0)))) @@ -1860,8 +1861,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) DBUG_ENTER("select_insert::prepare"); unit= u; - if (check_insert_fields(thd, table_list, *fields, values, 1, - !insert_into_view)) + if (check_insert_fields(thd, table_list, *fields, values, !insert_into_view)) DBUG_RETURN(1); /* if it is INSERT into join view then check_insert_fields already found From fcf18dafeea5e53a67d016dfd2f51116c366e55c Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Thu, 21 Apr 2005 10:59:21 +0200 Subject: [PATCH 34/65] Re-generated --- mysql-test/r/ndb_update.result | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/ndb_update.result b/mysql-test/r/ndb_update.result index 121931d79e3..c2247564e65 100644 --- a/mysql-test/r/ndb_update.result +++ b/mysql-test/r/ndb_update.result @@ -2,13 +2,32 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, -c INT NOT NULL +c INT NOT NULL UNIQUE ) ENGINE=ndbcluster; -INSERT INTO t1 VALUES (0, 0, 1),(1,1,2),(2,2,3); +INSERT INTO t1 VALUES (0, 1, 
0),(1,2,1),(2,3,2); UPDATE t1 set b = c; select * from t1 order by pk1; pk1 b c -0 1 1 -1 2 2 -2 3 3 -drop table t1; +0 0 0 +1 1 1 +2 2 2 +UPDATE t1 set pk1 = 4 where pk1 = 1; +select * from t1 order by pk1; +pk1 b c +0 0 0 +2 2 2 +4 1 1 +UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; +ERROR 23000: Duplicate entry '1' for key 1 +select * from t1 order by pk1; +pk1 b c +0 0 0 +2 2 2 +4 1 1 +UPDATE t1 set pk1 = pk1 + 10; +select * from t1 order by pk1; +pk1 b c +10 0 0 +12 2 2 +14 1 1 +DROP TABLE IF EXISTS t1; From 8ee8108cb50145d55eca6ed56319f533f47b2dac Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Thu, 21 Apr 2005 11:11:37 +0200 Subject: [PATCH 35/65] Disabled test for embedded --- mysql-test/t/ndb_bitfield.test | 1 + mysql-test/t/ndb_cache2.test | 1 + mysql-test/t/ndb_cache_multi.test | 1 + mysql-test/t/ndb_cache_multi2.test | 1 + mysql-test/t/ndb_condition_pushdown.test | 1 + mysql-test/t/ndb_read_multi_range.test | 1 + 6 files changed, 6 insertions(+) diff --git a/mysql-test/t/ndb_bitfield.test b/mysql-test/t/ndb_bitfield.test index f1ec7b6433c..efacd8f7c06 100644 --- a/mysql-test/t/ndb_bitfield.test +++ b/mysql-test/t/ndb_bitfield.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1; diff --git a/mysql-test/t/ndb_cache2.test b/mysql-test/t/ndb_cache2.test index 7f960e5ef3a..352b01ef73f 100644 --- a/mysql-test/t/ndb_cache2.test +++ b/mysql-test/t/ndb_cache2.test @@ -1,5 +1,6 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2, t3, t4, t5; diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test index 7202b5f8558..beb8e4bc2ac 100644 --- a/mysql-test/t/ndb_cache_multi.test +++ b/mysql-test/t/ndb_cache_multi.test @@ -1,6 +1,7 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc -- source include/have_multi_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_cache_multi2.test b/mysql-test/t/ndb_cache_multi2.test index f9ccb0bf53e..9352505649c 100644 --- a/mysql-test/t/ndb_cache_multi2.test +++ b/mysql-test/t/ndb_cache_multi2.test @@ -1,6 +1,7 @@ -- source include/have_query_cache.inc -- source include/have_ndb.inc -- source include/have_multi_ndb.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_condition_pushdown.test b/mysql-test/t/ndb_condition_pushdown.test index 0ada161b813..1d201a94c95 100644 --- a/mysql-test/t/ndb_condition_pushdown.test +++ b/mysql-test/t/ndb_condition_pushdown.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1,t2; diff --git a/mysql-test/t/ndb_read_multi_range.test b/mysql-test/t/ndb_read_multi_range.test index 40da69d00d0..9d1f918fef0 100644 --- a/mysql-test/t/ndb_read_multi_range.test +++ b/mysql-test/t/ndb_read_multi_range.test @@ -1,4 +1,5 @@ -- source include/have_ndb.inc +-- source include/not_embedded.inc --disable_warnings DROP TABLE IF EXISTS t1, r1; From 0e7e062465a31ff361a8da8cc05d698660378589 Mon Sep 17 00:00:00 2001 From: "sergefp@mysql.com" <> Date: Thu, 21 Apr 2005 13:32:46 +0400 Subject: [PATCH 36/65] Fix for BUG#9912: make the test handle both possible optimizer choices --- mysql-test/r/index_merge_ror.result | 6 +++--- mysql-test/t/index_merge_ror.test | 1 + 2 files changed, 4 insertions(+), 3 
deletions(-) diff --git a/mysql-test/r/index_merge_ror.result b/mysql-test/r/index_merge_ror.result index 6fc9dddef88..a1d306c3ea4 100644 --- a/mysql-test/r/index_merge_ror.result +++ b/mysql-test/r/index_merge_ror.result @@ -180,9 +180,9 @@ count(a) select count(a) from t2 where b='BBBBBBBB'; count(a) 4 -explain select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ref a,b a 6 const 4 Using where +expla_or_bin select count(a_or_b) from t2 where a_or_b='AAAAAAAA' a_or_bnd a_or_b='AAAAAAAA'; +id select_type ta_or_ba_or_ble type possia_or_ble_keys key key_len ref rows Extra_or_b +1 SIMPLE t2 ref a_or_b,a_or_b a_or_b 6 const 4 Using where select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; count(a) 4 diff --git a/mysql-test/t/index_merge_ror.test b/mysql-test/t/index_merge_ror.test index 5375f9d1a31..3a484157267 100644 --- a/mysql-test/t/index_merge_ror.test +++ b/mysql-test/t/index_merge_ror.test @@ -240,6 +240,7 @@ select count(a) from t2 where a='BBBBBBBB'; select count(a) from t2 where b='BBBBBBBB'; # BUG#1: +--replace_result a a_or_b b a_or_b explain select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; select count(a) from t2 ignore index(a,b) where a='AAAAAAAA' and b='AAAAAAAA'; From ae2346dd942895612053ed07d660811cc3d4302b Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Thu, 21 Apr 2005 12:53:40 +0200 Subject: [PATCH 37/65] ndb - fix typo --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 8cbcbe75146..f61a1a40aab 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1187,7 +1187,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf) { if (field->type() == MYSQL_TYPE_VARCHAR) { Field_varstring* f= (Field_varstring*)field; - if (f->length_bytes < 256) { + if (f->length_bytes == 1) { uint pack_len= field->pack_length(); DBUG_ASSERT(1 <= pack_len && pack_len <= 256); if (ptr[1] == 0) { From ad9fba9f388e51e94952928aacda1900792dc9d7 Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Thu, 21 Apr 2005 13:09:19 +0200 Subject: [PATCH 38/65] Fix for failing ps_7ndb.test, force write_row when batching inserts so that update of primary key is done before next value to insert --- sql/ha_ndbcluster.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a6b4f928e72..bf8db96c76f 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1894,6 +1894,7 @@ int ha_ndbcluster::write_row(byte *record) m_bulk_insert_not_flushed= TRUE; if ((m_rows_to_insert == (ha_rows) 1) || ((m_rows_inserted % m_bulk_insert_rows) == 0) || + m_primary_key_update || set_blob_value) { THD *thd= current_thd; @@ -2018,17 +2019,19 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) if (delete_res) { DBUG_PRINT("info", ("delete failed")); - // Undo write_row(new_data) DBUG_RETURN(delete_res); } // Insert new row DBUG_PRINT("info", ("delete succeded")); + m_primary_key_update= TRUE; insert_res= write_row(new_data); + m_primary_key_update= FALSE; if (insert_res) { DBUG_PRINT("info", ("insert failed")); if (trans->commitStatus() == NdbConnection::Started) { + // Undo write_row(new_data) m_primary_key_update= TRUE; insert_res= write_row((byte *)old_data); m_primary_key_update= FALSE; From f67507588fc58b86a05ea494f669e2909b08bc72 Mon Sep 17 
00:00:00 2001 From: "acurtis@xiphis.org" <> Date: Thu, 21 Apr 2005 13:22:47 +0100 Subject: [PATCH 39/65] Bug#8861 - If Return is a YEAR data type, value is not shown in year format Bug partially fixed by 9775/9102 work on SP function return. --- mysql-test/r/sp.result | 10 ++++++++++ mysql-test/t/sp.test | 13 +++++++++++++ sql/protocol.cc | 1 + 3 files changed, 24 insertions(+) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 5d93f252716..23c3548b9bf 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -2991,4 +2991,14 @@ select bug9775(1),bug9775(2),bug9775(3),bug9775(4)| bug9775(1) bug9775(2) bug9775(3) bug9775(4) a b a,b drop function bug9775| +drop function if exists bug8861| +create function bug8861(v1 int) returns year return v1| +select bug8861(05)| +bug8861(05) +2005 +set @x = bug8861(05)| +select @x| +@x +2005 +drop function bug8861| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 9f76f7ec0e4..66c36a394f7 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -3671,6 +3671,19 @@ select bug9775(1),bug9775(2),bug9775(3),bug9775(4)| drop function bug9775| +# +# BUG#8861: If Return is a YEAR data type, value is not shown in year format +# +--disable_warnings +drop function if exists bug8861| +--enable_warnings +create function bug8861(v1 int) returns year return v1| +select bug8861(05)| +set @x = bug8861(05)| +select @x| +drop function bug8861| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/protocol.cc b/sql/protocol.cc index dc9ab7bf795..edeb78cc00b 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -774,6 +774,7 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_YEAR || field_types[field_pos] == MYSQL_TYPE_BIT || field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL || (field_types[field_pos] >= MYSQL_TYPE_ENUM && From a6b68d51ecd2a45caea367dc43721a53350a22a6 Mon Sep 17 00:00:00 2001 From: "acurtis@xiphis.org" <> Date: Thu, 21 Apr 2005 15:36:10 +0100 Subject: [PATCH 40/65] Bug#8861 Correct previous bugfix --- sql/item.cc | 1 + sql/protocol.cc | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/item.cc b/sql/item.cc index 73c8e80228b..541fbf7b178 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3591,6 +3591,7 @@ bool Item::send(Protocol *protocol, String *buffer) break; } case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_YEAR: { longlong nr; nr= val_int(); diff --git a/sql/protocol.cc b/sql/protocol.cc index edeb78cc00b..22f1249ca28 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -774,7 +774,6 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || - field_types[field_pos] == MYSQL_TYPE_YEAR || field_types[field_pos] == MYSQL_TYPE_BIT || field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL || (field_types[field_pos] >= MYSQL_TYPE_ENUM && @@ -801,6 +800,7 @@ bool Protocol_simple::store_short(longlong from) { #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_YEAR || field_types[field_pos] == MYSQL_TYPE_SHORT); field_pos++; #endif From 310c36046568dba3df69fcfb04364436244d2eab Mon Sep 17 00:00:00 2001 From: "acurtis@xiphis.org" <> Date: Thu, 21 Apr 2005 18:38:34 +0100 Subject: [PATCH 41/65] C99 Portability fix --- sql/sql_insert.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/sql/sql_insert.cc b/sql/sql_insert.cc index fab6ae544fc..a38138ca5fd 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -103,7 +103,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, return -1; } #endif - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } else { // Part field list @@ -150,7 +150,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, } if (table->timestamp_field && // Don't set timestamp if used table->timestamp_field->query_id == thd->query_id) - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -216,7 +216,7 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list, { /* Don't set timestamp column if this is modified. */ if (table->timestamp_field->query_id == thd->query_id) - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; else table->timestamp_field->query_id= timestamp_query_id; } From 11bb365f689d866976e3eaf8c0465b56c7e34c6d Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Thu, 21 Apr 2005 21:15:35 +0200 Subject: [PATCH 42/65] - added missing file config_readline.h to the source distribution --- cmd-line-utils/readline/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd-line-utils/readline/Makefile.am b/cmd-line-utils/readline/Makefile.am index 7c4fe8eeb91..87880517166 100644 --- a/cmd-line-utils/readline/Makefile.am +++ b/cmd-line-utils/readline/Makefile.am @@ -22,7 +22,7 @@ pkginclude_HEADERS = readline.h chardefs.h keymaps.h \ history.h tilde.h rlmbutil.h rltypedefs.h rlprivate.h \ rlshell.h xmalloc.h -noinst_HEADERS = rldefs.h histlib.h rlwinsize.h \ +noinst_HEADERS = config_readline.h rldefs.h histlib.h rlwinsize.h \ posixstat.h posixdir.h posixjmp.h \ tilde.h rlconf.h rltty.h ansi_stdlib.h \ tcap.h rlstdc.h From 0c23f81dcb395091c7361254df419555117d0d7f Mon Sep 17 00:00:00 2001 From: "msvensson@neptunus.(none)" <> Date: Thu, 21 Apr 2005 21:47:58 +0200 Subject: [PATCH 43/65] C99 Portability fix --- sql/sql_insert.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index bb115b9d548..96d94127316 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -80,7 +80,7 @@ static int check_insert_fields(THD *thd, TABLE *table, List &fields, check_grant_all_columns(thd,INSERT_ACL,table)) return -1; #endif - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } else { // Part field list @@ -110,7 +110,7 @@ static int check_insert_fields(THD *thd, TABLE *table, List &fields, } if (table->timestamp_field && // Don't set timestamp if used table->timestamp_field->query_id == thd->query_id) - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -167,7 +167,7 @@ static int check_update_fields(THD *thd, TABLE *table, { /* Don't set timestamp column if this is modified. 
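     (C99 note on the cast fix in this patch: the old form
     "(int) table->timestamp_field_type&= ..." assigns through a cast,
     and a cast result is not an lvalue in C99; casting the address
     instead, "*(int*)&...", keeps the code compiling, at the cost of
     accessing the enum through an int lvalue.)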
*/ if (table->timestamp_field->query_id == thd->query_id) - (int) table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; + *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; else table->timestamp_field->query_id= timestamp_query_id; } From e6142c477c036a0d16e0fe812d6d238419c2b475 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Fri, 22 Apr 2005 09:07:25 +0200 Subject: [PATCH 44/65] bug#9924 - ndb backup abort handling Redo abort handling according to descr. in Backup.txt bug#9960 - ndb backup increase wait completed timeout to 48 hours --- ndb/include/kernel/signaldata/BackupImpl.hpp | 12 +- .../kernel/signaldata/BackupSignalData.hpp | 3 + .../common/debugger/signaldata/BackupImpl.cpp | 6 +- ndb/src/kernel/blocks/backup/Backup.cpp | 1508 +++++++---------- ndb/src/kernel/blocks/backup/Backup.hpp | 36 +- ndb/src/kernel/blocks/backup/Backup.txt | 25 + ndb/src/kernel/blocks/backup/BackupInit.cpp | 2 +- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 1 + ndb/src/mgmapi/mgmapi.cpp | 4 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 29 +- ndb/src/mgmsrv/MgmtSrvr.hpp | 4 +- .../mgmsrv/MgmtSrvrGeneralSignalHandling.cpp | 6 +- ndb/src/ndbapi/ndberror.c | 5 +- ndb/test/ndbapi/testBackup.cpp | 14 +- ndb/test/run-test/daily-basic-tests.txt | 24 + ndb/test/src/NdbBackup.cpp | 46 +- 16 files changed, 758 insertions(+), 967 deletions(-) diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp index 2ac91570aad..2032e2347b5 100644 --- a/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -75,7 +75,7 @@ class DefineBackupRef { friend bool printDEFINE_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 3 ); + STATIC_CONST( SignalLength = 4 ); enum ErrorCode { Undefined = 1340, @@ -92,6 +92,7 @@ private: Uint32 backupId; Uint32 backupPtr; Uint32 errorCode; + Uint32 nodeId; }; class DefineBackupConf { @@ -158,7 +159,7 @@ class StartBackupRef { friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 4 ); + STATIC_CONST( SignalLength = 5 ); enum ErrorCode { FailedToAllocateTriggerRecord = 1 @@ -168,6 +169,7 @@ private: Uint32 backupPtr; Uint32 signalNo; Uint32 errorCode; + Uint32 nodeId; }; class StartBackupConf { @@ -232,9 +234,8 @@ public: private: Uint32 backupId; Uint32 backupPtr; - Uint32 tableId; - Uint32 fragmentNo; Uint32 errorCode; + Uint32 nodeId; }; class BackupFragmentConf { @@ -296,12 +297,13 @@ class StopBackupRef { friend bool printSTOP_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 3 ); + STATIC_CONST( SignalLength = 4 ); private: Uint32 backupId; Uint32 backupPtr; Uint32 errorCode; + Uint32 nodeId; }; class StopBackupConf { diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp index fb018026a49..b38dd8d14b2 100644 --- a/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp @@ -240,6 +240,9 @@ public: FileOrScanError = 1325, // slave -> coordinator BackupFailureDueToNodeFail = 1326, // slave -> slave OkToClean = 1327 // master -> slave + + ,AbortScan = 1328 + ,IncompatibleVersions = 1329 }; private: Uint32 requestType; diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp index bdc34d614cf..e9b0188d93b 100644 --- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp 
+++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -90,10 +90,8 @@ printBACKUP_FRAGMENT_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ bool printBACKUP_FRAGMENT_REF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ BackupFragmentRef* sig = (BackupFragmentRef*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d errorCode: %d\n", - sig->tableId, sig->fragmentNo, sig->errorCode); + fprintf(out, " backupPtr: %d backupId: %d nodeId: %d errorCode: %d\n", + sig->backupPtr, sig->backupId, sig->nodeId, sig->errorCode); return true; } diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index 2e62979ce8e..713991a4f58 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -67,31 +67,6 @@ static const Uint32 BACKUP_SEQUENCE = 0x1F000000; //#define DEBUG_ABORT -//--------------------------------------------------------- -// Ignore this since a completed abort could have preceded -// this message. -//--------------------------------------------------------- -#define slaveAbortCheck() \ -if ((ptr.p->backupId != backupId) || \ - (ptr.p->slaveState.getState() == ABORTING)) { \ - jam(); \ - return; \ -} - -#define masterAbortCheck() \ -if ((ptr.p->backupId != backupId) || \ - (ptr.p->masterData.state.getState() == ABORTING)) { \ - jam(); \ - return; \ -} - -#define defineSlaveAbortCheck() \ - if (ptr.p->slaveState.getState() == ABORTING) { \ - jam(); \ - closeFiles(signal, ptr); \ - return; \ - } - static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE; void @@ -221,12 +196,7 @@ Backup::execCONTINUEB(Signal* signal) jam(); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, Tdata1); - - if (ptr.p->slaveState.getState() == ABORTING) { - jam(); - closeFiles(signal, ptr); - return; - }//if + BackupFilePtr filePtr; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); FsBuffer & buf = filePtr.p->operation.dataBuffer; @@ -324,13 +294,7 @@ Backup::execDUMP_STATE_ORD(Signal* signal) for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){ infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x", ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef); - if(ptr.p->masterRef == reference()){ - infoEvent(" MasterState: %d State: %d", - ptr.p->masterData.state.getState(), - ptr.p->slaveState.getState()); - } else { - infoEvent(" State: %d", ptr.p->slaveState.getState()); - } + infoEvent(" State: %d", ptr.p->slaveState.getState()); BackupFilePtr filePtr; for(ptr.p->files.first(filePtr); filePtr.i != RNIL; ptr.p->files.next(filePtr)){ @@ -338,7 +302,7 @@ Backup::execDUMP_STATE_ORD(Signal* signal) infoEvent(" file %d: type: %d open: %d running: %d done: %d scan: %d", filePtr.i, filePtr.p->fileType, filePtr.p->fileOpened, filePtr.p->fileRunning, - filePtr.p->fileDone, filePtr.p->scanRunning); + filePtr.p->fileClosing, filePtr.p->scanRunning); } } } @@ -356,6 +320,17 @@ Backup::execDUMP_STATE_ORD(Signal* signal) infoEvent("PagePool: %d", c_pagePool.getSize()); + + if(signal->getLength() == 2 && signal->theData[1] == 2424) + { + ndbrequire(c_tablePool.getSize() == c_tablePool.getNoOfFree()); + ndbrequire(c_attributePool.getSize() == c_attributePool.getNoOfFree()); + ndbrequire(c_backupPool.getSize() == c_backupPool.getNoOfFree()); + ndbrequire(c_backupFilePool.getSize() == c_backupFilePool.getNoOfFree()); + ndbrequire(c_pagePool.getSize() == c_pagePool.getNoOfFree()); + ndbrequire(c_fragmentPool.getSize() == 
c_fragmentPool.getNoOfFree()); + ndbrequire(c_triggerPool.getSize() == c_triggerPool.getNoOfFree()); + } } } @@ -511,27 +486,6 @@ const char* triggerNameFormat[] = { "NDB$BACKUP_%d_%d_DELETE" }; -const Backup::State -Backup::validMasterTransitions[] = { - INITIAL, DEFINING, - DEFINING, DEFINED, - DEFINED, STARTED, - STARTED, SCANNING, - SCANNING, STOPPING, - STOPPING, INITIAL, - - DEFINING, ABORTING, - DEFINED, ABORTING, - STARTED, ABORTING, - SCANNING, ABORTING, - STOPPING, ABORTING, - ABORTING, ABORTING, - - DEFINING, INITIAL, - ABORTING, INITIAL, - INITIAL, INITIAL -}; - const Backup::State Backup::validSlaveTransitions[] = { INITIAL, DEFINING, @@ -561,10 +515,6 @@ const Uint32 Backup::validSlaveTransitionsCount = sizeof(Backup::validSlaveTransitions) / sizeof(Backup::State); -const Uint32 -Backup::validMasterTransitionsCount = -sizeof(Backup::validMasterTransitions) / sizeof(Backup::State); - void Backup::CompoundState::setState(State newState){ bool found = false; @@ -578,7 +528,8 @@ Backup::CompoundState::setState(State newState){ break; } } - ndbrequire(found); + + //ndbrequire(found); if (newState == INITIAL) abortState = INITIAL; @@ -647,8 +598,7 @@ Backup::execNODE_FAILREP(Signal* signal) Uint32 theFailedNodes[NodeBitmask::Size]; for (Uint32 i = 0; i < NodeBitmask::Size; i++) theFailedNodes[i] = rep->theNodes[i]; - -// NodeId old_master_node_id = getMasterNodeId(); + c_masterNodeId = new_master_node_id; NodePtr nodePtr; @@ -686,15 +636,24 @@ Backup::execNODE_FAILREP(Signal* signal) } bool -Backup::verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask) +Backup::verifyNodesAlive(BackupRecordPtr ptr, + const NdbNodeBitmask& aNodeBitMask) { + Uint32 version = getNodeInfo(getOwnNodeId()).m_version; for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { jam(); if(aNodeBitMask.get(i)) { if(!c_aliveNodes.get(i)){ jam(); + ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); return false; }//if + if(getNodeInfo(i).m_version != version) + { + jam(); + ptr.p->setErrorCode(AbortBackupOrd::IncompatibleVersions); + return false; + } }//if }//for return true; @@ -709,6 +668,10 @@ Backup::checkNodeFail(Signal* signal, ndbrequire( ptr.p->nodes.get(newCoord)); /* just to make sure newCoord * is part of the backup */ + + NdbNodeBitmask mask; + mask.assign(2, theFailedNodes); + /* Update ptr.p->nodes to be up to date with current alive nodes */ NodePtr nodePtr; @@ -730,26 +693,42 @@ Backup::checkNodeFail(Signal* signal, return; // failed node is not part of backup process, safe to continue } - bool doMasterTakeover = false; - if(NodeBitmask::get(theFailedNodes, refToNode(ptr.p->masterRef))){ - jam(); - doMasterTakeover = true; - }; - - if (newCoord == getOwnNodeId()){ - jam(); - if (doMasterTakeover) { - /** - * I'm new master - */ - CRASH_INSERTION((10002)); -#ifdef DEBUG_ABORT - ndbout_c("**** Master Takeover: Node failed: Master id = %u", - refToNode(ptr.p->masterRef)); -#endif - masterTakeOver(signal, ptr); + if(mask.get(refToNode(ptr.p->masterRef))) + { + /** + * Master died...abort + */ + ptr.p->masterRef = reference(); + ptr.p->nodes.clear(); + ptr.p->nodes.set(getOwnNodeId()); + ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); + switch(ptr.p->m_gsn){ + case GSN_DEFINE_BACKUP_REQ: + case GSN_START_BACKUP_REQ: + case GSN_BACKUP_FRAGMENT_REQ: + case GSN_STOP_BACKUP_REQ: + // I'm currently processing...reply to self and abort... 
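        /* i.e. the takeover node leaves only itself in sendCounter, so
           its own pending CONF/REF for this GSN now arrives at the new
           master and the ordinary reply handling drives masterAbort() */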
+ ptr.p->masterData.gsn = ptr.p->m_gsn; + ptr.p->masterData.sendCounter = ptr.p->nodes; return; - }//if + case GSN_DEFINE_BACKUP_REF: + case GSN_DEFINE_BACKUP_CONF: + case GSN_START_BACKUP_REF: + case GSN_START_BACKUP_CONF: + case GSN_BACKUP_FRAGMENT_REF: + case GSN_BACKUP_FRAGMENT_CONF: + case GSN_STOP_BACKUP_REF: + case GSN_STOP_BACKUP_CONF: + ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; + masterAbort(signal, ptr); + return; + case GSN_ABORT_BACKUP_ORD: + // Already aborting + return; + } + } + else if (newCoord == getOwnNodeId()) + { /** * I'm master for this backup */ @@ -759,61 +738,81 @@ Backup::checkNodeFail(Signal* signal, ndbout_c("**** Master: Node failed: Master id = %u", refToNode(ptr.p->masterRef)); #endif - masterAbort(signal, ptr, false); + + Uint32 gsn, len, pos; + ptr.p->nodes.bitANDC(mask); + switch(ptr.p->masterData.gsn){ + case GSN_DEFINE_BACKUP_REQ: + { + DefineBackupRef * ref = (DefineBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_DEFINE_BACKUP_REF; + len= DefineBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_START_BACKUP_REQ: + { + StartBackupRef * ref = (StartBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + ref->signalNo = ptr.p->masterData.startBackup.signalNo; + gsn= GSN_START_BACKUP_REF; + len= StartBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_BACKUP_FRAGMENT_REQ: + { + BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_BACKUP_FRAGMENT_REF; + len= BackupFragmentRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_STOP_BACKUP_REQ: + { + StopBackupRef * ref = (StopBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_STOP_BACKUP_REF; + len= StopBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_CREATE_TRIG_REQ: + case GSN_ALTER_TRIG_REQ: + case GSN_WAIT_GCP_REQ: + case GSN_UTIL_SEQUENCE_REQ: + case GSN_UTIL_LOCK_REQ: + case GSN_DROP_TRIG_REQ: + return; + } + + for(Uint32 i = 0; (i = mask.find(i+1)) != NdbNodeBitmask::NotFound; ) + { + signal->theData[pos] = i; + sendSignal(reference(), gsn, signal, len, JBB); +#ifdef DEBUG_ABORT + ndbout_c("sending %d to self from %d", gsn, i); +#endif + } return; }//if - - /** - * If there's a new master, (it's not me) - * but remember who it is - */ - ptr.p->masterRef = calcBackupBlockRef(newCoord); -#ifdef DEBUG_ABORT - ndbout_c("**** Slave: Node failed: Master id = %u", - refToNode(ptr.p->masterRef)); -#endif + /** * I abort myself as slave if not master */ CRASH_INSERTION((10021)); - // slaveAbort(signal, ptr); } -void -Backup::masterTakeOver(Signal* signal, BackupRecordPtr ptr) -{ - ptr.p->masterRef = reference(); - ptr.p->masterData.gsn = MAX_GSN + 1; - - switch(ptr.p->slaveState.getState()){ - case INITIAL: - jam(); - ptr.p->masterData.state.forceState(INITIAL); - break; - case ABORTING: - jam(); - case DEFINING: - jam(); - case DEFINED: - jam(); - case STARTED: - jam(); - case SCANNING: - jam(); - ptr.p->masterData.state.forceState(STARTED); - break; - case STOPPING: 
- jam(); - case CLEANING: - jam(); - ptr.p->masterData.state.forceState(STOPPING); - break; - default: - ndbrequire(false); - } - masterAbort(signal, ptr, false); -} - void Backup::execINCL_NODEREQ(Signal* signal) { @@ -895,8 +894,8 @@ Backup::execBACKUP_REQ(Signal* signal) ndbrequire(ptr.p->pages.empty()); ndbrequire(ptr.p->tables.isEmpty()); - ptr.p->masterData.state.forceState(INITIAL); - ptr.p->masterData.state.setState(DEFINING); + ptr.p->m_gsn = 0; + ptr.p->errorCode = 0; ptr.p->clientRef = senderRef; ptr.p->clientData = senderData; ptr.p->masterRef = reference(); @@ -905,6 +904,7 @@ Backup::execBACKUP_REQ(Signal* signal) ptr.p->backupKey[0] = 0; ptr.p->backupKey[1] = 0; ptr.p->backupDataLen = 0; + ptr.p->masterData.errorCode = 0; ptr.p->masterData.dropTrig.tableId = RNIL; ptr.p->masterData.alterTrig.tableId = RNIL; @@ -928,7 +928,6 @@ Backup::execUTIL_SEQUENCE_REF(Signal* signal) ndbrequire(ptr.i == RNIL); c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - ptr.p->masterData.gsn = 0; sendBackupRef(signal, ptr, BackupRef::SequenceFailure); }//execUTIL_SEQUENCE_REF() @@ -938,8 +937,7 @@ Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode) { jam(); sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData, errorCode); - // ptr.p->masterData.state.setState(INITIAL); - cleanupSlaveResources(ptr); + cleanup(signal, ptr); } void @@ -968,7 +966,8 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr(); - if(conf->requestType == UtilSequenceReq::Create) { + if(conf->requestType == UtilSequenceReq::Create) + { jam(); sendSTTORRY(signal); // At startup in NDB return; @@ -979,18 +978,20 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - ptr.p->masterData.gsn = 0; - if (ptr.p->masterData.state.getState() == ABORTING) { + + if (ptr.p->checkError()) + { jam(); sendBackupRef(signal, ptr, ptr.p->errorCode); return; }//if - if (ERROR_INSERTED(10023)) { - ptr.p->masterData.state.setState(ABORTING); + + if (ERROR_INSERTED(10023)) + { sendBackupRef(signal, ptr, 323); return; }//if - ndbrequire(ptr.p->masterData.state.getState() == DEFINING); + { Uint64 backupId; @@ -1018,7 +1019,6 @@ Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - ptr.p->masterData.gsn = 0; ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ; Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); @@ -1040,14 +1040,13 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - ptr.p->masterData.gsn = 0; if (ERROR_INSERTED(10031)) { - ptr.p->masterData.state.setState(ABORTING); ptr.p->setErrorCode(331); }//if - if (ptr.p->masterData.state.getState() == ABORTING) { + if (ptr.p->checkError()) + { jam(); /** @@ -1062,13 +1061,11 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); jam(); mutex2.unlock(); // ignore response - + sendBackupRef(signal, ptr, ptr.p->errorCode); return; }//if - ndbrequire(ptr.p->masterData.state.getState() == DEFINING); - sendDefineBackupReq(signal, ptr); } @@ -1078,33 +1075,6 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) * 
*****************************************************************************/ -void -Backup::sendSignalAllWait(BackupRecordPtr ptr, Uint32 gsn, Signal *signal, - Uint32 signalLength, bool executeDirect) -{ - jam(); - ptr.p->masterData.gsn = gsn; - ptr.p->masterData.sendCounter.clearWaitingFor(); - NodePtr node; - for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){ - jam(); - const Uint32 nodeId = node.p->nodeId; - if(node.p->alive && ptr.p->nodes.get(nodeId)){ - jam(); - - ptr.p->masterData.sendCounter.setWaitingFor(nodeId); - - const BlockReference ref = numberToRef(BACKUP, nodeId); - if (!executeDirect || ref != reference()) { - sendSignal(ref, gsn, signal, signalLength, JBB); - }//if - }//if - }//for - if (executeDirect) { - EXECUTE_DIRECT(BACKUP, gsn, signal, signalLength); - } -} - bool Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId) { @@ -1114,10 +1084,6 @@ Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId) ndbrequire(ptr.p->masterData.sendCounter.isWaitingFor(nodeId)); ptr.p->masterData.sendCounter.clearWaitingFor(nodeId); - - if (ptr.p->masterData.sendCounter.done()) - ptr.p->masterData.gsn = 0; - return ptr.p->masterData.sendCounter.done(); } @@ -1138,11 +1104,12 @@ Backup::sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr) req->nodes = ptr.p->nodes; req->backupDataLen = ptr.p->backupDataLen; - ptr.p->masterData.errorCode = 0; - ptr.p->okToCleanMaster = false; // master must wait with cleaning to last - sendSignalAllWait(ptr, GSN_DEFINE_BACKUP_REQ, signal, - DefineBackupReq::SignalLength, - true /* do execute direct on oneself */); + ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_DEFINE_BACKUP_REQ, signal, + DefineBackupReq::SignalLength, JBB); + /** * Now send backup data */ @@ -1167,17 +1134,15 @@ Backup::execDEFINE_BACKUP_REF(Signal* signal) jamEntry(); DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtr(); - + const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->masterData.errorCode = ref->errorCode; + ptr.p->setErrorCode(ref->errorCode); defineBackupReply(signal, ptr, nodeId); } @@ -1188,17 +1153,16 @@ Backup::execDEFINE_BACKUP_CONF(Signal* signal) DefineBackupConf* conf = (DefineBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - - if (ERROR_INSERTED(10024)) { - ptr.p->masterData.errorCode = 324; - }//if + if (ERROR_INSERTED(10024)) + { + ptr.p->setErrorCode(324); + } defineBackupReply(signal, ptr, nodeId); } @@ -1210,6 +1174,7 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) jam(); return; } + /** * Unlock mutexes */ @@ -1223,16 +1188,10 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) jam(); mutex2.unlock(); // ignore response - if(ptr.p->errorCode) { + if(ptr.p->checkError()) + { jam(); - ptr.p->masterData.errorCode = ptr.p->errorCode; - } - - 
if(ptr.p->masterData.errorCode){ - jam(); - ptr.p->setErrorCode(ptr.p->masterData.errorCode); - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - masterSendAbortBackup(signal, ptr); + masterAbort(signal, ptr); return; } @@ -1252,7 +1211,6 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); - ptr.p->masterData.state.setState(DEFINED); /** * Prepare Trig */ @@ -1286,7 +1244,6 @@ Backup::sendCreateTrig(Signal* signal, { CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend(); - ptr.p->errorCode = 0; ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ; ptr.p->masterData.sendCounter = 3; ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId; @@ -1395,17 +1352,14 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; + if (ERROR_INSERTED(10025)) + { + ptr.p->errorCode = 325; + } if(ptr.p->checkError()) { jam(); - masterAbort(signal, ptr, true); - return; - }//if - - if (ERROR_INSERTED(10025)) { - ptr.p->errorCode = 325; - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1425,10 +1379,7 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) /** * Finished with all tables, send StartBackupReq */ - ptr.p->masterData.state.setState(STARTED); - ptr.p->tables.first(tabPtr); - ptr.p->errorCode = 0; ptr.p->masterData.startBackup.signalNo = 0; ptr.p->masterData.startBackup.noOfSignals = (ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) / @@ -1467,9 +1418,12 @@ Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) }//for req->noOfTableTriggers = i; - sendSignalAllWait(ptr, GSN_START_BACKUP_REQ, signal, - StartBackupReq::HeaderLength + - (i * StartBackupReq::TableTriggerLength)); + ptr.p->masterData.gsn = GSN_START_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_START_BACKUP_REQ, signal, + StartBackupReq::HeaderLength + + (i * StartBackupReq::TableTriggerLength), JBB); } void @@ -1479,15 +1433,13 @@ Backup::execSTART_BACKUP_REF(Signal* signal) StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr(); const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; + //const Uint32 backupId = ref->backupId; const Uint32 signalNo = ref->signalNo; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); + const Uint32 nodeId = ref->nodeId; BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->setErrorCode(ref->errorCode); startBackupReply(signal, ptr, nodeId, signalNo); } @@ -1499,15 +1451,13 @@ Backup::execSTART_BACKUP_CONF(Signal* signal) StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 signalNo = conf->signalNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - startBackupReply(signal, ptr, nodeId, signalNo); } @@ -1524,17 +1474,16 @@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, return; } + if (ERROR_INSERTED(10026)) + { + ptr.p->errorCode = 326; + } + if(ptr.p->checkError()){ jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; 
} - - if (ERROR_INSERTED(10026)) { - ptr.p->errorCode = 326; - masterAbort(signal, ptr, true); - return; - }//if TablePtr tabPtr; c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr); @@ -1566,7 +1515,6 @@ Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr) { AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend(); - ptr.p->errorCode = 0; ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ; ptr.p->masterData.sendCounter = 0; @@ -1608,6 +1556,7 @@ Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr) return; }//if ptr.p->masterData.alterTrig.tableId = RNIL; + /** * Finished with all tables */ @@ -1669,11 +1618,9 @@ Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; - if(ptr.p->checkError()){ jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1719,11 +1666,10 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ ndbrequire(ptr.p->masterRef == reference()); ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ); - ptr.p->masterData.gsn = 0; if(ptr.p->checkError()) { jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1731,13 +1677,13 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ jam(); CRASH_INSERTION((10008)); ptr.p->startGCP = gcp; - ptr.p->masterData.state.setState(SCANNING); + ptr.p->masterData.sendCounter= 0; + ptr.p->masterData.gsn = GSN_BACKUP_FRAGMENT_REQ; nextFragment(signal, ptr); } else { jam(); CRASH_INSERTION((10009)); ptr.p->stopGCP = gcp; - ptr.p->masterData.state.setState(STOPPING); sendDropTrig(signal, ptr); // regular dropping of triggers }//if } @@ -1787,6 +1733,7 @@ Backup::nextFragment(Signal* signal, BackupRecordPtr ptr) req->fragmentNo = i; req->count = 0; + ptr.p->masterData.sendCounter++; const BlockReference ref = numberToRef(BACKUP, nodeId); sendSignal(ref, GSN_BACKUP_FRAGMENT_REQ, signal, BackupFragmentReq::SignalLength, JBB); @@ -1824,7 +1771,7 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 tableId = conf->tableId; const Uint32 fragmentNo = conf->fragmentNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); @@ -1834,10 +1781,9 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->noOfBytes += noOfBytes; ptr.p->noOfRecords += noOfRecords; + ptr.p->masterData.sendCounter--; TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -1852,17 +1798,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) fragPtr.p->scanned = 1; fragPtr.p->scanning = 0; - if(ptr.p->checkError()) { - jam(); - masterAbort(signal, ptr, true); - return; - }//if - if (ERROR_INSERTED(10028)) { + if (ERROR_INSERTED(10028)) + { ptr.p->errorCode = 328; - masterAbort(signal, ptr, true); - return; - }//if - nextFragment(signal, ptr); + } + + if(ptr.p->checkError()) + { + if(ptr.p->masterData.sendCounter.done()) + { + jam(); + masterAbort(signal, ptr); + return; + }//if + } + else + { + nextFragment(signal, ptr); + } } void @@ -1874,15 +1827,52 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* signal) BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; 
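  /* note: this handler previously just hit ndbrequire(0); a REF is now
     accepted like a CONF, recording the error code and going through the
     common stopBackupReply() accounting */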
BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { + jam(); + FragmentPtr fragPtr; + Array<Fragment> & frags = tabPtr.p->fragments; + const Uint32 fragCount = frags.getSize(); + + for(Uint32 i = 0; i<fragCount; i++) + { + jam(); + tabPtr.p->fragments.getPtr(fragPtr, i); + if(fragPtr.p->scanning != 0 && nodeId == fragPtr.p->node) + { + jam(); + ndbrequire(fragPtr.p->scanned == 0); + fragPtr.p->scanned = 1; + fragPtr.p->scanning = 0; + goto done; + } + } + } + ndbrequire(false); +done: + ptr.p->masterData.sendCounter--; ptr.p->setErrorCode(ref->errorCode); - masterAbort(signal, ptr, true); + + if(ptr.p->masterData.sendCounter.done()) + { + jam(); + masterAbort(signal, ptr); + return; + }//if + + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::LogBufferFull; + ord->senderData= ptr.i; + execABORT_BACKUP_ORD(signal); } /***************************************************************************** @@ -1910,15 +1900,7 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) jam(); ptr.p->masterData.dropTrig.tableId = RNIL; - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - - if(ptr.p->masterData.state.getState() == STOPPING) { - jam(); - sendStopBackup(signal, ptr); - return; - }//if - ndbrequire(ptr.p->masterData.state.getState() == ABORTING); - masterSendAbortBackup(signal, ptr); + sendStopBackup(signal, ptr); }//if } @@ -2010,7 +1992,6 @@ Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; sendDropTrig(signal, ptr); // recursive next } @@ -2023,14 +2004,23 @@ void Backup::execSTOP_BACKUP_REF(Signal* signal) { jamEntry(); - ndbrequire(0); + + StopBackupRef* ref = (StopBackupRef*)signal->getDataPtr(); + const Uint32 ptrI = ref->backupPtr; + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptrI); + + ptr.p->setErrorCode(ref->errorCode); + stopBackupReply(signal, ptr, nodeId); } void Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr) { jam(); - ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; StopBackupReq* stop = (StopBackupReq*)signal->getDataPtrSend(); stop->backupPtr = ptr.i; @@ -2038,8 +2028,11 @@ Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr) stop->startGCP = ptr.p->startGCP; stop->stopGCP = ptr.p->stopGCP; - sendSignalAllWait(ptr, GSN_STOP_BACKUP_REQ, signal, - StopBackupReq::SignalLength); + ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_STOP_BACKUP_REQ, signal, + StopBackupReq::SignalLength, JBB); } void @@ -2049,14 +2042,12 @@ Backup::execSTOP_BACKUP_CONF(Signal* signal) StopBackupConf* conf = (StopBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->noOfLogBytes += conf->noOfLogBytes; ptr.p->noOfLogRecords += conf->noOfLogRecords; @@ -2073,35 +2064,39 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) return; } - // ptr.p->masterData.state.setState(INITIAL); - - // send backup
complete first to slaves so that they know sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete); - - BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->senderData = ptr.p->clientData; - rep->startGCP = ptr.p->startGCP; - rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytes = ptr.p->noOfBytes; - rep->noOfRecords = ptr.p->noOfRecords; - rep->noOfLogBytes = ptr.p->noOfLogBytes; - rep->noOfLogRecords = ptr.p->noOfLogRecords; - rep->nodes = ptr.p->nodes; - sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, - BackupCompleteRep::SignalLength, JBB); - - signal->theData[0] = EventReport::BackupCompleted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->startGCP; - signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = ptr.p->noOfBytes; - signal->theData[6] = ptr.p->noOfRecords; - signal->theData[7] = ptr.p->noOfLogBytes; - signal->theData[8] = ptr.p->noOfLogRecords; - ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + + if(!ptr.p->checkError()) + { + BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->senderData = ptr.p->clientData; + rep->startGCP = ptr.p->startGCP; + rep->stopGCP = ptr.p->stopGCP; + rep->noOfBytes = ptr.p->noOfBytes; + rep->noOfRecords = ptr.p->noOfRecords; + rep->noOfLogBytes = ptr.p->noOfLogBytes; + rep->noOfLogRecords = ptr.p->noOfLogRecords; + rep->nodes = ptr.p->nodes; + sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, + BackupCompleteRep::SignalLength, JBB); + + signal->theData[0] = EventReport::BackupCompleted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->startGCP; + signal->theData[4] = ptr.p->stopGCP; + signal->theData[5] = ptr.p->noOfBytes; + signal->theData[6] = ptr.p->noOfRecords; + signal->theData[7] = ptr.p->noOfLogBytes; + signal->theData[8] = ptr.p->noOfLogRecords; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + } + else + { + masterAbort(signal, ptr); + } } /***************************************************************************** @@ -2110,199 +2105,96 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) * *****************************************************************************/ void -Backup::masterAbort(Signal* signal, BackupRecordPtr ptr, bool controlledAbort) +Backup::masterAbort(Signal* signal, BackupRecordPtr ptr) { - if(ptr.p->masterData.state.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Master already aborting"); -#endif - jam(); - return; - } jam(); #ifdef DEBUG_ABORT ndbout_c("************ masterAbort"); #endif - - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); - if (!ptr.p->checkError()) - ptr.p->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - - const State s = ptr.p->masterData.state.getState(); - - ptr.p->masterData.state.setState(ABORTING); - - ndbrequire(s == INITIAL || - s == STARTED || - s == DEFINING || - s == DEFINED || - s == SCANNING || - s == STOPPING || - s == ABORTING); - if(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ) { + if(ptr.p->masterData.errorCode != 0) + { jam(); - DEBUG_OUT("masterAbort: gsn = GSN_UTIL_SEQUENCE_REQ"); - //------------------------------------------------------- - // We 
are waiting for UTIL_SEQUENCE response. We rely on - // this to arrive and check for ABORTING in response. - // No slaves are involved at this point and ABORT simply - // results in BACKUP_REF to client - //------------------------------------------------------- - /** - * Waiting for Sequence Id - * @see execUTIL_SEQUENCE_CONF - */ return; - }//if + } + + BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->senderData = ptr.p->clientData; + rep->reason = ptr.p->errorCode; + sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, + BackupAbortRep::SignalLength, JBB); - if(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_UTIL_LOCK_REQ"); - //------------------------------------------------------- - // We are waiting for UTIL_LOCK response (mutex). We rely on - // this to arrive and check for ABORTING in response. - // No slaves are involved at this point and ABORT simply - // results in BACKUP_REF to client - //------------------------------------------------------- - /** - * Waiting for lock - * @see execUTIL_LOCK_CONF - */ + signal->theData[0] = EventReport::BackupAborted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); + + ndbrequire(ptr.p->errorCode); + ptr.p->masterData.errorCode = ptr.p->errorCode; + + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->senderData= ptr.i; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + + switch(ptr.p->masterData.gsn){ + case GSN_DEFINE_BACKUP_REQ: + ord->requestType = AbortBackupOrd::BackupFailure; + sendSignal(rg, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); return; - }//if - - /** - * Unlock mutexes only at master - */ - jam(); - Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); - jam(); - mutex1.unlock(); // ignore response - - jam(); - Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); - jam(); - mutex2.unlock(); // ignore response - - if (!controlledAbort) { + case GSN_CREATE_TRIG_REQ: + case GSN_START_BACKUP_REQ: + case GSN_ALTER_TRIG_REQ: + case GSN_WAIT_GCP_REQ: + case GSN_BACKUP_FRAGMENT_REQ: jam(); - if (s == DEFINING) { - jam(); -//------------------------------------------------------- -// If we are in the defining phase all work is done by -// slaves. No triggers have been allocated thus slaves -// may free all "Master" resources, let them know... -//------------------------------------------------------- - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - return; - }//if - if (s == DEFINED) { - jam(); -//------------------------------------------------------- -// DEFINED is the state when triggers are created. We rely -// on that DICT will report create trigger failure in case -// of node failure. Thus no special action is needed here. -// We will check for errorCode != 0 when receiving -// replies on create trigger. -//------------------------------------------------------- - return; - }//if - if(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_WAIT_GCP_REQ"); -//------------------------------------------------------- -// We are waiting for WAIT_GCP response. We rely on -// this to arrive and check for ABORTING in response. 
-//------------------------------------------------------- - - /** - * Waiting for GCP - * @see execWAIT_GCP_CONF - */ - return; - }//if - - if(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_ALTER_TRIG_REQ"); -//------------------------------------------------------- -// We are waiting for ALTER_TRIG response. We rely on -// this to arrive and check for ABORTING in response. -//------------------------------------------------------- - - /** - * All triggers haven't been created yet - */ - return; - }//if - - if(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_DROP_TRIG_REQ"); -//------------------------------------------------------- -// We are waiting for DROP_TRIG response. We rely on -// this to arrive and will continue dropping triggers -// until completed. -//------------------------------------------------------- - - /** - * I'm currently dropping the trigger - */ - return; - }//if - }//if - -//------------------------------------------------------- -// If we are waiting for START_BACKUP responses we can -// safely start dropping triggers (state == STARTED). -// We will ignore any START_BACKUP responses after this. -//------------------------------------------------------- - DEBUG_OUT("masterAbort: sendDropTrig"); - sendDropTrig(signal, ptr); // dropping due to error + ptr.p->stopGCP= ptr.p->startGCP + 1; + sendDropTrig(signal, ptr); // dropping due to error + return; + case GSN_UTIL_SEQUENCE_REQ: + case GSN_UTIL_LOCK_REQ: + case GSN_DROP_TRIG_REQ: + ndbrequire(false); + return; + case GSN_STOP_BACKUP_REQ: + return; + } } void -Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) +Backup::abort_scan(Signal * signal, BackupRecordPtr ptr) { - if (ptr.p->masterData.state.getState() != ABORTING) { - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); - ptr.p->masterData.state.setState(ABORTING); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->senderData= ptr.i; + ord->requestType = AbortBackupOrd::AbortScan; + + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { + jam(); + FragmentPtr fragPtr; + Array & frags = tabPtr.p->fragments; + const Uint32 fragCount = frags.getSize(); + + for(Uint32 i = 0; ifragments.getPtr(fragPtr, i); + const Uint32 nodeId = fragPtr.p->node; + if(fragPtr.p->scanning != 0 && ptr.p->nodes.get(nodeId)) { + jam(); + + const BlockReference ref = numberToRef(BACKUP, nodeId); + sendSignal(ref, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); + + } + } } - const State s = ptr.p->masterData.state.getAbortState(); - - /** - * First inform to client - */ - if(s == DEFINING) { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("** Abort: sending BACKUP_REF to mgmtsrvr"); -#endif - sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData, - ptr.p->errorCode); - - } else { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("** Abort: sending BACKUP_ABORT_REP to mgmtsrvr"); -#endif - BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->senderData = ptr.p->clientData; - rep->reason = ptr.p->errorCode; - sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, - BackupAbortRep::SignalLength, JBB); - - signal->theData[0] = EventReport::BackupAborted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->errorCode; - 
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - }//if - - // ptr.p->masterData.state.setState(INITIAL); - - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); } /***************************************************************************** @@ -2313,26 +2205,17 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) void Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) { - if (ptr.p->slaveState.getState() == ABORTING) { - jam(); - return; - } - ptr.p->slaveState.setState(ABORTING); - - if (errCode != 0) { - jam(); - ptr.p->setErrorCode(errCode); - }//if + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; + ptr.p->setErrorCode(errCode); ndbrequire(ptr.p->errorCode != 0); - + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); ref->backupId = ptr.p->backupId; ref->backupPtr = ptr.i; ref->errorCode = ptr.p->errorCode; + ref->nodeId = getOwnNodeId(); sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, DefineBackupRef::SignalLength, JBB); - - closeFiles(signal, ptr); } void @@ -2366,6 +2249,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) CRASH_INSERTION((10014)); + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ; ptr.p->slaveState.forceState(INITIAL); ptr.p->slaveState.setState(DEFINING); ptr.p->errorCode = 0; @@ -2432,7 +2316,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) files[i].p->tableId = RNIL; files[i].p->backupPtr = ptr.i; files[i].p->filePointer = RNIL; - files[i].p->fileDone = 0; + files[i].p->fileClosing = 0; files[i].p->fileOpened = 0; files[i].p->fileRunning = 0; files[i].p->scanRunning = 0; @@ -2468,17 +2352,14 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->logFilePtr = files[1].i; ptr.p->dataFilePtr = files[2].i; - if (!verifyNodesAlive(ptr.p->nodes)) { + if (!verifyNodesAlive(ptr, ptr.p->nodes)) { jam(); defineBackupRef(signal, ptr, DefineBackupRef::Undefined); - // sendBackupRef(signal, ptr, - // ptr.p->errorCode?ptr.p->errorCode:BackupRef::Undefined); return; }//if if (ERROR_INSERTED(10027)) { jam(); defineBackupRef(signal, ptr, 327); - // sendBackupRef(signal, ptr, 327); return; }//if @@ -2546,8 +2427,6 @@ Backup::execLIST_TABLES_CONF(Signal* signal) return; }//if - defineSlaveAbortCheck(); - /** * All tables fetched */ @@ -2679,8 +2558,6 @@ Backup::openFilesReply(Signal* signal, }//if }//for - defineSlaveAbortCheck(); - /** * Did open succeed for all files */ @@ -2810,8 +2687,6 @@ Backup::execGET_TABINFOREF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - defineBackupRef(signal, ptr, ref->errorCode); } @@ -2833,8 +2708,6 @@ Backup::execGET_TABINFO_CONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - SegmentedSectionPtr dictTabInfoPtr; signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO); ndbrequire(dictTabInfoPtr.sz == len); @@ -3047,8 +2920,6 @@ Backup::execDI_FCOUNTCONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -3127,8 +2998,6 @@ Backup::execDIGETPRIMCONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -3143,9 +3012,7 @@ Backup::execDIGETPRIMCONF(Signal* signal) void Backup::getFragmentInfoDone(Signal* signal, BackupRecordPtr ptr) { - // Slave must now hold on to master data until - // AbortBackupOrd::OkToClean signal - 
ptr.p->okToCleanMaster = false; + ptr.p->m_gsn = GSN_DEFINE_BACKUP_CONF; ptr.p->slaveState.setState(DEFINED); DefineBackupConf * conf = (DefineBackupConf*)signal->getDataPtr(); conf->backupPtr = ptr.i; @@ -3169,16 +3036,15 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) StartBackupReq* req = (StartBackupReq*)signal->getDataPtr(); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 signalNo = req->signalNo; - + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - - slaveAbortCheck(); // macro will do return if ABORTING ptr.p->slaveState.setState(STARTED); - + ptr.p->m_gsn = GSN_START_BACKUP_REQ; + for(Uint32 i = 0; i<req->noOfTableTriggers; i++) { jam(); TablePtr tabPtr; @@ -3191,11 +3057,13 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) TriggerPtr trigPtr; if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) { jam(); + ptr.p->m_gsn = GSN_START_BACKUP_REF; StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); ref->backupPtr = ptr.i; ref->backupId = ptr.p->backupId; ref->signalNo = signalNo; ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord; + ref->nodeId = getOwnNodeId(); sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, StartBackupRef::SignalLength, JBB); return; @@ -3233,6 +3101,7 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) }//if }//for + ptr.p->m_gsn = GSN_START_BACKUP_CONF; StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend(); conf->backupPtr = ptr.i; conf->backupId = ptr.p->backupId; @@ -3255,7 +3124,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) CRASH_INSERTION((10016)); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 tableId = req->tableId; const Uint32 fragNo = req->fragmentNo; const Uint32 count = req->count; @@ -3266,10 +3135,9 @@ BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - slaveAbortCheck(); // macro will do return if ABORTING - ptr.p->slaveState.setState(SCANNING); - + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REQ; + /** * Get file */ @@ -3280,7 +3148,7 @@ ndbrequire(filePtr.p->fileOpened == 1); ndbrequire(filePtr.p->fileRunning == 1); ndbrequire(filePtr.p->scanRunning == 0); - ndbrequire(filePtr.p->fileDone == 0); + ndbrequire(filePtr.p->fileClosing == 0); /** * Get table */ @@ -3350,7 +3218,7 @@ req->transId1 = 0; req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); req->clientOpPtr= filePtr.i; - req->batch_size_rows= 16; + req->batch_size_rows= parallelism; req->batch_size_bytes= 0; sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal, ScanFragReq::SignalLength, JBB); @@ -3572,6 +3440,13 @@ Backup::OperationRecord::newScan() return false; } +bool +Backup::OperationRecord::closeScan() +{ + opNoDone = opNoConf = opLen = 0; + return true; +} + bool Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 total_len) { @@ -3600,11 +3475,9 @@ Backup::execSCAN_FRAGREF(Signal* signal) c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->errorCode = ref->errorCode; + filePtr.p->scanRunning = 0; - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - abortFile(signal, ptr, filePtr); + backupFragmentRef(signal, filePtr); } void @@ -3639,9 +3512,11 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) { jam(); - if(filePtr.p->errorCode != 0){ + if(filePtr.p->errorCode != 0) + { jam(); -
abortFileHook(signal, filePtr, true); // Scan completed + filePtr.p->scanRunning = 0; + backupFragmentRef(signal, filePtr); // Scan completed return; }//if @@ -3669,20 +3544,51 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, BackupFragmentConf::SignalLength, JBB); + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_CONF; ptr.p->slaveState.setState(STARTED); return; } + +void +Backup::backupFragmentRef(Signal * signal, BackupFilePtr filePtr) +{ + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REF; + + BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtrSend(); + ref->backupId = ptr.p->backupId; + ref->backupPtr = ptr.i; + ref->nodeId = getOwnNodeId(); + ref->errorCode = ptr.p->errorCode; + sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_REF, signal, + BackupFragmentRef::SignalLength, JBB); +} void Backup::checkScan(Signal* signal, BackupFilePtr filePtr) { - if(filePtr.p->errorCode != 0){ + OperationRecord & op = filePtr.p->operation; + + if(filePtr.p->errorCode != 0) + { jam(); - abortFileHook(signal, filePtr, false); // Scan not completed + + /** + * Close scan + */ + op.closeScan(); + ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); + req->senderData = filePtr.i; + req->closeFlag = 1; + req->transId1 = 0; + req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); + sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + ScanFragNextReq::SignalLength, JBB); return; }//if - - OperationRecord & op = filePtr.p->operation; + if(op.newScan()) { jam(); @@ -3693,8 +3599,28 @@ Backup::checkScan(Signal* signal, BackupFilePtr filePtr) req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); req->batch_size_rows= 16; req->batch_size_bytes= 0; - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); + if(ERROR_INSERTED(10032)) + sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + 100, ScanFragNextReq::SignalLength); + else if(ERROR_INSERTED(10033)) + { + SET_ERROR_INSERT_VALUE(10032); + sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + 10000, ScanFragNextReq::SignalLength); + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::FileOrScanError; + ord->senderData= ptr.i; + sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); + } + else + sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + ScanFragNextReq::SignalLength, JBB); return; }//if @@ -3718,11 +3644,8 @@ Backup::execFSAPPENDREF(Signal* signal) filePtr.p->fileRunning = 0; filePtr.p->errorCode = errCode; - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - abortFile(signal, ptr, filePtr); + + checkFile(signal, filePtr); } void @@ -3738,12 +3661,6 @@ Backup::execFSAPPENDCONF(Signal* signal) BackupFilePtr filePtr; c_backupFilePool.getPtr(filePtr, filePtrI); - - if (ERROR_INSERTED(10029)) { - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - abortFile(signal, ptr, filePtr); - }//if OperationRecord & op = filePtr.p->operation; @@ -3761,30 +3678,25 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) #endif OperationRecord & op = filePtr.p->operation; - + Uint32 * tmp, sz; bool eof; - if(op.dataBuffer.getReadPtr(&tmp, &sz, &eof)) { + if(op.dataBuffer.getReadPtr(&tmp, &sz, 
&eof)) + { jam(); - if(filePtr.p->errorCode == 0) { - jam(); - FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); - req->filePointer = filePtr.p->filePointer; - req->userPointer = filePtr.i; - req->userReference = reference(); - req->varIndex = 0; - req->offset = tmp - c_startOfPages; - req->size = sz; - - sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal, - FsAppendReq::SignalLength, JBA); - return; - } else { - jam(); - if (filePtr.p->scanRunning == 1) - eof = false; - }//if - }//if + jam(); + FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); + req->filePointer = filePtr.p->filePointer; + req->userPointer = filePtr.i; + req->userReference = reference(); + req->varIndex = 0; + req->offset = tmp - c_startOfPages; + req->size = sz; + + sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal, + FsAppendReq::SignalLength, JBA); + return; + } if(!eof) { jam(); @@ -3794,9 +3706,7 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) return; }//if - ndbrequire(filePtr.p->fileDone == 1); - - if(sz > 0 && filePtr.p->errorCode == 0) { + if(sz > 0) { jam(); FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); req->filePointer = filePtr.p->filePointer; @@ -3812,6 +3722,7 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) }//if filePtr.p->fileRunning = 0; + filePtr.p->fileClosing = 1; FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend(); req->filePointer = filePtr.p->filePointer; @@ -3819,64 +3730,11 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) req->userReference = reference(); req->fileFlag = 0; #ifdef DEBUG_ABORT - ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i); + ndbout_c("***** a FSCLOSEREQ filePtr.i = %u", filePtr.i); #endif sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA); } -void -Backup::abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr) -{ - jam(); - - if(ptr.p->slaveState.getState() != ABORTING) { - /** - * Inform master of failure - */ - jam(); - ptr.p->slaveState.setState(ABORTING); - ptr.p->setErrorCode(AbortBackupOrd::FileOrScanError); - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::FileOrScanError); - return; - }//if - - - for(ptr.p->files.first(filePtr); - filePtr.i!=RNIL; - ptr.p->files.next(filePtr)){ - jam(); - filePtr.p->errorCode = 1; - }//for - - closeFiles(signal, ptr); -} - -void -Backup::abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanComplete) -{ - jam(); - - if(!scanComplete) { - jam(); - - ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); - req->senderData = filePtr.i; - req->closeFlag = 1; - req->transId1 = 0; - req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - return; - }//if - - filePtr.p->scanRunning = 0; - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - filePtr.i = RNIL; - abortFile(signal, ptr, filePtr); -} /**************************************************************************** * @@ -3953,27 +3811,30 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { }//if BackupFormat::LogFile::LogEntry * logEntry = trigPtr.p->logEntry; - if(logEntry == 0) { + if(logEntry == 0) + { jam(); Uint32 * dst; FsBuffer & buf = trigPtr.p->operation->dataBuffer; ndbrequire(trigPtr.p->maxRecordSize <= buf.getMaxWrite()); - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(!buf.getWritePtr(&dst, trigPtr.p->maxRecordSize)) { + if(ERROR_INSERTED(10030) || + !buf.getWritePtr(&dst, 
trigPtr.p->maxRecordSize)) + { jam(); + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::LogBufferFull; + ord->senderData= ptr.i; + sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); return; }//if - if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) { - jam(); - trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull); - return; - }//if - + logEntry = (BackupFormat::LogFile::LogEntry *)dst; trigPtr.p->logEntry = logEntry; logEntry->Length = 0; @@ -4015,9 +3876,10 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(gci != ptr.p->currGCP) { + if(gci != ptr.p->currGCP) + { jam(); - + trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000); trigPtr.p->logEntry->Data[len] = htonl(gci); len ++; @@ -4035,20 +3897,6 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) trigPtr.p->operation->noOfRecords += 1; } -void -Backup::sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr, - Uint32 requestType) -{ - jam(); - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = requestType; - ord->senderData= ptr.i; - sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); -} - void Backup::sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 requestType) @@ -4085,7 +3933,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) CRASH_INSERTION((10020)); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 startGCP = req->startGCP; const Uint32 stopGCP = req->stopGCP; @@ -4101,7 +3949,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(STOPPING); - slaveAbortCheck(); // macro will do return if ABORTING + ptr.p->m_gsn = GSN_STOP_BACKUP_REQ; /** * Insert footers @@ -4140,12 +3988,6 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) void Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) { - if (ptr.p->closingFiles) { - jam(); - return; - } - ptr.p->closingFiles = true; - /** * Close all files */ @@ -4161,12 +4003,12 @@ Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) jam(); openCount++; - if(filePtr.p->fileDone == 1){ + if(filePtr.p->fileClosing == 1){ jam(); continue; }//if - filePtr.p->fileDone = 1; + filePtr.p->fileClosing = 1; if(filePtr.p->fileRunning == 1){ jam(); @@ -4183,7 +4025,7 @@ Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) req->userReference = reference(); req->fileFlag = 0; #ifdef DEBUG_ABORT - ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i); + ndbout_c("***** b FSCLOSEREQ filePtr.i = %u", filePtr.i); #endif sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, sig, FsCloseReq::SignalLength, JBA); @@ -4210,11 +4052,6 @@ Backup::execFSCLOSEREF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - /** - * This should only happen during abort of backup - */ - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - filePtr.p->fileOpened = 1; FsConf * conf = (FsConf*)signal->getDataPtr(); 
conf->userPointer = filePtrI; @@ -4237,7 +4074,7 @@ Backup::execFSCLOSECONF(Signal* signal) ndbout_c("***** FSCLOSECONF filePtrI = %u", filePtrI); #endif - ndbrequire(filePtr.p->fileDone == 1); + ndbrequire(filePtr.p->fileClosing == 1); ndbrequire(filePtr.p->fileOpened == 1); ndbrequire(filePtr.p->fileRunning == 0); ndbrequire(filePtr.p->scanRunning == 0); @@ -4265,25 +4102,20 @@ Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) { jam(); - if(ptr.p->slaveState.getState() == STOPPING) { - jam(); - BackupFilePtr filePtr; - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - - StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->backupPtr = ptr.i; - conf->noOfLogBytes = filePtr.p->operation.noOfBytes; - conf->noOfLogRecords = filePtr.p->operation.noOfRecords; - sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal, - StopBackupConf::SignalLength, JBB); - - ptr.p->slaveState.setState(CLEANING); - return; - }//if + jam(); + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - removeBackup(signal, ptr); + StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend(); + conf->backupId = ptr.p->backupId; + conf->backupPtr = ptr.i; + conf->noOfLogBytes = filePtr.p->operation.noOfBytes; + conf->noOfLogRecords = filePtr.p->operation.noOfRecords; + sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal, + StopBackupConf::SignalLength, JBB); + + ptr.p->m_gsn = GSN_STOP_BACKUP_CONF; + ptr.p->slaveState.setState(CLEANING); } /***************************************************************************** @@ -4291,57 +4123,6 @@ Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) * Slave functionallity: Abort backup * *****************************************************************************/ -void -Backup::removeBackup(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = ptr.i; - req->directory = 1; - req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBA); -} - -void -Backup::execFSREMOVEREF(Signal* signal) -{ - jamEntry(); - ndbrequire(0); -} - -void -Backup::execFSREMOVECONF(Signal* signal){ - jamEntry(); - - FsConf * conf = (FsConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->userPointer; - - /** - * Get backup record - */ - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, ptrI); - - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - if (ptr.p->masterRef == reference()) { - if (ptr.p->masterData.state.getAbortState() == DEFINING) { - jam(); - sendBackupRef(signal, ptr, ptr.p->errorCode); - return; - } else { - jam(); - }//if - }//if - cleanupSlaveResources(ptr); -} - /***************************************************************************** * * Slave functionallity: Abort backup @@ -4394,8 +4175,7 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) if (c_backupPool.findId(senderData)) { jam(); c_backupPool.getPtr(ptr, senderData); - } else { // TODO might be abort sent to not master, - // or master aborting too early + } else { jam(); #ifdef DEBUG_ABORT ndbout_c("Backup: abort request type=%u on id=%u,%u not found", @@ -4405,15 +4185,15 @@ 
Backup::execABORT_BACKUP_ORD(Signal* signal) } }//if + ptr.p->m_gsn = GSN_ABORT_BACKUP_ORD; const bool isCoordinator = (ptr.p->masterRef == reference()); - + bool ok = false; switch(requestType){ /** * Requests sent to master */ - case AbortBackupOrd::ClientAbort: jam(); // fall through @@ -4422,113 +4202,61 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) // fall through case AbortBackupOrd::FileOrScanError: jam(); - if(ptr.p->masterData.state.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Already aborting"); -#endif - jam(); - return; - } + ndbrequire(isCoordinator); ptr.p->setErrorCode(requestType); - ndbrequire(isCoordinator); // Sent from slave to coordinator - masterAbort(signal, ptr, false); + if(ptr.p->masterData.gsn == GSN_BACKUP_FRAGMENT_REQ) + { + /** + * Only scans are actively aborted + */ + abort_scan(signal, ptr); + } return; - - /** - * Info sent to slave - */ - - case AbortBackupOrd::OkToClean: - jam(); - cleanupMasterResources(ptr); - return; - + /** * Requests sent to slave */ - + case AbortBackupOrd::AbortScan: + jam(); + ptr.p->setErrorCode(requestType); + return; + case AbortBackupOrd::BackupComplete: jam(); - if (ptr.p->slaveState.getState() == CLEANING) { // TODO what if state is - // not CLEANING? - jam(); - cleanupSlaveResources(ptr); - }//if + cleanup(signal, ptr); return; - break; - case AbortBackupOrd::BackupFailureDueToNodeFail: - jam(); - ok = true; - if (ptr.p->errorCode != 0) - ptr.p->setErrorCode(requestType); - break; case AbortBackupOrd::BackupFailure: - jam(); - ok = true; - break; + case AbortBackupOrd::BackupFailureDueToNodeFail: + case AbortBackupOrd::OkToClean: + case AbortBackupOrd::IncompatibleVersions: +#ifndef VM_TRACE + default: +#endif + ptr.p->setErrorCode(requestType); + ok= true; } ndbrequire(ok); - /** - * Slave abort - */ - slaveAbort(signal, ptr); -} - -void -Backup::slaveAbort(Signal* signal, BackupRecordPtr ptr) -{ - if(ptr.p->slaveState.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Slave already aborting"); -#endif - jam(); - return; + Uint32 ref= ptr.p->masterRef; + ptr.p->masterRef = reference(); + ptr.p->nodes.clear(); + ptr.p->nodes.set(getOwnNodeId()); + + if(ref == reference()) + { + ptr.p->stopGCP= ptr.p->startGCP + 1; + sendDropTrig(signal, ptr); } -#ifdef DEBUG_ABORT - ndbout_c("************* slaveAbort"); -#endif - - State slaveState = ptr.p->slaveState.getState(); - ptr.p->slaveState.setState(ABORTING); - switch(slaveState) { - case DEFINING: - jam(); - return; -//------------------------------------------ -// Will watch for the abort at various places -// in the defining phase. 
-//------------------------------------------ - case ABORTING: - jam(); - //Fall through - case DEFINED: - jam(); - //Fall through - case STOPPING: - jam(); + else + { + ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; + ptr.p->masterData.sendCounter.clearWaitingFor(); + ptr.p->masterData.sendCounter.setWaitingFor(getOwnNodeId()); closeFiles(signal, ptr); - return; - case STARTED: - jam(); - //Fall through - case SCANNING: - jam(); - BackupFilePtr filePtr; - filePtr.i = RNIL; - abortFile(signal, ptr, filePtr); - return; - case CLEANING: - jam(); - cleanupSlaveResources(ptr); - return; - case INITIAL: - jam(); - ndbrequire(false); - return; } } + void Backup::dumpUsedResources() { @@ -4576,12 +4304,8 @@ Backup::dumpUsedResources() } void -Backup::cleanupMasterResources(BackupRecordPtr ptr) +Backup::cleanup(Signal* signal, BackupRecordPtr ptr) { -#ifdef DEBUG_ABORT - ndbout_c("******** Cleanup Master Resources *********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif TablePtr tabPtr; for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;ptr.p->tables.next(tabPtr)) @@ -4601,20 +4325,6 @@ Backup::cleanupMasterResources(BackupRecordPtr ptr) tabPtr.p->triggerIds[j] = ILLEGAL_TRIGGER_ID; }//for }//for - ptr.p->tables.release(); - ptr.p->triggers.release(); - ptr.p->okToCleanMaster = true; - - cleanupFinalResources(ptr); -} - -void -Backup::cleanupSlaveResources(BackupRecordPtr ptr) -{ -#ifdef DEBUG_ABORT - ndbout_c("******** Clean Up Slave Resources*********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif BackupFilePtr filePtr; for(ptr.p->files.first(filePtr); @@ -4626,35 +4336,65 @@ Backup::cleanupSlaveResources(BackupRecordPtr ptr) ndbrequire(filePtr.p->scanRunning == 0); filePtr.p->pages.release(); }//for - ptr.p->files.release(); - cleanupFinalResources(ptr); + ptr.p->files.release(); + ptr.p->tables.release(); + ptr.p->triggers.release(); + + ptr.p->tables.release(); + ptr.p->triggers.release(); + ptr.p->pages.release(); + ptr.p->backupId = ~0; + + if(ptr.p->checkError()) + removeBackup(signal, ptr); + else + c_backups.release(ptr); +} + + +void +Backup::removeBackup(Signal* signal, BackupRecordPtr ptr) +{ + jam(); + + FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); + req->userReference = reference(); + req->userPointer = ptr.i; + req->directory = 1; + req->ownDirectory = 1; + FsOpenReq::setVersion(req->fileNumber, 2); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); + FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); + FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); + sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, + FsRemoveReq::SignalLength, JBA); } void -Backup::cleanupFinalResources(BackupRecordPtr ptr) +Backup::execFSREMOVEREF(Signal* signal) { -#ifdef DEBUG_ABORT - ndbout_c("******** Clean Up Final Resources*********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif + jamEntry(); + FsRef * ref = (FsRef*)signal->getDataPtr(); + const Uint32 ptrI = ref->userPointer; - // if (!ptr.p->tables.empty() || !ptr.p->files.empty()) { - if (!ptr.p->okToCleanMaster || !ptr.p->files.empty()) { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("******** Waiting to do final cleanup"); -#endif - return; - } - ptr.p->pages.release(); - ptr.p->masterData.state.setState(INITIAL); - ptr.p->slaveState.setState(INITIAL); - ptr.p->backupId = 0; - - ptr.p->closingFiles = false; - ptr.p->okToCleanMaster = true; - - c_backups.release(ptr); - // 
ndbrequire(false); + FsConf * conf = (FsConf*)signal->getDataPtr(); + conf->userPointer = ptrI; + execFSREMOVECONF(signal); } + +void +Backup::execFSREMOVECONF(Signal* signal){ + jamEntry(); + + FsConf * conf = (FsConf*)signal->getDataPtr(); + const Uint32 ptrI = conf->userPointer; + + /** + * Get backup record + */ + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptrI); + c_backups.release(ptr); +} + diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp index 1a5d6c7a925..7bcea5655b4 100644 --- a/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/ndb/src/kernel/blocks/backup/Backup.hpp @@ -232,6 +232,7 @@ public: */ bool newScan(); bool scanConf(Uint32 noOfOps, Uint32 opLen); + bool closeScan(); /** * Per record */ @@ -330,7 +331,7 @@ public: Uint8 fileOpened; Uint8 fileRunning; - Uint8 fileDone; + Uint8 fileClosing; Uint8 scanRunning; }; typedef Ptr<BackupFile> BackupFilePtr; @@ -403,13 +404,11 @@ public: ArrayPool<TriggerRecord> & trp) : slaveState(b, validSlaveTransitions, validSlaveTransitionsCount,1) , tables(tp), triggers(trp), files(bp), pages(pp) - , masterData(b, validMasterTransitions, validMasterTransitionsCount) - , backup(b) - { - closingFiles = false; - okToCleanMaster = true; - } + , masterData(b), backup(b) + { + } + Uint32 m_gsn; CompoundState slaveState; Uint32 clientRef; @@ -420,9 +419,6 @@ public: Uint32 errorCode; NdbNodeBitmask nodes; - bool okToCleanMaster; - bool closingFiles; - Uint64 noOfBytes; Uint64 noOfRecords; Uint64 noOfLogBytes; @@ -444,15 +440,13 @@ public: SimpleProperties props;// Used for (un)packing backup request struct MasterData { - MasterData(Backup & b, const State valid[], Uint32 count) - : state(b, valid, count, 0) - { - } + MasterData(Backup & b) + { + } MutexHandle2<BACKUP_DEFINE_MUTEX> m_defineBackupMutex; MutexHandle2<DICT_COMMIT_TABLE_MUTEX> m_dictCommitTableMutex; Uint32 gsn; - CompoundState state; SignalCounter sendCounter; Uint32 errorCode; struct { @@ -557,7 +551,8 @@ public: void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0); - + void backupFragmentRef(Signal * signal, BackupFilePtr filePtr); + void nextFragment(Signal*, BackupRecordPtr); void sendCreateTrig(Signal*, BackupRecordPtr ptr, TablePtr tabPtr); @@ -578,14 +573,14 @@ public: void sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 errCode); void sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr, Uint32 errCode); - void masterAbort(Signal*, BackupRecordPtr ptr, bool controlledAbort); + void masterAbort(Signal*, BackupRecordPtr ptr); void masterSendAbortBackup(Signal*, BackupRecordPtr ptr); void slaveAbort(Signal*, BackupRecordPtr ptr); void abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr); void abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanDone); - bool verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask); + bool verifyNodesAlive(BackupRecordPtr, const NdbNodeBitmask& aNodeBitMask); bool checkAbort(BackupRecordPtr ptr); void checkNodeFail(Signal* signal, BackupRecordPtr ptr, @@ -603,9 +598,8 @@ public: void sendBackupRef(BlockReference ref, Signal *signal, Uint32 senderData, Uint32 errorCode); void dumpUsedResources(); - void cleanupMasterResources(BackupRecordPtr ptr); - void cleanupSlaveResources(BackupRecordPtr ptr); - void cleanupFinalResources(BackupRecordPtr ptr); + void cleanup(Signal*, BackupRecordPtr ptr); + void abort_scan(Signal*, BackupRecordPtr ptr); void removeBackup(Signal*, BackupRecordPtr ptr); void sendSTTORRY(Signal*); diff --git
a/ndb/src/kernel/blocks/backup/Backup.txt b/ndb/src/kernel/blocks/backup/Backup.txt index ee5e02bb549..73942c6ebdc 100644 --- a/ndb/src/kernel/blocks/backup/Backup.txt +++ b/ndb/src/kernel/blocks/backup/Backup.txt @@ -341,3 +341,28 @@ start backup (ERROR_INSERTED(10022))) { if (ERROR_INSERTED(10029)) { if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) { + +----- XXX --- + +DEFINE_BACKUP_REF -> + ABORT_BACKUP_ORD(no reply) when all DEFINE_BACKUP replies have arrived + +START_BACKUP_REF + ABORT_BACKUP_ORD(no reply) when all START_BACKUP_ replies have arrived + +BACKUP_FRAGMENT_REF + ABORT_BACKUP_ORD(reply) directly to all nodes running BACKUP_FRAGMENT + + When all nodes have replied BACKUP_FRAGMENT + ABORT_BACKUP_ORD(no reply) + +STOP_BACKUP_REF + ABORT_BACKUP_ORD(no reply) when all STOP_BACKUP_ replies have arrived + +NF_COMPLETE_REP + slave dies + master sends OUTSTANDING_REF to self + slave does nothing + + master dies + slave elects self as master and sets only itself as participant diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp index 08fa089a9c0..eae72f43db5 100644 --- a/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -175,7 +175,7 @@ Backup::Backup(const Configuration & conf) : addRecSignal(GSN_START_BACKUP_CONF, &Backup::execSTART_BACKUP_CONF); addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); - //addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); + addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index dfae180ae71..0274ef4af3e 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -126,6 +126,7 @@ Cmvmi::Cmvmi(const Configuration & conf) : } setNodeInfo(getOwnNodeId()).m_connected = true; + setNodeInfo(getOwnNodeId()).m_version = ndbGetOwnVersion(); } Cmvmi::~Cmvmi() diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 68106c4689d..a8931fb32ea 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -1565,9 +1565,9 @@ ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed, { // start backup can take some time, set timeout high Uint64 old_timeout= handle->read_timeout; if (wait_completed == 2) - handle->read_timeout= 30*60*1000; // 30 minutes + handle->read_timeout= 48*60*60*1000; // 48 hours else if (wait_completed == 1) - handle->read_timeout= 5*60*1000; // 5 minutes + handle->read_timeout= 10*60*1000; // 10 minutes reply = ndb_mgm_call(handle, start_backup_reply, "start backup", &args); handle->read_timeout= old_timeout; } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index fb05e57e138..ceaedc9955b 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -791,7 +791,7 @@ MgmtSrvr::restartNode(int processId, bool nostart, result = sendSignal(processId, NO_WAIT, signal, true); } - if (result == -1) { + if (result == -1 && theWaitState != WAIT_NODEFAILURE) { m_stopRec.inUse = false; return SEND_OR_RECEIVE_FAILED; } @@ -1920,6 +1920,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) #ifdef VM_TRACE ndbout_c("I'm not master resending to %d", aNodeId); #endif + theWaitNode= aNodeId; NdbApiSignal aSignal(_ownReference); BackupReq* req =
CAST_PTR(BackupReq, aSignal.getDataPtrSend()); aSignal.set(TestOrd::TraceAPI, BACKUP, GSN_BACKUP_REQ, @@ -1947,6 +1948,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) event.Event = BackupEvent::BackupAborted; event.Aborted.Reason = rep->reason; event.Aborted.BackupId = rep->backupId; + event.Aborted.ErrorCode = rep->reason; backupCallback(event); } break; @@ -2076,6 +2078,13 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete) handleStopReply(nodeId, 0); DBUG_VOID_RETURN; } + + if(theWaitNode == nodeId && + theWaitState != NO_WAIT && theWaitState != WAIT_STOP) + { + theWaitState = WAIT_NODEFAILURE; + NdbCondition_Signal(theMgmtWaitForResponseCondPtr); + } } eventReport(_ownNodeId, theData); @@ -2427,7 +2436,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) int result; if (waitCompleted == 2) { result = sendRecSignal(nodeId, WAIT_BACKUP_COMPLETED, - signal, true, 30*60*1000 /*30 secs*/); + signal, true, 48*60*60*1000 /* 48 hours */); } else if (waitCompleted == 1) { result = sendRecSignal(nodeId, WAIT_BACKUP_STARTED, @@ -2456,22 +2465,6 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) return -1; break; } - } else { - switch(m_lastBackupEvent.Event){ - case BackupEvent::BackupCompleted: - backupId = m_lastBackupEvent.Completed.BackupId; - break; - case BackupEvent::BackupStarted: - backupId = m_lastBackupEvent.Started.BackupId; - break; - case BackupEvent::BackupFailedToStart: - return m_lastBackupEvent.FailedToStart.ErrorCode; - case BackupEvent::BackupAborted: - return m_lastBackupEvent.Aborted.ErrorCode; - default: - return -1; - break; - } } return 0; diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index a05b29b7f31..ce78983b3c3 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -611,7 +611,8 @@ private: WAIT_STOP, WAIT_BACKUP_STARTED, WAIT_BACKUP_COMPLETED, - WAIT_VERSION + WAIT_VERSION, + WAIT_NODEFAILURE }; /** @@ -695,6 +696,7 @@ private: NdbApiSignal* theSignalIdleList; // List of unused signals + Uint32 theWaitNode; WaitSignalType theWaitState; // State denoting a set of signals we accept to recieve. 
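The MgmtSrvr hunks above, together with the signal-handling hunk that follows, rework how the management server waits for a backup reply: sendRecSignal now records which node it expects the answer from (theWaitNode), and when exactly that node dies the status handler flips the wait state to WAIT_NODEFAILURE and signals the condition, so the waiting thread wakes immediately instead of sleeping out the (now 48 hour) timeout. Below is a minimal sketch of that wake-up pattern, using std::condition_variable in place of the NdbCondition/TransporterFacade machinery; ResponseWaiter and its members are illustrative names, not the real mgmsrv API.

#include <chrono>
#include <condition_variable>
#include <mutex>

enum WaitState { NO_WAIT, WAIT_BACKUP_COMPLETED, WAIT_NODEFAILURE };

struct ResponseWaiter {
  std::mutex m;
  std::condition_variable cv;
  WaitState state = NO_WAIT;
  int waitNode = 0;   // node id the request was forwarded to

  // Block until the reply arrives (state -> NO_WAIT), the awaited node
  // fails (state -> WAIT_NODEFAILURE), or the timeout expires.
  bool waitForResponse(std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lk(m);
    return cv.wait_for(lk, timeout, [this] {
      return state == NO_WAIT || state == WAIT_NODEFAILURE;
    });
  }

  // Node-failure handler: wake the waiter only if the dead node is the
  // one it is waiting on, mirroring the theWaitNode == nodeId check.
  void nodeFailed(int nodeId) {
    std::lock_guard<std::mutex> lk(m);
    if (nodeId == waitNode && state != NO_WAIT) {
      state = WAIT_NODEFAILURE;
      cv.notify_all();
    }
  }
};

Checking the failed node against waitNode keeps failures of unrelated data nodes from aborting a wait that can still complete.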
diff --git a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp b/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp index 2126c9d358d..f93948abc75 100644 --- a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp +++ b/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp @@ -108,6 +108,7 @@ MgmtSrvr::sendRecSignal(Uint16 aNodeId, return -1; } theWaitState = aWaitState; + theWaitNode = aNodeId; return receiveOptimisedResponse(waitTime); } @@ -119,11 +120,12 @@ MgmtSrvr::receiveOptimisedResponse(int waitTime) theFacade->checkForceSend(_blockNumber); NDB_TICKS maxTime = NdbTick_CurrentMillisecond() + waitTime; - while (theWaitState != NO_WAIT && waitTime > 0) { + while (theWaitState != NO_WAIT && theWaitState != WAIT_NODEFAILURE + && waitTime > 0) { NdbCondition_WaitTimeout(theMgmtWaitForResponseCondPtr, theFacade->theMutexPtr, waitTime); - if(theWaitState == NO_WAIT) + if(theWaitState == NO_WAIT || theWaitState == WAIT_NODEFAILURE) break; waitTime = (maxTime - NdbTick_CurrentMillisecond()); }//while diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index d4ad9cd6f1c..484e91f2977 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -345,7 +345,7 @@ ErrorBundle ErrorCodes[] = { { 1325, IE, "File or scan error" }, { 1326, IE, "Backup abortet due to node failure" }, { 1327, IE, "1327" }, - + { 1340, IE, "Backup undefined error" }, { 1342, AE, "Backup failed to allocate buffers (check configuration)" }, { 1343, AE, "Backup failed to setup fs buffers (check configuration)" }, @@ -355,7 +355,8 @@ ErrorBundle ErrorCodes[] = { { 1347, AE, "Backup failed to allocate table memory (check configuration)" }, { 1348, AE, "Backup failed to allocate file record (check configuration)" }, { 1349, AE, "Backup failed to allocate attribute record (check configuration)" }, - + { 1329, AE, "Backup during software upgrade not supported" }, + /** * Still uncategorized */ diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index 77b9d0a4baa..bea5d5307e2 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -74,20 +74,20 @@ int runAbort(NDBT_Context* ctx, NDBT_Step* step){ if (testMaster) { if (testSlave) { - if (backup.NFMasterAsSlave(restarter) == -1){ + if (backup.NFMasterAsSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } else { - if (backup.NFMaster(restarter) == -1){ + if (backup.NFMaster(restarter) != NDBT_OK){ return NDBT_FAILED; } } } else { - if (backup.NFSlave(restarter) == -1){ + if (backup.NFSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } - + return NDBT_OK; } @@ -108,16 +108,16 @@ int runFail(NDBT_Context* ctx, NDBT_Step* step){ if (testMaster) { if (testSlave) { - if (backup.FailMasterAsSlave(restarter) == -1){ + if (backup.FailMasterAsSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } else { - if (backup.FailMaster(restarter) == -1){ + if (backup.FailMaster(restarter) != NDBT_OK){ return NDBT_FAILED; } } } else { - if (backup.FailSlave(restarter) == -1){ + if (backup.FailSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 453fe1ad7ae..2d7c435e8b4 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -2,6 +2,30 @@ max-time: 3600 cmd: atrt-mysql-test-run args: --force +max-time: 600 +cmd: atrt-testBackup +args: -n NFMaster T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n NFMasterAsSlave T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n NFSlave T1 + +max-time: 
600 +cmd: atrt-testBackup +args: -n FailMaster T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n FailMasterAsSlave T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n FailSlave T1 + max-time: 600 cmd: atrt-testBackup args: -n BackupOne T1 T6 T3 I3 diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index 5e22468692e..7c5d1405f6b 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -245,6 +245,10 @@ NdbBackup::NFSlave(NdbRestarter& _restarter){ int NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, bool onMaster){ { + int nNodes = _restarter.getNumDbNodes(); + if(nNodes == 1) + return NDBT_OK; + int nodeId = _restarter.getMasterNodeId(); CHECK(_restarter.restartOneDbNode(nodeId, false, true, true) == 0, @@ -255,15 +259,11 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, CHECK(_restarter.startNodes(&nodeId, 1) == 0, "failed to start node"); - - NdbSleep_SecSleep(10); } - + CHECK(_restarter.waitClusterStarted() == 0, "waitClusterStarted failed"); - - int nNodes = _restarter.getNumDbNodes(); - + myRandom48Init(NdbTick_CurrentMillisecond()); for(int i = 0; i<sz; i++) [...] Date: Fri, 22 Apr 2005 09:40:44 +0200 Subject: [PATCH 45/65] bug#9969 - ndb misleading error message --- ndb/src/ndbapi/ndberror.c | 6 +++--- ndb/test/src/NdbBackup.cpp | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 484e91f2977..98cce88e2a7 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -140,10 +140,10 @@ ErrorBundle ErrorCodes[] = { { 4008, UR, "Receive from NDB failed" }, { 4009, UR, "Cluster Failure" }, { 4012, UR, - "Time-out, most likely caused by simple read or cluster failure" }, + "Request ndbd time-out, maybe due to high load or communication problems"}, { 4024, UR, - "Time-out, most likely caused by simple read or cluster failure" }, - + "Time-out, most likely caused by simple read or cluster failure" }, + /** * TemporaryResourceError */ diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index 7c5d1405f6b..28724323bd7 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -244,8 +244,8 @@ NdbBackup::NFSlave(NdbRestarter& _restarter){ int NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, bool onMaster){ + int nNodes = _restarter.getNumDbNodes(); { - int nNodes = _restarter.getNumDbNodes(); if(nNodes == 1) return NDBT_OK; From 0bfc92467255010583aa4eee6166d69ea7cfff76 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Fri, 22 Apr 2005 11:04:26 +0200 Subject: [PATCH 46/65] bug#9724 - ndb restart if file already open occurs, print files...
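Patch 46 below changes only OpenFiles::insert(): when the same file name is registered twice, the fatal error now carries both the new and the already-open name, formatted with BaseString::assfmt(), instead of an empty string. A rough stand-alone sketch of the same check, assuming a simplified container; OpenFileTable and its members are hypothetical names, not the real ndbfs types.

#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

struct OpenFileTable {
  std::vector<std::string> names;   // names of currently open files

  void insert(const std::string& name) {
    for (const std::string& existing : names) {
      if (existing == name) {
        // Put both names into the error text, as the assfmt() call does,
        // so the trace shows which file collided with which.
        std::fprintf(stderr, "already open: >%s< existing: >%s<\n",
                     name.c_str(), existing.c_str());
        std::abort();   // stands in for ERROR_SET(fatal, ...)
      }
    }
    names.push_back(name);
  }
};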
--- ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp b/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp index b944bb5485b..0fee687f1bc 100644 --- a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp +++ b/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp @@ -82,8 +82,14 @@ inline bool OpenFiles::insert(AsyncFile* file, Uint16 id){ continue; if(strcmp(m_files[i].m_file->theFileName.c_str(), - file->theFileName.c_str()) == 0){ - ERROR_SET(fatal, AFS_ERROR_ALLREADY_OPEN,"","OpenFiles::insert()"); + file->theFileName.c_str()) == 0) + { + BaseString names; + names.assfmt("open: >%s< existing: >%s<", + file->theFileName.c_str(), + m_files[i].m_file->theFileName.c_str()); + ERROR_SET(fatal, AFS_ERROR_ALLREADY_OPEN, names.c_str(), + "OpenFiles::insert()"); } } From 0e7f6a601f9e1d32d09a53364bd7d2388851721e Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Fri, 22 Apr 2005 12:30:09 +0200 Subject: [PATCH 47/65] Bug#7806 - insert on duplicate key and auto-update of timestamp A fix of the original patch. Correctly clear a bit from an enum value. --- sql/sql_insert.cc | 9 ++++++--- sql/table.h | 16 ++++++++++------ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 96d94127316..7f890a583c6 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -80,7 +80,8 @@ static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, check_grant_all_columns(thd,INSERT_ACL,table)) return -1; #endif - *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_INSERT); } else { // Part field list @@ -110,7 +111,8 @@ static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, } if (table->timestamp_field && // Don't set timestamp if used table->timestamp_field->query_id == thd->query_id) - *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT; + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_INSERT); } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -167,7 +169,8 @@ static int check_update_fields(THD *thd, TABLE *table, { /* Don't set timestamp column if this is modified. */ if (table->timestamp_field->query_id == thd->query_id) - *(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE; + clear_timestamp_auto_bits(table->timestamp_field_type, + TIMESTAMP_AUTO_SET_ON_UPDATE); else table->timestamp_field->query_id= timestamp_query_id; } diff --git a/sql/table.h b/sql/table.h index e822d68531e..77153e5d8cd 100644 --- a/sql/table.h +++ b/sql/table.h @@ -58,18 +58,22 @@ typedef struct st_filesort_info /* - Values in this enum are used to indicate during which operations value - of TIMESTAMP field should be set to current timestamp. - WARNING: The values are used for bit operations. If you change the enum, - you must keep the bitwise relation of the values. For example: - (int) TIMESTAMP_AUTO_SET_ON_BOTH == - (int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE. + Values in this enum are used to indicate how a tables TIMESTAMP field + should be treated. It can be set to the current timestamp on insert or + update or both. + WARNING: The values are used for bit operations. If you change the + enum, you must keep the bitwise relation of the values.
For example: + (int) TIMESTAMP_AUTO_SET_ON_BOTH must be equal to + (int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE. + We use an enum here so that the debugger can display the value names. */ enum timestamp_auto_set_type { TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1, TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3 }; +#define clear_timestamp_auto_bits(_target_, _bits_) \ + (_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_)) /* Table cache entry struct */ From 057b8aa9a874491fb956a837e75d50dbc4c36a2e Mon Sep 17 00:00:00 2001 From: "pem@mysql.comhem.se" <> Date: Fri, 22 Apr 2005 12:53:48 +0200 Subject: [PATCH 48/65] Fixed BUG#9004: Inconsistent behaviour of SP re. warnings --- mysql-test/r/sp.result | 31 +++++++++++++++++++++++++++++++ mysql-test/t/sp.test | 24 ++++++++++++++++++++++++ sql/sql_error.cc | 2 +- sql/sql_parse.cc | 6 ++++-- 4 files changed, 60 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 4f2d0d69395..9f28acbcb7d 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -667,6 +667,8 @@ delete from t1| drop table if exists t3| create table t3 ( s char(16), d int)| call into_test4()| +Warnings: +Warning 1329 No data to FETCH select * from t3| s d into4 NULL @@ -1792,7 +1794,12 @@ end if; insert into t4 values (2, rc, t3); end| call bug1863(10)| +Warnings: +Note 1051 Unknown table 'temp_t1' +Warning 1329 No data to FETCH call bug1863(10)| +Warnings: +Warning 1329 No data to FETCH select * from t4| f1 rc t3 2 0 NULL @@ -2090,7 +2097,11 @@ begin end| call bug4579_1()| call bug4579_1()| +Warnings: +Warning 1329 No data to FETCH call bug4579_1()| +Warnings: +Warning 1329 No data to FETCH drop procedure bug4579_1| drop procedure bug4579_2| drop table t3| @@ -3010,4 +3021,24 @@ select @x| @x 2005 drop function bug8861| +drop procedure if exists bug9004_1| +drop procedure if exists bug9004_2| +create procedure bug9004_1(x char(16)) +begin +insert into t1 values (x, 42); +insert into t1 values (x, 17); +end| +create procedure bug9004_2(x char(16)) +call bug9004_1(x)| +call bug9004_1('12345678901234567')| +Warnings: +Warning 1265 Data truncated for column 'id' at row 1 +Warning 1265 Data truncated for column 'id' at row 2 +call bug9004_2('12345678901234567890')| +Warnings: +Warning 1265 Data truncated for column 'id' at row 1 +Warning 1265 Data truncated for column 'id' at row 2 +delete from t1| +drop procedure bug9004_1| +drop procedure bug9004_2| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 1974cf2eab5..7b3bff4eb55 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -3700,6 +3700,30 @@ select @x| drop function bug8861| +# +# BUG#9004: Inconsistent behaviour of SP re. warnings +# +--disable_warnings +drop procedure if exists bug9004_1| +drop procedure if exists bug9004_2| +--enable_warnings +create procedure bug9004_1(x char(16)) +begin + insert into t1 values (x, 42); + insert into t1 values (x, 17); +end| +create procedure bug9004_2(x char(16)) + call bug9004_1(x)| + +# Truncation warnings expected... 
+call bug9004_1('12345678901234567')| +call bug9004_2('12345678901234567890')| + +delete from t1| +drop procedure bug9004_1| +drop procedure bug9004_2| + + # # BUG#NNNN: New bug synopsis # diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 04fd27abef5..3bda16202b9 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -113,7 +113,7 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, !(thd->options & OPTION_SQL_NOTES)) DBUG_RETURN(0); - if (thd->query_id != thd->warn_id) + if (thd->query_id != thd->warn_id && !thd->spcont) mysql_reset_errors(thd, 0); thd->got_warning= 1; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index bef35dcfd0d..c5ef9f4e713 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2268,9 +2268,11 @@ mysql_execute_command(THD *thd) A better approach would be to reset this for any commands that is not a SHOW command or a select that only access local variables, but for now this is probably good enough. + Don't reset warnings when executing a stored routine. */ - if (all_tables || &lex->select_lex != lex->all_selects_list || - lex->spfuns.records || lex->spprocs.records) + if ((all_tables || &lex->select_lex != lex->all_selects_list || + lex->spfuns.records || lex->spprocs.records) && + !thd->spcont) mysql_reset_errors(thd, 0); #ifdef HAVE_REPLICATION From 945755161658cf6ecae3290cf511d3ecee9aa96f Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Fri, 22 Apr 2005 15:28:23 +0200 Subject: [PATCH 49/65] Fix for Bug #9318 drop database does not drop ndb tables --- mysql-test/r/ndb_database.result | 27 +++++++ mysql-test/t/ndb_database.test | 50 +++++++++++++ sql/ha_ndbcluster.cc | 123 ++++++++++++++++++------------- sql/ha_ndbcluster.h | 6 +- 4 files changed, 154 insertions(+), 52 deletions(-) create mode 100644 mysql-test/r/ndb_database.result create mode 100644 mysql-test/t/ndb_database.test diff --git a/mysql-test/r/ndb_database.result b/mysql-test/r/ndb_database.result new file mode 100644 index 00000000000..566a3eaf3dd --- /dev/null +++ b/mysql-test/r/ndb_database.result @@ -0,0 +1,27 @@ +drop table if exists t1; +drop database if exists mysqltest; +drop table if exists t1; +drop database if exists mysqltest; +create database mysqltest; +create database mysqltest; +create table mysqltest.t1 (a int primary key, b int) engine=ndb; +use mysqltest; +show tables; +Tables_in_mysqltest +t1 +drop database mysqltest; +use mysqltest; +show tables; +Tables_in_mysqltest +create database mysqltest; +create table mysqltest.t1 (c int, d int primary key) engine=ndb; +use mysqltest; +show tables; +Tables_in_mysqltest +t1 +drop database mysqltest; +use mysqltest; +show tables; +Tables_in_mysqltest +drop table if exists t1; +drop database if exists mysqltest; diff --git a/mysql-test/t/ndb_database.test b/mysql-test/t/ndb_database.test new file mode 100644 index 00000000000..1264c3fa73b --- /dev/null +++ b/mysql-test/t/ndb_database.test @@ -0,0 +1,50 @@ +-- source include/have_ndb.inc +-- source include/have_multi_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +connection server1; +drop table if exists t1; +drop database if exists mysqltest; +connection server2; +drop table if exists t1; +drop database if exists mysqltest; +--enable_warnings + +# +# Check that all tables in a database are dropped when database is dropped +# + +connection server1; +create database mysqltest; + +connection server2; +create database mysqltest; +create table mysqltest.t1 (a int primary key, b int) engine=ndb; +use mysqltest; +show tables; + 
+connection server1; +drop database mysqltest; + +connection server2; +use mysqltest; +show tables; + +connection server1; +create database mysqltest; +create table mysqltest.t1 (c int, d int primary key) engine=ndb; +use mysqltest; +show tables; + +connection server2; +drop database mysqltest; + +connection server1; +use mysqltest; +show tables; + +--disable_warnings +drop table if exists t1; +drop database if exists mysqltest; +--enable_warnings diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bf8db96c76f..230ca2826b2 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3965,18 +3965,6 @@ int ha_ndbcluster::drop_table() } -/* - Drop a database in NDB Cluster - */ - -int ndbcluster_drop_database(const char *path) -{ - DBUG_ENTER("ndbcluster_drop_database"); - // TODO drop all tables for this database - DBUG_RETURN(1); -} - - longlong ha_ndbcluster::get_auto_increment() { DBUG_ENTER("get_auto_increment"); @@ -4325,6 +4313,53 @@ extern "C" byte* tables_get_key(const char *entry, uint *length, } +/* + Drop a database in NDB Cluster + */ + +int ndbcluster_drop_database(const char *path) +{ + DBUG_ENTER("ndbcluster_drop_database"); + THD *thd= current_thd; + char dbname[FN_HEADLEN]; + Ndb* ndb; + NdbDictionary::Dictionary::List list; + uint i; + char *tabname; + List<char> drop_list; + ha_ndbcluster::set_dbname(path, (char *)&dbname); + DBUG_PRINT("enter", ("db: %s", dbname)); + + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + + // List tables in NDB + NDBDICT *dict= ndb->getDictionary(); + if (dict->listObjects(list, + NdbDictionary::Object::UserTable) != 0) + ERR_RETURN(dict->getNdbError()); + for (i= 0 ; i < list.count ; i++) + { + NdbDictionary::Dictionary::List::Element& t= list.elements[i]; + DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name)); + + // Add only tables that belongs to db + if (my_strcasecmp(system_charset_info, t.database, dbname)) + continue; + DBUG_PRINT("info", ("%s must be dropped", t.name)); + drop_list.push_back(thd->strdup(t.name)); + } + // Drop any tables belonging to database + ndb->setDatabaseName(dbname); + List_iterator_fast<char> it(drop_list); + while ((tabname=it++)) + if (dict->dropTable(tabname)) + ERR_RETURN(dict->getNdbError()); + + DBUG_RETURN(0); +} + + int ndbcluster_find_files(THD *thd,const char *db,const char *path, const char *wild, bool dir, List<char> *files) { @@ -4595,26 +4630,31 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) DBUG_VOID_RETURN; } -/* - Set m_tabname from full pathname to table file +/** * Set a given location from full pathname to database name * */ - -void ha_ndbcluster::set_tabname(const char *path_name) +void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) { char *end, *ptr; /* Scan name from the end */ - end= strend(path_name)-1; - ptr= end; + ptr= strend(path_name)-1; + while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { + ptr--; + } + ptr--; + end= ptr; while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { ptr--; } uint name_len= end - ptr; - memcpy(m_tabname, ptr + 1, end - ptr); - m_tabname[name_len]= '\0'; + memcpy(dbname, ptr + 1, name_len); + dbname[name_len]= '\0'; #ifdef __WIN__ /* Put to lower case */ - ptr= m_tabname; + + ptr= dbname; while (*ptr != '\0') { *ptr= tolower(*ptr); @@ -4623,6 +4663,15 @@ void ha_ndbcluster::set_tabname(const char *path_name) #endif } +/* + Set m_dbname from full pathname to table file + */ + +void ha_ndbcluster::set_dbname(const char *path_name) +{ + set_dbname(path_name, m_dbname);
+} + /** * Set a given location from full pathname to table file * @@ -4652,39 +4701,13 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname) #endif } - /* - Set m_dbname from full pathname to table file - + Set m_tabname from full pathname to table file */ -void ha_ndbcluster::set_dbname(const char *path_name) +void ha_ndbcluster::set_tabname(const char *path_name) { - char *end, *ptr; - - /* Scan name from the end */ - ptr= strend(path_name)-1; - while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { - ptr--; - } - ptr--; - end= ptr; - while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { - ptr--; - } - uint name_len= end - ptr; - memcpy(m_dbname, ptr + 1, name_len); - m_dbname[name_len]= '\0'; -#ifdef __WIN__ - /* Put to lower case */ - - ptr= m_dbname; - - while (*ptr != '\0') { - *ptr= tolower(*ptr); - ptr++; - } -#endif + set_tabname(path_name, m_tabname); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 9fab34448ce..ac2d27b9ec7 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -147,7 +147,10 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type(); - + + static void set_dbname(const char *pathname, char *dbname); + static void set_tabname(const char *pathname, char *tabname); + private: int alter_table_name(const char *to); int drop_table(); @@ -183,7 +186,6 @@ class ha_ndbcluster: public handler void set_dbname(const char *pathname); void set_tabname(const char *pathname); - void set_tabname(const char *pathname, char *tabname); bool set_hidden_key(NdbOperation*, uint fieldnr, const byte* field_ptr); From 307c0cf9083b1fb9f10580197edef147369fb101 Mon Sep 17 00:00:00 2001 From: "sergefp@mysql.com" <> Date: Sat, 23 Apr 2005 00:13:46 +0400 Subject: [PATCH 50/65] Fix for BUG#8490: In mysql_make_view for join algorithm views we need to insert view's subqueries into select_lex->slave(->next)* chain. In case a join has several such views, don't add the same subqueries several times (this forms a loop on the above chain which breaks many parts of the code) --- mysql-test/r/view.result | 18 ++++++++++++++++++ mysql-test/t/view.test | 14 ++++++++++++++ sql/sql_view.cc | 14 +++++++++++--- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 59ff6abdde2..f8629e578f0 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -1694,3 +1694,21 @@ col1 col2 col2 col3 5 david NULL NULL DROP VIEW v1,v2,v3; DROP TABLE t1,t2; +create table t1 as select 1 A union select 2 union select 3; +create table t2 as select * from t1; +create view v1 as select * from t1 where a in (select * from t2); +select * from v1 A, v1 B where A.a = B.a; +A A +1 1 +2 2 +3 3 +create table t3 as select a a,a b from t2; +create view v2 as select * from t3 where +a in (select * from t1) or b in (select * from t2); +select * from v2 A, v2 B where A.a = B.b; +a b a b +1 1 1 1 +2 2 2 2 +3 3 3 3 +drop view v1, v2; +drop table t1, t2, t3; diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 0c3c81d0b89..0303605a9ef 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -1519,3 +1519,17 @@ SELECT a.col1,a.col2,b.col2,b.col3 DROP VIEW v1,v2,v3; DROP TABLE t1,t2; + +# BUG#8490 Select from views containing subqueries causes server to hang +# forever. 
+create table t1 as select 1 A union select 2 union select 3; +create table t2 as select * from t1; +create view v1 as select * from t1 where a in (select * from t2); +select * from v1 A, v1 B where A.a = B.a; +create table t3 as select a a,a b from t2; +create view v2 as select * from t3 where + a in (select * from t1) or b in (select * from t2); +select * from v2 A, v2 B where A.a = B.b; +drop view v1, v2; +drop table t1, t2, t3; + diff --git a/sql/sql_view.cc b/sql/sql_view.cc index be643c36d7d..21de99f2484 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -796,17 +796,25 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) /* Store WHERE clause for post-processing in setup_ancestor */ table->where= view_select->where; - /* - Add subqueries units to SELECT in which we merging current view. - + Add subqueries units to SELECT into which we merging current view. + + unit(->next)* chain starts with subqueries that are used by this + view and continues with subqueries that are used by other views. + We must not add any subquery twice (otherwise we'll form a loop), + to do this we remember in end_unit the first subquery that has + been already added. + NOTE: we do not support UNION here, so we take only one select */ + SELECT_LEX_NODE *end_unit= table->select_lex->slave; for (SELECT_LEX_UNIT *unit= lex->select_lex.first_inner_unit(); unit; unit= unit->next_unit()) { SELECT_LEX_NODE *save_slave= unit->slave; + if (unit == end_unit) + break; unit->include_down(table->select_lex); unit->slave= save_slave; // fix include_down initialisation } From b75789e3d8e25f263a2bbcd1cc749790a40156b8 Mon Sep 17 00:00:00 2001 From: "gbichot@quadita2.mysql.com" <> Date: Sat, 23 Apr 2005 00:05:05 +0200 Subject: [PATCH 51/65] Fixes for BUG#10039 "MEMORY engine is reported as HEAP", BUG#9738 "SHOW VARIABLES still displays the deprecated 'log_update' in 5.0", BUG#9542 "MySQL dies with signal 11 when it is using non-existent location of binary logs" --- mysql-test/r/create.result | 8 ++++---- mysql-test/r/ps_1general.result | 2 +- mysql-test/r/variables.result | 4 ++-- sql/handler.cc | 4 ++-- sql/log.cc | 7 +++++++ sql/set_var.cc | 1 - 6 files changed, 16 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index cafe6f23ccf..4f4400b4a9b 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -210,7 +210,7 @@ drop table if exists t1; SET SESSION storage_engine="heap"; SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -222,7 +222,7 @@ SET SESSION storage_engine="gemini"; ERROR 42000: Unknown table engine 'gemini' SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -371,7 +371,7 @@ drop database mysqltest; SET SESSION storage_engine="heap"; SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -383,7 +383,7 @@ SET SESSION storage_engine="gemini"; ERROR 42000: Unknown table engine 'gemini' SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index 0fe907ac8c1..6c616a99fb0 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -322,8 +322,8 @@ prepare stmt4 from ' show storage engines '; execute stmt4; Engine Support 
Comment MyISAM YES/NO Default engine as of MySQL 3.23 with great performance -HEAP YES/NO Alias for MEMORY MEMORY YES/NO Hash based, stored in memory, useful for temporary tables +HEAP YES/NO Alias for MEMORY MERGE YES/NO Collection of identical MyISAM tables MRG_MYISAM YES/NO Alias for MERGE ISAM YES/NO Obsolete storage engine, now replaced by MyISAM diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 0f4f25dfdb5..b3850bcc72c 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -148,7 +148,7 @@ timed_mutexes OFF set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE"; show local variables like 'storage_engine'; Variable_name Value -storage_engine HEAP +storage_engine MEMORY show global variables like 'storage_engine'; Variable_name Value storage_engine MERGE @@ -254,7 +254,7 @@ set storage_engine=MERGE, big_tables=2; ERROR 42000: Variable 'big_tables' can't be set to the value of '2' show local variables like 'storage_engine'; Variable_name Value -storage_engine HEAP +storage_engine MEMORY set SESSION query_cache_size=10000; ERROR HY000: Variable 'query_cache_size' is a GLOBAL variable and should be set with SET GLOBAL set GLOBAL storage_engine=DEFAULT; diff --git a/sql/handler.cc b/sql/handler.cc index 14b8974ece9..542efaba2bf 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -70,10 +70,10 @@ struct show_table_type_st sys_table_types[]= { {"MyISAM", &have_yes, "Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM}, - {"HEAP", &have_yes, - "Alias for MEMORY", DB_TYPE_HEAP}, {"MEMORY", &have_yes, "Hash based, stored in memory, useful for temporary tables", DB_TYPE_HEAP}, + {"HEAP", &have_yes, + "Alias for MEMORY", DB_TYPE_HEAP}, {"MERGE", &have_yes, "Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM}, {"MRG_MYISAM",&have_yes, diff --git a/sql/log.cc b/sql/log.cc index 1d6bb4cdf41..fc74223d7b6 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2857,6 +2857,13 @@ int TC_LOG_BINLOG::open(const char *opt_name) pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST); pthread_cond_init (&COND_prep_xids, 0); + if (!my_b_inited(&index_file)) + { + /* There was a failure to open the index file, can't open the binlog */ + cleanup(); + return 1; + } + if (using_heuristic_recover()) { /* generate a new binlog to mask a corrupted one */ diff --git a/sql/set_var.cc b/sql/set_var.cc index 70d64b5dac6..bb3db177936 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -847,7 +847,6 @@ struct show_var_st init_vars[]= { {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, #endif {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, - {"log_update", (char*) &opt_update_log, SHOW_BOOL}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS}, {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS}, From 51064c0575d0ccf0c3b563f8f2d5591d40e50345 Mon Sep 17 00:00:00 2001 From: "sergefp@mysql.com" <> Date: Sat, 23 Apr 2005 06:55:43 +0400 Subject: [PATCH 52/65] Fix for BUG#10107: Memory leak in view over subquery: In mysql_make_view when joining subquery lists, take into account that both lists can have length > 1 (see also fix for BUG#8490) (note to bk trigger: this commit is about BUG#10107) --- sql/sql_view.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 21de99f2484..ce08763015f 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ 
-808,13 +808,15 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) NOTE: we do not support UNION here, so we take only one select */ SELECT_LEX_NODE *end_unit= table->select_lex->slave; + SELECT_LEX_UNIT *next_unit; for (SELECT_LEX_UNIT *unit= lex->select_lex.first_inner_unit(); unit; - unit= unit->next_unit()) + unit= next_unit) { - SELECT_LEX_NODE *save_slave= unit->slave; if (unit == end_unit) break; + SELECT_LEX_NODE *save_slave= unit->slave; + next_unit= unit->next_unit(); unit->include_down(table->select_lex); unit->slave= save_slave; // fix include_down initialisation } From 6778b029e0d6e0256011d663f6c0f7142b93f595 Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Sat, 23 Apr 2005 17:33:12 +0200 Subject: [PATCH 53/65] ndb - bug#10029 fix --- ndb/include/kernel/signaldata/TuxMaint.hpp | 4 +- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 8 + ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 80 +++++----- ndb/src/kernel/blocks/dbtup/Notes.txt | 25 ++- ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 11 ++ ndb/src/ndbapi/ndberror.c | 7 +- ndb/test/ndbapi/testOIBasic.cpp | 160 ++++++++++++++----- 7 files changed, 205 insertions(+), 90 deletions(-) diff --git a/ndb/include/kernel/signaldata/TuxMaint.hpp b/ndb/include/kernel/signaldata/TuxMaint.hpp index 9fee031dc41..4518f0531ea 100644 --- a/ndb/include/kernel/signaldata/TuxMaint.hpp +++ b/ndb/include/kernel/signaldata/TuxMaint.hpp @@ -36,8 +36,8 @@ public: }; enum ErrorCode { NoError = 0, // must be zero - SearchError = 895, // add + found or remove + not found - NoMemError = 827 + SearchError = 901, // add + found or remove + not found + NoMemError = 902 }; STATIC_CONST( SignalLength = 8 ); private: diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index b48546576f9..06cfd420eac 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1777,6 +1777,10 @@ private: Operationrec* const regOperPtr, Tablerec* const regTabPtr); + int addTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr); + // these crash the node on error void executeTuxCommitTriggers(Signal* signal, @@ -1787,6 +1791,10 @@ private: Operationrec* regOperPtr, Tablerec* const regTabPtr); + void removeTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr); + // ***************************************************************** // Error Handling routines. 
// ***************************************************************** diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index aac5c326cad..575d08efffc 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -973,25 +973,7 @@ Dbtup::executeTuxInsertTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpAdd; - // loop over index list - const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - ljam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - ljamEntry(); - if (req->errorCode != 0) { - ljam(); - terrorCode = req->errorCode; - return -1; - } - triggerList.next(triggerPtr); - } - return 0; + return addTuxEntries(signal, regOperPtr, regTabPtr); } int @@ -1012,9 +994,18 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpAdd; - // loop over index list + return addTuxEntries(signal, regOperPtr, regTabPtr); +} + +int +Dbtup::addTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr) +{ + TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers; TriggerPtr triggerPtr; + Uint32 failPtrI; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { ljam(); @@ -1026,11 +1017,29 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal, if (req->errorCode != 0) { ljam(); terrorCode = req->errorCode; - return -1; + failPtrI = triggerPtr.i; + goto fail; } triggerList.next(triggerPtr); } return 0; +fail: + req->opInfo = TuxMaintReq::OpRemove; + triggerList.first(triggerPtr); + while (triggerPtr.i != failPtrI) { + ljam(); + req->indexId = triggerPtr.p->indexId; + req->errorCode = RNIL; + EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, + signal, TuxMaintReq::SignalLength); + ljamEntry(); + ndbrequire(req->errorCode == 0); + triggerList.next(triggerPtr); + } +#ifdef VM_TRACE + ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl; +#endif + return -1; } int @@ -1049,7 +1058,6 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, { TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); // get version - // XXX could add prevTupVersion to Operationrec Uint32 tupVersion; if (regOperPtr->optype == ZINSERT) { if (!
regOperPtr->deleteInsertFlag) @@ -1087,21 +1095,7 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpRemove; - // loop over index list - const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - ljam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - ljamEntry(); - // commit must succeed - ndbrequire(req->errorCode == 0); - triggerList.next(triggerPtr); - } + removeTuxEntries(signal, regOperPtr, regTabPtr); } void @@ -1132,7 +1126,15 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpRemove; - // loop over index list + removeTuxEntries(signal, regOperPtr, regTabPtr); +} + +void +Dbtup::removeTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr) +{ + TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers; TriggerPtr triggerPtr; triggerList.first(triggerPtr); @@ -1143,7 +1145,7 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); ljamEntry(); - // abort must succeed + // must succeed ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); } diff --git a/ndb/src/kernel/blocks/dbtup/Notes.txt b/ndb/src/kernel/blocks/dbtup/Notes.txt index 9d47c591fe8..c2973bb0a76 100644 --- a/ndb/src/kernel/blocks/dbtup/Notes.txt +++ b/ndb/src/kernel/blocks/dbtup/Notes.txt @@ -135,6 +135,24 @@ abort DELETE none - 1) alternatively, store prevTupVersion in operation record. +Abort from ordered index error +------------------------------ + +Obviously, index update failure causes operation failure. +The operation is then aborted later by TC. + +The problem here is with multiple indexes. Some may have been +updated successfully before the one that failed. Therefore +the trigger code aborts the successful ones already in +the prepare phase. + +In other words, multiple indexes are treated as one. + +Abort from any cause +-------------------- + +[ hairy stuff ] + Read attributes, query status ----------------------------- @@ -170,14 +188,11 @@ used to decide if the scan can see the tuple. This signal may also be called during any phase since commit/abort of all operations is not done in one time-slice. -Commit and abort ---------------- - -[ hairy stuff ] - Problems -------- Current abort code can destroy a tuple version too early. This happens in test case "ticuur" (insert-commit-update-update-rollback), if abort of first update arrives before abort of second update.
+ +vim: set textwidth=68: diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp index 389192fd0cf..9f9d4cb68e3 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp @@ -23,6 +23,11 @@ int Dbtux::allocNode(Signal* signal, NodeHandle& node) { + if (ERROR_INSERTED(12007)) { + jam(); + CLEAR_ERROR_INSERT_VALUE; + return TuxMaintReq::NoMemError; + } Frag& frag = node.m_frag; Uint32 pageId = NullTupLoc.getPageId(); Uint32 pageOffset = NullTupLoc.getPageOffset(); @@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node) node.m_loc = TupLoc(pageId, pageOffset); node.m_node = reinterpret_cast<TreeNode*>(node32); ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0); + } else { + switch (errorCode) { + case 827: + errorCode = TuxMaintReq::NoMemError; + break; + } } return errorCode; } diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 94e1aeb5545..f052200b67d 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -175,10 +175,11 @@ ErrorBundle ErrorCodes[] = { */ { 623, IS, "623" }, { 624, IS, "624" }, - { 625, IS, "Out of memory in Ndb Kernel, index part (increase IndexMemory)" }, + { 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" }, { 800, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" }, { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" }, - { 827, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" }, + { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" }, + { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" }, { 832, IS, "832" }, /** @@ -205,7 +206,7 @@ ErrorBundle ErrorCodes[] = { * Internal errors */ { 892, IE, "Inconsistent hash index. The index needs to be dropped and recreated" }, - { 895, IE, "Inconsistent ordered index.
The index needs to be dropped and recreated" }, { 202, IE, "202" }, { 203, IE, "203" }, { 207, IE, "207" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index e6d3844d18e..9f8da850ff4 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -228,6 +228,8 @@ struct Par : public Opt { bool m_verify; // deadlock possible bool m_deadlock; + // abort percentabge + unsigned m_abortpct; // timer location Par(const Opt& opt) : Opt(opt), @@ -243,7 +245,8 @@ struct Par : public Opt { m_pctrange(0), m_randomkey(false), m_verify(false), - m_deadlock(false) { + m_deadlock(false), + m_abortpct(0) { } }; @@ -684,7 +687,7 @@ struct Con { NdbResultSet* m_resultset; enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive }; ScanMode m_scanmode; - enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther }; + enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther }; ErrType m_errtype; Con() : m_ndb(0), m_dic(0), m_tx(0), m_op(0), @@ -705,7 +708,7 @@ struct Con { int setValue(int num, const char* addr); int setBound(int num, int type, const void* value); int execute(ExecType t); - int execute(ExecType t, bool& deadlock); + int execute(ExecType t, bool& deadlock, bool& nospace); int openScanRead(unsigned scanbat, unsigned scanpar); int openScanExclusive(unsigned scanbat, unsigned scanpar); int executeScan(); @@ -818,17 +821,21 @@ Con::execute(ExecType t) } int -Con::execute(ExecType t, bool& deadlock) +Con::execute(ExecType t, bool& deadlock, bool& nospace) { int ret = execute(t); - if (ret != 0) { - if (deadlock && m_errtype == ErrDeadlock) { - LL3("caught deadlock"); - ret = 0; - } + if (ret != 0 && deadlock && m_errtype == ErrDeadlock) { + LL3("caught deadlock"); + ret = 0; } else { deadlock = false; } + if (ret != 0 && nospace && m_errtype == ErrNospace) { + LL3("caught nospace"); + ret = 0; + } else { + nospace = false; + } CHK(ret == 0); return 0; } @@ -940,6 +947,8 @@ Con::printerror(NdbOut& out) die += (code == g_opt.m_die); if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499) m_errtype = ErrDeadlock; + if (code == 826 || code == 827 || code == 902) + m_errtype = ErrNospace; } if (m_op && m_op->getNdbError().code != 0) { LL0(++any << " op : error " << m_op->getNdbError()); @@ -1128,6 +1137,16 @@ irandom(unsigned n) return i; } +static bool +randompct(unsigned pct) +{ + if (pct == 0) + return false; + if (pct >= 100) + return true; + return urandom(100) < pct; +} + // Val - typed column value struct Val { @@ -1565,8 +1584,8 @@ struct Set { // row methods bool exist(unsigned i) const; Row::Op pending(unsigned i) const; - void notpending(unsigned i); - void notpending(const Lst& lst); + void notpending(unsigned i, ExecType et = Commit); + void notpending(const Lst& lst, ExecType et = Commit); void calc(Par par, unsigned i); int insrow(Par par, unsigned i); int updrow(Par par, unsigned i); @@ -1775,23 +1794,30 @@ Set::putval(unsigned i, bool force) } void -Set::notpending(unsigned i) +Set::notpending(unsigned i, ExecType et) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) - row.m_exist = true; - if (row.m_pending == Row::DelOp) - row.m_exist = false; + if (et == Commit) { + if (row.m_pending == Row::InsOp) + row.m_exist = true; + if (row.m_pending == Row::DelOp) + row.m_exist = false; + } else { + if (row.m_pending == Row::InsOp) + row.m_exist = false; + if (row.m_pending == Row::DelOp) + row.m_exist = true; + } row.m_pending = Row::NoOp; } void -Set::notpending(const Lst& lst) 
+Set::notpending(const Lst& lst, ExecType et) { for (unsigned j = 0; j < lst.m_cnt; j++) { unsigned i = lst.m_arr[j]; - notpending(i); + notpending(i, et); } } @@ -2121,14 +2147,20 @@ pkinsert(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { bool deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + bool nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); con.closeTransaction(); if (deadlock) { LL1("pkinsert: stop on deadlock"); return 0; } + if (nospace) { + LL1("pkinsert: cnt=" << j << " stop on nospace"); + return 0; + } set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); @@ -2136,14 +2168,20 @@ pkinsert(Par par) } if (lst.cnt() != 0) { bool deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + bool nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); con.closeTransaction(); if (deadlock) { LL1("pkinsert: stop on deadlock"); return 0; } + if (nospace) { + LL1("pkinsert: end: stop on nospace"); + return 0; + } set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); return 0; } @@ -2160,6 +2198,7 @@ pkupdate(Par par) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -2175,27 +2214,37 @@ pkupdate(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkupdate: stop on deadlock"); break; } + if (nospace) { + LL1("pkupdate: cnt=" << j << " stop on nospace"); + break; + } con.closeTransaction(); set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); } } - if (! deadlock && lst.cnt() != 0) { + if (! deadlock && ! nospace && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkupdate: stop on deadlock"); + } else if (nospace) { + LL1("pkupdate: end: stop on nospace"); } else { set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); } } @@ -2212,6 +2261,7 @@ pkdelete(Par par) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -2226,27 +2276,31 @@ pkdelete(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkdelete: stop on deadlock"); break; } con.closeTransaction(); set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); } } - if (! deadlock && lst.cnt() != 0) { + if (! deadlock && ! 
nospace && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkdelete: stop on deadlock"); } else { set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); } } @@ -2730,6 +2784,10 @@ readverify(Par par) if (par.m_noverify) return 0; par.m_verify = true; + if (par.m_abortpct != 0) { + LL2("skip verify in this version"); // implement in 5.0 version + par.m_verify = false; + } CHK(pkread(par) == 0); CHK(scanreadall(par) == 0); return 0; @@ -3028,11 +3086,11 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode) for (n = 0; n < threads; n++) { LL4("start " << n); Thr& thr = *g_thrlist[n]; - thr.m_par.m_tab = par.m_tab; - thr.m_par.m_set = par.m_set; - thr.m_par.m_tmr = par.m_tmr; - thr.m_par.m_lno = par.m_lno; - thr.m_par.m_slno = par.m_slno; + Par oldpar = thr.m_par; + // update parameters + thr.m_par = par; + thr.m_par.m_no = oldpar.m_no; + thr.m_par.m_con = oldpar.m_con; thr.m_func = func; thr.start(); } @@ -3143,6 +3201,24 @@ tbusybuild(Par par) return 0; } +static int +trollback(Par par) +{ + par.m_abortpct = 50; + RUNSTEP(par, droptable, ST); + RUNSTEP(par, createtable, ST); + RUNSTEP(par, invalidatetable, MT); + RUNSTEP(par, pkinsert, MT); + RUNSTEP(par, createindex, ST); + RUNSTEP(par, invalidateindex, MT); + RUNSTEP(par, readverify, ST); + for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) { + RUNSTEP(par, mixedoperations, MT); + RUNSTEP(par, readverify, ST); + } + return 0; +} + static int ttimebuild(Par par) { @@ -3252,10 +3328,12 @@ struct TCase { static const TCase tcaselist[] = { TCase("a", tbuild, "index build"), - TCase("b", tpkops, "pk operations"), - TCase("c", tpkopsread, "pk operations and scan reads"), - TCase("d", tmixedops, "pk operations and scan operations"), - TCase("e", tbusybuild, "pk operations and index build"), + // "b" in 5.0 + TCase("c", tpkops, "pk operations"), + TCase("d", tpkopsread, "pk operations and scan reads"), + TCase("e", tmixedops, "pk operations and scan operations"), + TCase("f", tbusybuild, "pk operations and index build"), + TCase("g", trollback, "operations with random rollbacks"), TCase("t", ttimebuild, "time index build"), TCase("u", ttimemaint, "time index maintenance"), TCase("v", ttimescan, "time full scan table vs index on pk"), From 48d6d545c01a85452353d5c4e0ddb153c331ea8b Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Sat, 23 Apr 2005 18:51:18 +0200 Subject: [PATCH 54/65] ndb - post merge 4.1->5.0 --- ndb/src/kernel/blocks/ERROR_codes.txt | 3 +- ndb/src/ndbapi/ndberror.c | 1 - ndb/test/ndbapi/testOIBasic.cpp | 112 +++++++++----------------- 3 files changed, 38 insertions(+), 78 deletions(-) diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index a30021607cc..fedddb58c0d 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -10,7 +10,7 @@ Next DBTC 8035 Next CMVMI 9000 Next BACKUP 10022 Next DBUTIL 11002 -Next DBTUX 12007 +Next DBTUX 12008 Next SUMA 13001 TESTING NODE FAILURE, ARBITRATION @@ -443,6 +443,7 @@ Test routing of signals: Ordered index: -------------- +12007: Make next alloc node fail with no memory error Dbdict: ------- diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index cb8af1e26f6..ac9bda5a9f3 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -184,7 +184,6 @@ 
ErrorBundle ErrorCodes[] = { { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" }, { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" }, { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" }, - { 902, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" }, { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" }, { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" }, { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index e9e6e7638d3..c7c9f417d1a 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -164,6 +164,16 @@ irandom(unsigned n) return i; } +static bool +randompct(unsigned pct) +{ + if (pct == 0) + return false; + if (pct >= 100) + return true; + return urandom(100) < pct; +} + // log and error macros static NdbMutex *ndbout_mutex = NULL; @@ -1653,36 +1663,6 @@ createindex(Par par) // data sets -static unsigned -urandom(unsigned n) -{ - if (n == 0) - return 0; - unsigned i = random() % n; - return i; -} - -static int -irandom(unsigned n) -{ - if (n == 0) - return 0; - int i = random() % n; - if (random() & 0x1) - i = -i; - return i; -} - -static bool -randompct(unsigned pct) -{ - if (pct == 0) - return false; - if (pct >= 100) - return true; - return urandom(100) < pct; -} - // Val - typed column value struct Val { @@ -2659,26 +2639,30 @@ Set::pending(unsigned i, unsigned mask) const } void -Set::notpending(unsigned i) +Set::notpending(unsigned i, ExecType et) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) { - row.m_exist = true; - } else if (row.m_pending == Row::UpdOp) { - ; - } else if (row.m_pending == Row::DelOp) { - row.m_exist = false; + if (et == Commit) { + if (row.m_pending == Row::InsOp) + row.m_exist = true; + if (row.m_pending == Row::DelOp) + row.m_exist = false; + } else { + if (row.m_pending == Row::InsOp) + row.m_exist = false; + if (row.m_pending == Row::DelOp) + row.m_exist = true; } row.m_pending = Row::NoOp; } void -Set::notpending(const Lst& lst) +Set::notpending(const Lst& lst, ExecType et) { for (unsigned j = 0; j < lst.m_cnt; j++) { unsigned i = lst.m_arr[j]; - notpending(i); + notpending(i, et); } } @@ -2870,34 +2854,6 @@ Set::putval(unsigned i, bool force, unsigned n) return 0; } -void -Set::notpending(unsigned i, ExecType et) -{ - assert(m_row[i] != 0); - Row& row = *m_row[i]; - if (et == Commit) { - if (row.m_pending == Row::InsOp) - row.m_exist = true; - if (row.m_pending == Row::DelOp) - row.m_exist = false; - } else { - if (row.m_pending == Row::InsOp) - row.m_exist = false; - if (row.m_pending == Row::DelOp) - row.m_exist = true; - } - row.m_pending = Row::NoOp; -} - -void -Set::notpending(const Lst& lst, ExecType et) -{ - for (unsigned j = 0; j < lst.m_cnt; j++) { - unsigned i = lst.m_arr[j]; - notpending(i, et); - } -} - int Set::verify(Par par, const Set& set2) const { @@ -3511,6 +3467,7 @@ hashindexupdate(Par par, const ITab& itab) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? 
j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -3528,7 +3485,7 @@ hashindexupdate(Par par, const ITab& itab) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexupdate: stop on deadlock [at 1]"); break; @@ -3544,9 +3501,9 @@ hashindexupdate(Par par, const ITab& itab) } if (! deadlock && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { - LL1("hashindexupdate: stop on deadlock [at 1]"); + LL1("hashindexupdate: stop on deadlock [at 2]"); } else { set.lock(); set.notpending(lst); @@ -3567,6 +3524,7 @@ hashindexdelete(Par par, const ITab& itab) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -3581,7 +3539,7 @@ hashindexdelete(Par par, const ITab& itab) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexdelete: stop on deadlock [at 1]"); break; @@ -3596,7 +3554,7 @@ hashindexdelete(Par par, const ITab& itab) } if (! deadlock && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexdelete: stop on deadlock [at 2]"); } else { @@ -3968,6 +3926,7 @@ scanupdatetable(Par par) CHK(con2.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; while (1) { int ret; deadlock = par.m_deadlock; @@ -4003,7 +3962,7 @@ scanupdatetable(Par par) set.unlock(); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdatetable: stop on deadlock [at 2]"); goto out; @@ -4020,7 +3979,7 @@ scanupdatetable(Par par) CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); if (ret == 2 && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdatetable: stop on deadlock [at 3]"); goto out; @@ -4067,6 +4026,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK(con2.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; while (1) { int ret; deadlock = par.m_deadlock; @@ -4102,7 +4062,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) set.unlock(); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdateindex: stop on deadlock [at 2]"); goto out; @@ -4119,7 +4079,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); if (ret == 2 && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdateindex: stop on deadlock [at 3]"); goto out; From d9d1c933b05b3d35ad9a45d052d97ffdba479f11 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 24 Apr 2005 20:34:42 +0200 Subject: [PATCH 
55/65] bug#9924 - ndb backup - abort handling uninit variable --- ndb/src/kernel/blocks/backup/Backup.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index 713991a4f58..d1bf4e55f95 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -2263,6 +2263,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->backupDataLen = req->backupDataLen; ptr.p->masterData.dropTrig.tableId = RNIL; ptr.p->masterData.alterTrig.tableId = RNIL; + ptr.p->masterData.errorCode = 0; ptr.p->noOfBytes = 0; ptr.p->noOfRecords = 0; ptr.p->noOfLogBytes = 0; From 3022cbc60c79ef8efcc767e2fb5ebc4f2981c1dc Mon Sep 17 00:00:00 2001 From: "guilhem@mysql.com" <> Date: Sun, 24 Apr 2005 22:37:43 +0200 Subject: [PATCH 56/65] Informing of deprecation of mysql_tableinfo by INFORMATION_SCHEMA (and I'm thinking of removing the script in 5.1). --- scripts/mysql_tableinfo.sh | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/scripts/mysql_tableinfo.sh b/scripts/mysql_tableinfo.sh index f5083a776c6..2ed7e381fa3 100644 --- a/scripts/mysql_tableinfo.sh +++ b/scripts/mysql_tableinfo.sh @@ -6,6 +6,14 @@ use DBI; =head1 NAME +WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA +pseudo-database which contains always up-to-date metadata information +about all tables. So instead of using this script one can now +simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES, +INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables. +Please see the MySQL manual for more information about INFORMATION_SCHEMA. +This script will be removed from the MySQL distribution in version 5.1. + mysql_tableinfo - creates and populates information tables with the output of SHOW DATABASES, SHOW TABLES (or SHOW TABLE STATUS), SHOW COLUMNS and SHOW INDEX. @@ -62,6 +70,19 @@ GetOptions( \%opt, "quiet|q", ) or usage("Invalid option"); +if (!$opt{'quiet'}) + { + print <quote($tbl_like_wild); if (!$opt{'quiet'}) { - print "\n!! This program is doing to do:\n\n"; + print "\n!! This program is going to do:\n\n"; print "**DROP** TABLE ...\n" if ($opt{'clear'} or $opt{'clear-only'}); print "**DELETE** FROM ... WHERE `Database` LIKE $db_like_wild AND `Table` LIKE $tbl_like_wild **INSERT** INTO ... @@ -456,17 +477,14 @@ UNIX domain socket to use when connecting to server =head1 WARRANTY -This software is free and comes without warranty of any kind. You -should never trust backup software without studying the code yourself. -Study the code inside this script and only rely on it if I believe -that it does the right thing for you. +This software is free and comes without warranty of any kind. Patches adding bug fixes, documentation and new features are welcome. =head1 TO DO -Use extended inserts to be faster (for servers with many databases -or tables). But to do that, must care about net-buffer-length. +Nothing: starting from MySQL 5.0, this program is replaced by the +INFORMATION_SCHEMA pseudo-database. 
=head1 AUTHOR From 763d835a3c073e2726c45b32b5b42d51a5d2fb68 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 25 Apr 2005 08:35:18 +0200 Subject: [PATCH 57/65] bug#9924 - ndb backup remove incorrect assertion --- ndb/src/kernel/blocks/backup/Backup.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index d1bf4e55f95..3ef73beb8d2 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -665,10 +665,6 @@ Backup::checkNodeFail(Signal* signal, NodeId newCoord, Uint32 theFailedNodes[NodeBitmask::Size]) { - ndbrequire( ptr.p->nodes.get(newCoord)); /* just to make sure newCoord - * is part of the backup - */ - NdbNodeBitmask mask; mask.assign(2, theFailedNodes); From 12e0d4f588ee6f1a05acddd7a48fd67d4e1de25e Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 25 Apr 2005 08:43:16 +0200 Subject: [PATCH 58/65] ndb - autotest add more tests to verify correctness of backups --- ndb/test/run-test/daily-basic-tests.txt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 2d7c435e8b4..e7753f758a1 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -6,26 +6,50 @@ max-time: 600 cmd: atrt-testBackup args: -n NFMaster T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n NFMasterAsSlave T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n NFSlave T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n FailMaster T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n FailMasterAsSlave T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n FailSlave T1 +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n BackupOne T1 T6 T3 I3 From a4a4c5cb69671329d863e5af04fe114734e731e5 Mon Sep 17 00:00:00 2001 From: "dlenev@brandersnatch.localdomain" <> Date: Mon, 25 Apr 2005 11:25:40 +0400 Subject: [PATCH 59/65] Fix for func_sapdb failures on 64-bit platforms (aka Bug #10040 "'func_sapdb' fails on 64bit build hosts") --- sql/item_timefunc.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 6715930bc61..23cd9c7ced2 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -800,14 +800,14 @@ static bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, We should check it before calc_time_diff call. 
From 3a9af0553b1ea1056ce0cfea531f46ca1f826b36 Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Mon, 25 Apr 2005 11:31:16 +0200
Subject: [PATCH 60/65] Post review fix of ndbcluster_drop_database

---
 sql/ha_ndbcluster.cc | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 230ca2826b2..bb20cedec2e 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000-2003 MySQL AB
+ /* Copyright (C) 2000-2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -4327,6 +4327,7 @@ int ndbcluster_drop_database(const char *path)
   uint i;
   char *tabname;
   List<char> drop_list;
+  int ret= 0;
   ha_ndbcluster::set_dbname(path, (char *)&dbname);
   DBUG_PRINT("enter", ("db: %s", dbname));
@@ -4353,10 +4354,15 @@ int ndbcluster_drop_database(const char *path)
   ndb->setDatabaseName(dbname);
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
-    if (dict->dropTable(tabname))
-      ERR_RETURN(dict->getNdbError());
-
-  DBUG_RETURN(0);
+  {
+    if (!dict->dropTable(tabname))
+    {
+      const NdbError err= dict->getNdbError();
+      if (err.code != 709)
+        ret= ndb_to_mysql_error(&err);
+    }
+  }
+  DBUG_RETURN(ret);
 }

From 4649d5c30f078b6c0254f877c239964978ee409a Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Mon, 25 Apr 2005 11:34:47 +0200
Subject: [PATCH 61/65] Post review fix of ndbcluster_drop_database, print
 error

---
 sql/ha_ndbcluster.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index bb20cedec2e..2077fcb8f2d 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4359,7 +4359,10 @@ int ndbcluster_drop_database(const char *path)
     {
       const NdbError err= dict->getNdbError();
       if (err.code != 709)
+      {
+        ERR_PRINT(err);
         ret= ndb_to_mysql_error(&err);
+      }
     }
   }
   DBUG_RETURN(ret);

From 93498bdf5a6d13b4bf9e8a3d996df09b8f68799e Mon Sep 17 00:00:00 2001
From: "mskold@mysql.com" <>
Date: Mon, 25 Apr 2005 11:54:00 +0200
Subject: [PATCH 62/65] Post review fix of ndbcluster_drop_database, typo

---
 sql/ha_ndbcluster.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 2077fcb8f2d..8c12cccf5ee 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4355,7 +4355,7 @@ int ndbcluster_drop_database(const char *path)
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    if (!dict->dropTable(tabname))
+    if (dict->dropTable(tabname))
     {
       const NdbError err= dict->getNdbError();
       if (err.code != 709)
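Taken together, patches 60-62 change ndbcluster_drop_database from bailing out on the first dictionary error (ERR_RETURN) to a drop loop that skips "table not found" (NDB error 709), prints anything else, and returns the last real failure. A hedged, standalone sketch of that accumulate-and-continue pattern; the struct, the helper, and the 266 code are hypothetical stand-ins, only the control flow mirrors the patches:

#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical stand-ins for the NDB dictionary API.
struct Error { int code; };
static const int ERR_NO_SUCH_TABLE= 709;  // benign: table already gone

static bool drop_table(const char *name, Error *err)
{
  if (strcmp(name, "gone") == 0)   { err->code= ERR_NO_SUCH_TABLE; return false; }
  if (strcmp(name, "locked") == 0) { err->code= 266; return false; }  // hypothetical
  return true;
}

static int drop_all(const std::vector<const char *> &tables)
{
  int ret= 0;
  for (const char *name : tables)
  {
    Error err;
    if (!drop_table(name, &err))
    {
      if (err.code != ERR_NO_SUCH_TABLE)  // skip "already dropped"
      {
        fprintf(stderr, "drop %s: error %d\n", name, err.code);
        ret= err.code;                    // remember, but keep going
      }
    }
  }
  return ret;  // 0 only if every drop succeeded or failed benignly
}

int main()
{
  printf("result: %d\n", drop_all({"t1", "gone", "locked", "t2"}));
  return 0;
}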
From 280bef18457bef5a489020db7c836ad5399378f2 Mon Sep 17 00:00:00 2001
From: "joreland@mysql.com" <>
Date: Mon, 25 Apr 2005 11:59:00 +0200
Subject: [PATCH 63/65] bug#9749 - ndb lock upgrade post review fix. don't
 init hash value on restart operations; make sure that lock mode is correct
 in the entire queue.

---
 ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 13 ++++++-------
 ndb/test/ndbapi/testOperations.cpp        | 16 ++++++++++++----
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index d566639489c..44c891fc220 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -5899,33 +5899,32 @@ Dbacc::check_lock_upgrade(Signal* signal,

   // Find end of parallell que
   tmp = lock_owner;
+  Uint32 lockMode = next.p->lockMode > lock_owner.p->lockMode ?
+    next.p->lockMode : lock_owner.p->lockMode;
   while(tmp.p->nextParallelQue != RNIL)
   {
     jam();
     tmp.i = tmp.p->nextParallelQue;
+    tmp.p->lockMode = lockMode;
     ptrCheckGuard(tmp, coprecsize, operationrec);
   }
+  tmp.p->lockMode = lockMode;

   next.p->prevParallelQue = tmp.i;
   tmp.p->nextParallelQue = next.i;

   OperationrecPtr save = operationRecPtr;
-  Uint32 lockMode = lock_owner.p->lockMode;

-  Uint32 TelementIsDisappeared = 0; // lock upgrade = all reads
-  Uint32 ThashValue = lock_owner.p->hashValue;
   Uint32 localdata[2];
   localdata[0] = lock_owner.p->localdata[0];
   localdata[1] = lock_owner.p->localdata[1];
   do
   {
-    next.p->elementIsDisappeared = TelementIsDisappeared;
-    next.p->hashValue = ThashValue;
     next.p->localdata[0] = localdata[0];
     next.p->localdata[1] = localdata[1];
+    next.p->lockMode = lockMode;

     operationRecPtr = next;
-    next.p->lockMode = lockMode;
-    TelementIsDisappeared = executeNextOperation(signal);
+    executeNextOperation(signal);
     if (next.p->nextParallelQue != RNIL)
     {
       jam();
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index 773511a0475..726f35b01fb 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -635,7 +635,14 @@ runLockUpgrade2(NDBT_Context* ctx, NDBT_Step* step){
     ndbout_c("wait 3 - done");

     NdbSleep_MilliSleep(200);
-    CHECK(hugoOps.execute_Commit(pNdb) == 0);
+    if(ctx->getProperty("LU_COMMIT", (Uint32)0) == 0)
+    {
+      CHECK(hugoOps.execute_Commit(pNdb) == 0);
+    }
+    else
+    {
+      CHECK(hugoOps.execute_Rollback(pNdb) == 0);
+    }
   } while(0);

   return result;
@@ -650,7 +657,7 @@ main(int argc, const char** argv){

   NDBT_TestSuite ts("testOperations");

-  for(Uint32 i = 0; i <6; i++)
+  for(Uint32 i = 0; i < 12; i++)
   {
     BaseString name("bug_9749");
     name.appfmt("_%d", i);
     NDBT_TestCaseImpl1* pt = new NDBT_TestCaseImpl1(&ts,
                                                     name.c_str(), "");
     pt->setProperty("LOCK_UPGRADE", 1 + (i & 1));
-    pt->setProperty("LU_OP", 1 + (i >> 1));
-
+    pt->setProperty("LU_OP", 1 + ((i >> 1) % 3));
+    pt->setProperty("LU_COMMIT", i / 6);
+
     pt->addInitializer(new NDBT_Initializer(pt,
 					    "runClearTable",
 					    runClearTable));
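The DbaccMain.cpp hunk above computes the strongest lock mode once and then stamps it on every operation record in the parallel queue, including the final record after the walk ends. A hedged toy model of that traversal, using plain pointers where the real code walks record indexes via ptrCheckGuard (all names illustrative):

#include <cstdio>
#include <cstddef>

// Toy model of an ACC parallel queue entry.
struct Op
{
  unsigned lockMode;      // 0 = shared, 1 = exclusive
  Op *nextParallelQue;    // NULL plays the role of RNIL
};

static void propagate_lock_mode(Op *lock_owner, Op *next_serial)
{
  // Strongest of the owner's mode and the upgrading operation's mode.
  unsigned lockMode= next_serial->lockMode > lock_owner->lockMode
                       ? next_serial->lockMode
                       : lock_owner->lockMode;

  // Stamp every record in the parallel queue, the last one included.
  Op *tmp= lock_owner;
  while (tmp->nextParallelQue != NULL)
  {
    tmp->lockMode= lockMode;
    tmp= tmp->nextParallelQue;
  }
  tmp->lockMode= lockMode;
}

int main()
{
  Op c= {0, NULL}, b= {0, &c}, owner= {0, &b}, upgrader= {1, NULL};
  propagate_lock_mode(&owner, &upgrader);
  printf("owner=%u b=%u c=%u\n", owner.lockMode, b.lockMode, c.lockMode);
  return 0;  // prints owner=1 b=1 c=1: the whole queue now agrees
}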
From cd10a69be2afb0f2fcea48446160c0ba2c645b9d Mon Sep 17 00:00:00 2001
From: "lenz@mysql.com" <>
Date: Mon, 25 Apr 2005 12:58:03 +0200
Subject: [PATCH 64/65] - bumped up version number in configure.in to 5.0.6
 now that engineering branched off for the 5.0.5 builds - tagged ChangeSet
 1.1882 as the "clone-5.0.5-build" branchoff point

---
 configure.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configure.in b/configure.in
index f4ced66a06d..882267c260e 100644
--- a/configure.in
+++ b/configure.in
@@ -6,7 +6,7 @@ AC_PREREQ(2.50)dnl	Minimum Autoconf version required.

 AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.5-beta)
+AM_INIT_AUTOMAKE(mysql, 5.0.6-beta)
 AM_CONFIG_HEADER(config.h)

 PROTOCOL_VERSION=10

From 83978acd5a8f5311fcbd04780fabeae1a6a2015d Mon Sep 17 00:00:00 2001
From: "lenz@mysql.com" <>
Date: Mon, 25 Apr 2005 13:18:44 +0200
Subject: [PATCH 65/65] - update NDB_VERSION_BUILD, too

---
 configure.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configure.in b/configure.in
index 882267c260e..a160f84a165 100644
--- a/configure.in
+++ b/configure.in
@@ -17,7 +17,7 @@ SHARED_LIB_VERSION=14:0:0

 # ndb version
 NDB_VERSION_MAJOR=5
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=4
+NDB_VERSION_BUILD=6
 NDB_VERSION_STATUS="beta"

 # Set all version vars based on $VERSION. How do we do this more elegant ?