From f16e5a0afa5f79dfae93c525574fcca5d8e7d6e2 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Fri, 9 Jul 2004 10:00:09 +0200 Subject: [PATCH 01/93] Removed some macros and moved some code. Improve portability --- ndb/include/kernel/signaldata/SignalData.hpp | 16 ++-------- ndb/include/mgmcommon/MgmtErrorReporter.hpp | 6 ---- ndb/include/util/Bitmask.hpp | 11 ++----- .../common/debugger/SignalLoggerManager.cpp | 25 ---------------- ndb/src/kernel/Main.cpp | 4 +-- ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 2 +- ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 2 +- ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp | 3 +- ndb/src/kernel/blocks/ndbfs/Pool.hpp | 1 - ndb/src/kernel/error/ErrorReporter.hpp | 29 ------------------- ndb/src/kernel/vm/MetaData.cpp | 2 +- ndb/src/kernel/vm/SimulatedBlock.cpp | 12 ++++---- ndb/src/kernel/vm/SimulatedBlock.hpp | 4 +-- ndb/src/kernel/vm/TransporterCallback.cpp | 25 +++++++++++++++- ndb/src/mgmclient/CommandInterpreter.cpp | 4 +-- ndb/src/mgmsrv/MgmtSrvr.cpp | 2 +- ndb/src/ndbapi/TransporterFacade.cpp | 15 +++++----- 17 files changed, 55 insertions(+), 108 deletions(-) diff --git a/ndb/include/kernel/signaldata/SignalData.hpp b/ndb/include/kernel/signaldata/SignalData.hpp index 511e7d30c21..6e5748217b2 100644 --- a/ndb/include/kernel/signaldata/SignalData.hpp +++ b/ndb/include/kernel/signaldata/SignalData.hpp @@ -21,20 +21,10 @@ #include #include -#ifndef NDB_ASSERT -#ifdef VM_TRACE -#define NDB_ASSERT(test, message) { if(!(test)) { printf(message); exit(-1); }} -#else -#define NDB_ASSERT(test, message) -#endif -#endif - -// Useful ASSERT macros... 
-#define ASSERT_BOOL(flag, message) NDB_ASSERT( (flag<=1), (message) ) +#define ASSERT_BOOL(flag, message) assert(flag<=1) #define ASSERT_RANGE(value, min, max, message) \ - NDB_ASSERT((value) >= (min) && (value) <= (max), (message)) -#define ASSERT_MAX(value, max, message) \ - NDB_ASSERT((value) <= (max), (message)) + assert((value) >= (min) && (value) <= (max)) +#define ASSERT_MAX(value, max, message) assert((value) <= (max)) #define SECTION(x) STATIC_CONST(x) diff --git a/ndb/include/mgmcommon/MgmtErrorReporter.hpp b/ndb/include/mgmcommon/MgmtErrorReporter.hpp index 925d9e6407a..0d980aa7245 100644 --- a/ndb/include/mgmcommon/MgmtErrorReporter.hpp +++ b/ndb/include/mgmcommon/MgmtErrorReporter.hpp @@ -63,12 +63,6 @@ // Returns: - //**************************************************************************** -#ifndef NDB_ASSERT -#define NDB_ASSERT(trueToContinue, message) \ - if ( !(trueToContinue) ) { \ -ndbout << "ASSERT FAILED. FILE: " << __FILE__ << ", LINE: " << __LINE__ << ", MSG: " << message << endl;exit(-1);} -#endif - #define MGM_REQUIRE(x) \ if (!(x)) { ndbout << __FILE__ << " " << __LINE__ \ << ": Warning! Requirement failed" << endl; } diff --git a/ndb/include/util/Bitmask.hpp b/ndb/include/util/Bitmask.hpp index 7355742f845..a670889f3b3 100644 --- a/ndb/include/util/Bitmask.hpp +++ b/ndb/include/util/Bitmask.hpp @@ -19,11 +19,6 @@ #include -#ifndef NDB_ASSERT -#define NDB_ASSERT(x, s) \ - do { if (!(x)) { printf("%s\n", s); abort(); } } while (0) -#endif - /** * Bitmask implementation. Size is given explicitly * (as first argument). All methods are static. 
@@ -140,7 +135,7 @@ public: inline bool BitmaskImpl::get(unsigned size, const Uint32 data[], unsigned n) { - NDB_ASSERT(n < (size << 5), "bit get out of range"); + assert(n < (size << 5)); return (data[n >> 5] & (1 << (n & 31))) != 0; } @@ -153,7 +148,7 @@ BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n, bool value) inline void BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n) { - NDB_ASSERT(n < (size << 5), "bit set out of range"); + assert(n < (size << 5)); data[n >> 5] |= (1 << (n & 31)); } @@ -176,7 +171,7 @@ BitmaskImpl::assign(unsigned size, Uint32 dst[], const Uint32 src[]) inline void BitmaskImpl::clear(unsigned size, Uint32 data[], unsigned n) { - NDB_ASSERT(n < (size << 5), "bit clear out of range"); + assert(n < (size << 5)); data[n >> 5] &= ~(1 << (n & 31)); } diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/ndb/src/common/debugger/SignalLoggerManager.cpp index 3839a348222..d642ed09a68 100644 --- a/ndb/src/common/debugger/SignalLoggerManager.cpp +++ b/ndb/src/common/debugger/SignalLoggerManager.cpp @@ -487,31 +487,6 @@ SignalLoggerManager::printLinearSection(FILE * output, putc('\n', output); } -void -SignalLoggerManager::printSegmentedSection(FILE * output, - const SignalHeader & sh, - const SegmentedSectionPtr ptr[3], - unsigned i) -{ - fprintf(output, "SECTION %u type=segmented", i); - if (i >= 3) { - fprintf(output, " *** invalid ***\n"); - return; - } - const Uint32 len = ptr[i].sz; - SectionSegment * ssp = ptr[i].p; - Uint32 pos = 0; - fprintf(output, " size=%u\n", (unsigned)len); - while (pos < len) { - if (pos > 0 && pos % SectionSegment::DataLength == 0) { - ssp = g_sectionSegmentPool.getPtr(ssp->m_nextSegment); - } - printDataWord(output, pos, ssp->theData[pos % SectionSegment::DataLength]); - } - if (len > 0) - putc('\n', output); -} - void SignalLoggerManager::printDataWord(FILE * output, Uint32 & pos, const Uint32 data) { diff --git a/ndb/src/kernel/Main.cpp b/ndb/src/kernel/Main.cpp index 
7bd4e75ca18..51960dbf694 100644 --- a/ndb/src/kernel/Main.cpp +++ b/ndb/src/kernel/Main.cpp @@ -143,7 +143,7 @@ NDB_MAIN(ndb_kernel){ // Set thread concurrency for Solaris' light weight processes int status; status = NdbThread_SetConcurrencyLevel(30); - NDB_ASSERT(status == 0, "Can't set appropriate concurrency level."); + assert(status == 0); #ifdef VM_TRACE // Create a signal logger @@ -168,7 +168,7 @@ NDB_MAIN(ndb_kernel){ globalEmulatorData.theThreadConfig->doStart(NodeState::SL_STARTING); break; default: - NDB_ASSERT(0, "Illegal state globalData.theRestartFlag"); + assert("Illegal state globalData.theRestartFlag" == 0); } globalTransporterRegistry.startSending(); diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp index 25e85ba9f5f..62f47af94bd 100644 --- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp @@ -924,7 +924,7 @@ Dbtux::TreeHead::getSize(AccSize acc) const case AccFull: return m_nodeSize; } - REQUIRE(false, "invalid Dbtux::AccSize"); + abort(); return 0; } diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index 7ba7d0d25c6..f6607cdbdbb 100644 --- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -229,7 +229,7 @@ AsyncFile::run() endReq(); return; default: - THREAD_REQUIRE(false, "Using default switch in AsyncFile::run"); + abort(); break; }//switch theReportTo->writeChannel(request); diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp index 435a6a6b208..03911d195ec 100644 --- a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp +++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp @@ -120,8 +120,7 @@ template void MemoryChannel::writeChannel( T *t) { NdbMutex_Lock(theMutexPtr); - REQUIRE(!full(theWriteIndex, theReadIndex), "Memory Channel Full"); - REQUIRE(theChannel != NULL, "Memory Channel Full"); + if(full(theWriteIndex, theReadIndex) 
|| theChannel == NULL) abort(); theChannel[theWriteIndex]= t; ++theWriteIndex; NdbMutex_Unlock(theMutexPtr); diff --git a/ndb/src/kernel/blocks/ndbfs/Pool.hpp b/ndb/src/kernel/blocks/ndbfs/Pool.hpp index a26fa730727..0410673af6f 100644 --- a/ndb/src/kernel/blocks/ndbfs/Pool.hpp +++ b/ndb/src/kernel/blocks/ndbfs/Pool.hpp @@ -215,7 +215,6 @@ protected: T** tList = theList; int i; theList = new T*[aSize+theCurrentSize]; - REQUIRE(theList != 0, "Allocate in Pool.hpp failed"); // allocate full list for (i = 0; i < theTop; i++) { theList[i] = tList[i]; diff --git a/ndb/src/kernel/error/ErrorReporter.hpp b/ndb/src/kernel/error/ErrorReporter.hpp index b43b30f1873..201c998307e 100644 --- a/ndb/src/kernel/error/ErrorReporter.hpp +++ b/ndb/src/kernel/error/ErrorReporter.hpp @@ -23,35 +23,6 @@ #include "Error.hpp" #include - -#ifdef ASSERT -#undef ASSERT -#endif - -#define REQUIRE(trueToContinue, message) \ - if ( (trueToContinue) ) { } else { \ - ErrorReporter::handleAssert(message, __FILE__, __LINE__); } - -#define THREAD_REQUIRE(trueToContinue, message) \ - if ( (trueToContinue) ) { } else { \ - ErrorReporter::handleThreadAssert(message, __FILE__, __LINE__); } - -#ifdef NDEBUG -#define NDB_ASSERT(trueToContinue, message) -#else -#define NDB_ASSERT(trueToContinue, message) \ - if ( !(trueToContinue) ) { \ - ErrorReporter::handleAssert(message, __FILE__, __LINE__); } -#endif - - // Description: - // This macro is used to report programming errors. - // Parameters: - // trueToContinue IN An expression. If it evaluates to 0 - // execution is stopped. - // message IN A message from the programmer - // explaining what went wrong. 
- class ErrorReporter { public: diff --git a/ndb/src/kernel/vm/MetaData.cpp b/ndb/src/kernel/vm/MetaData.cpp index bcde6c63272..51afbf21503 100644 --- a/ndb/src/kernel/vm/MetaData.cpp +++ b/ndb/src/kernel/vm/MetaData.cpp @@ -47,7 +47,7 @@ MetaData::MetaData(SimulatedBlock* block) : MetaData::~MetaData() { for (int i = false; i <= true; i++) { - NDB_ASSERT(m_common.m_lock[i] >= m_lock[i], "invalid lock count"); + assert(m_common.m_lock[i] >= m_lock[i]); m_common.m_lock[i] -= m_lock[i]; m_lock[i] = 0; } diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp index a6a8a6242cd..781c60e3817 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -136,12 +136,12 @@ SimulatedBlock::installSimulatedBlockFunctions(){ void SimulatedBlock::addRecSignalImpl(GlobalSignalNumber gsn, ExecFunction f, bool force){ - REQUIRE(gsn <= MAX_GSN, "Illegal signal added in block (GSN too high)"); - char probData[255]; - snprintf(probData, 255, - "Signal (%d) already added in block", - gsn); - REQUIRE(force || theExecArray[gsn] == 0, probData); + if(gsn > MAX_GSN || (!force && theExecArray[gsn] != 0)){ + char errorMsg[255]; + snprintf(errorMsg, 255, + "Illeagal signal (%d %d)", gsn, MAX_GSN); + ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg); + } theExecArray[gsn] = f; } diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp index 491d432625e..e3eac8c0e20 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -458,11 +458,11 @@ SimulatedBlock::executeFunction(GlobalSignalNumber gsn, Signal* signal){ char errorMsg[255]; if (!(gsn <= MAX_GSN)) { snprintf(errorMsg, 255, "Illegal signal received (GSN %d too high)", gsn); - REQUIRE(false, errorMsg); + ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg); } if (!(theExecArray[gsn] != 0)) { snprintf(errorMsg, 255, "Illegal signal received (GSN %d not added)", gsn); - REQUIRE(false, errorMsg); 
+ ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg); } ndbrequire(false); } diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/ndb/src/kernel/vm/TransporterCallback.cpp index eb7d138895c..158de64c87f 100644 --- a/ndb/src/kernel/vm/TransporterCallback.cpp +++ b/ndb/src/kernel/vm/TransporterCallback.cpp @@ -434,5 +434,28 @@ reportDisconnect(void * callbackObj, NodeId nodeId, Uint32 errNo){ globalScheduler.execute(&signal, JBA, CMVMI, GSN_DISCONNECT_REP); } - +void +SignalLoggerManager::printSegmentedSection(FILE * output, + const SignalHeader & sh, + const SegmentedSectionPtr ptr[3], + unsigned i) +{ + fprintf(output, "SECTION %u type=segmented", i); + if (i >= 3) { + fprintf(output, " *** invalid ***\n"); + return; + } + const Uint32 len = ptr[i].sz; + SectionSegment * ssp = ptr[i].p; + Uint32 pos = 0; + fprintf(output, " size=%u\n", (unsigned)len); + while (pos < len) { + if (pos > 0 && pos % SectionSegment::DataLength == 0) { + ssp = g_sectionSegmentPool.getPtr(ssp->m_nextSegment); + } + printDataWord(output, pos, ssp->theData[pos % SectionSegment::DataLength]); + } + if (len > 0) + putc('\n', output); +} diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index cf9d885847a..061ae3be8f0 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -1478,7 +1478,7 @@ CommandInterpreter::executeSet(int /*processId*/, << endl; } else { - NDB_ASSERT(false, ""); + assert(false); } } else { @@ -1497,7 +1497,7 @@ CommandInterpreter::executeSet(int /*processId*/, } else { // The primary is not tried to write if the write of backup file fails - NDB_ASSERT(false, ""); + abort(); } } free(newpar); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 713433cb8e9..5417d4a37e5 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -1698,7 +1698,7 @@ MgmtSrvr::setSignalLoggingMode(int processId, LogMode mode, logSpec = 
TestOrd::InputOutputSignals; break; default: - NDB_ASSERT(false, "Unexpected value, MgmtSrvr::setSignalLoggingMode"); + assert("Unexpected value, MgmtSrvr::setSignalLoggingMode" == 0); } NdbApiSignal* signal = getSignal(); diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index e725144a8f8..58e5d68c4b9 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -162,13 +162,6 @@ setSignalLog(){ } #endif -// These symbols are needed, but not used in the API -int g_sectionSegmentPool; -struct ErrorReporter { - void handleAssert(const char*, const char*, int); -}; -void ErrorReporter::handleAssert(const char* message, const char* file, int line) {} - /** * The execute function : Handle received signal */ @@ -314,6 +307,14 @@ execute(void * callbackObj, SignalHeader * const header, } } +// These symbols are needed, but not used in the API +void +SignalLoggerManager::printSegmentedSection(FILE *, const SignalHeader &, + const SegmentedSectionPtr ptr[3], + unsigned i){ + abort(); +} + void copy(Uint32 * & insertPtr, class SectionSegmentPool & thePool, const SegmentedSectionPtr & _ptr){ From 7e6bb0eee81c97d44ad021c99898430acfde6a9a Mon Sep 17 00:00:00 2001 From: "mysqldev@o2k.irixworld.net" <> Date: Fri, 9 Jul 2004 12:48:32 +0200 Subject: [PATCH 02/93] Irix64 mipspro ndb compile fixes --- BUILD/compile-irix-mips64-mipspro | 8 +- BitKeeper/etc/logging_ok | 1 + ndb/include/kernel/NodeState.hpp | 15 +++- ndb/include/ndbapi/NdbDictionary.hpp | 2 +- ndb/src/common/debugger/DebuggerNames.cpp | 15 ++-- ndb/src/common/debugger/EventLogger.cpp | 5 +- ndb/src/common/debugger/signaldata/LCP.cpp | 3 +- ndb/src/common/logger/Logger.cpp | 1 + ndb/src/common/mgmcommon/ConfigInfo.cpp | 23 +++-- ndb/src/common/transporter/Packer.cpp | 14 ++-- .../transporter/TransporterRegistry.cpp | 12 +-- ndb/src/common/util/BaseString.cpp | 3 + ndb/src/common/util/ConfigValues.cpp | 6 +- ndb/src/common/util/Parser.cpp | 1 + 
ndb/src/common/util/Properties.cpp | 6 ++ ndb/src/common/util/SocketServer.cpp | 10 ++- ndb/src/kernel/blocks/backup/Backup.cpp | 5 +- .../kernel/blocks/backup/restore/Restore.cpp | 15 ++-- .../kernel/blocks/backup/restore/Restore.hpp | 5 +- ndb/src/kernel/blocks/backup/restore/main.cpp | 8 +- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 22 ++--- ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 4 +- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 26 +++--- ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 7 +- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 83 +++++++++++-------- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 21 +++-- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 6 +- ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 5 +- .../blocks/dbtup/DbtupSystemRestart.cpp | 12 +-- ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 5 +- ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp | 5 +- ndb/src/kernel/blocks/grep/Grep.cpp | 7 +- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 5 +- ndb/src/kernel/blocks/suma/Suma.cpp | 3 + ndb/src/kernel/blocks/trix/Trix.cpp | 2 + ndb/src/kernel/vm/DataBuffer.hpp | 2 +- ndb/src/kernel/vm/SimulatedBlock.cpp | 4 +- ndb/src/mgmapi/mgmapi.cpp | 9 +- ndb/src/mgmsrv/CommandInterpreter.cpp | 12 +-- ndb/src/mgmsrv/MgmtSrvr.cpp | 18 ++-- ndb/src/mgmsrv/Services.cpp | 5 +- ndb/src/ndbapi/DictCache.cpp | 11 ++- ndb/src/ndbapi/Ndb.cpp | 8 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 32 ++++--- ndb/src/ndbapi/NdbEventOperationImpl.cpp | 12 ++- ndb/src/ndbapi/NdbLinHash.hpp | 12 +-- ndb/src/ndbapi/NdbOperationScan.cpp | 7 +- ndb/src/ndbapi/Ndbinit.cpp | 5 +- ndb/src/ndbapi/TransporterFacade.cpp | 3 + 49 files changed, 303 insertions(+), 208 deletions(-) diff --git a/BUILD/compile-irix-mips64-mipspro b/BUILD/compile-irix-mips64-mipspro index d8107ad73c0..1987fa13b1f 100755 --- a/BUILD/compile-irix-mips64-mipspro +++ b/BUILD/compile-irix-mips64-mipspro @@ -6,7 +6,7 @@ if [ ! 
-f "sql/mysqld.cc" ]; then fi cflags="-64 -mips4" - +config_args= if [ "$#" != 0 ]; then case "$1" in --help) @@ -25,8 +25,7 @@ if [ "$#" != 0 ]; then cflags="" ;; *) - echo "$0: invalid option '$1'; use --help to show usage" - exit 1 + config_args="$config_args $1"; shift ;; esac else @@ -79,6 +78,7 @@ cxxflags="$cxxflags -LANG:libc_in_namespace_std=OFF" CC=cc CXX=CC CFLAGS="$cflags" CXXFLAGS="$cxxflags" \ ./configure --prefix=/usr/local/mysql --disable-shared \ --with-extra-charsets=complex --enable-thread-safe-client \ - --without-extra-tools --disable-dependency-tracking + --without-extra-tools --disable-dependency-tracking \ + $config_args make diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 398a9295579..d48d9f11a98 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -126,6 +126,7 @@ mysql@home.(none) mysqldev@build.mysql2.com mysqldev@melody.local mysqldev@mysql.com +mysqldev@o2k.irixworld.net ndbdev@ndbmaster.mysql.com nick@mysql.com nick@nick.leippe.com diff --git a/ndb/include/kernel/NodeState.hpp b/ndb/include/kernel/NodeState.hpp index 1bc7806876d..ab4116f6c92 100644 --- a/ndb/include/kernel/NodeState.hpp +++ b/ndb/include/kernel/NodeState.hpp @@ -108,7 +108,8 @@ public: NodeState(StartLevel); NodeState(StartLevel, bool systemShutdown); NodeState(StartLevel, Uint32 startPhase, StartType); - + void init(); + /** * Current start level */ @@ -177,6 +178,12 @@ public: inline NodeState::NodeState(){ + init(); +} + +inline +void +NodeState::init(){ startLevel = SL_CMVMI; nodeGroup = 0xFFFFFFFF; dynamicId = 0xFFFFFFFF; @@ -186,7 +193,7 @@ NodeState::NodeState(){ inline NodeState::NodeState(StartLevel sl){ - NodeState::NodeState(); + init(); startLevel = sl; singleUserMode = 0; singleUserApi = 0xFFFFFFFF; @@ -194,7 +201,7 @@ NodeState::NodeState(StartLevel sl){ inline NodeState::NodeState(StartLevel sl, Uint32 sp, StartType typeOfStart){ - NodeState::NodeState(); + init(); startLevel = sl; starting.startPhase = sp; 
starting.restartType = typeOfStart; @@ -204,7 +211,7 @@ NodeState::NodeState(StartLevel sl, Uint32 sp, StartType typeOfStart){ inline NodeState::NodeState(StartLevel sl, bool sys){ - NodeState::NodeState(); + init(); startLevel = sl; stopping.systemShutdown = sys; singleUserMode = 0; diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 3b38e33ec91..347e81450df 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -324,7 +324,7 @@ public: /** * Get size of element */ - int Column::getSize() const; + int getSize() const; /** * Set distribution key diff --git a/ndb/src/common/debugger/DebuggerNames.cpp b/ndb/src/common/debugger/DebuggerNames.cpp index ebe94a6059f..2142138e435 100644 --- a/ndb/src/common/debugger/DebuggerNames.cpp +++ b/ndb/src/common/debugger/DebuggerNames.cpp @@ -29,10 +29,11 @@ static const char * localBlockNames[NO_OF_BLOCKS]; static int initSignalNames(const char * dst[], const GsnName src[], unsigned short len){ - for(int i = 0; i<=MAX_GSN; i++) + int i; + for(i = 0; i<=MAX_GSN; i++) dst[i] = 0; - for(int i = 0; i= NO_OF_BLOCKS || dst[index] != 0){ fprintf(stderr, diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp index dd957d67383..91f144c2230 100644 --- a/ndb/src/common/debugger/EventLogger.cpp +++ b/ndb/src/common/debugger/EventLogger.cpp @@ -1303,14 +1303,15 @@ bool EventLogger::matchEventCategory(const char * str, LogLevel::EventCategory * cat, bool exactMatch){ + unsigned i; if(cat == 0 || str == 0) return false; char * tmp = strdup(str); - for(size_t i = 0; iparticipatingDIH.TextLength+1], buf2[sig->participatingLQH.TextLength+1]; + char buf1[8*_NDB_NODE_BITMASK_SIZE+1]; + char buf2[8*_NDB_NODE_BITMASK_SIZE+1]; fprintf(output, " Sender: %d LcpId: %d\n" " ParticipatingDIH = %s\n" diff --git a/ndb/src/common/logger/Logger.cpp b/ndb/src/common/logger/Logger.cpp index 9c9f1eece18..c2fdecb642b 100644 --- 
a/ndb/src/common/logger/Logger.cpp +++ b/ndb/src/common/logger/Logger.cpp @@ -350,3 +350,4 @@ Logger::log(LoggerLevel logLevel, const char* pMsg, va_list ap) const // PRIVATE // +template class Vector; diff --git a/ndb/src/common/mgmcommon/ConfigInfo.cpp b/ndb/src/common/mgmcommon/ConfigInfo.cpp index c2b5fdabf01..21a6b2308ac 100644 --- a/ndb/src/common/mgmcommon/ConfigInfo.cpp +++ b/ndb/src/common/mgmcommon/ConfigInfo.cpp @@ -2200,13 +2200,14 @@ const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo); inline void require(bool v) { if(!v) abort();} ConfigInfo::ConfigInfo() { + int i; Properties *section; const Properties *oldpinfo; m_info.setCaseInsensitiveNames(true); m_systemDefaults.setCaseInsensitiveNames(true); - for (int i=0; im_section != 0){ if(strcmp(p->m_section, ctx.fname) == 0){ @@ -3056,7 +3058,7 @@ fixDepricated(InitConfigFileParser::Context & ctx, const char * data){ } Properties::Iterator it2(&tmp); - for (const char* name = it2.first(); name != NULL; name = it2.next()) { + for (name = it2.first(); name != NULL; name = it2.next()) { PropertiesType type; require(tmp.getTypeOf(name, &type)); switch(type){ @@ -3162,11 +3164,12 @@ addNodeConnections(Vector§ions, struct InitConfigFileParser::Context &ctx, const char * ruleData) { + Uint32 i; Properties * props= ctx.m_config; Properties p_connections; Properties p_connections2; - for (Uint32 i = 0;; i++){ + for (i = 0;; i++){ const Properties * tmp; Uint32 nodeId1, nodeId2; @@ -3187,8 +3190,8 @@ addNodeConnections(Vector§ions, Properties p_db_nodes; Properties p_api_mgm_nodes; - Uint32 i_db= 0, i_api_mgm= 0; - for (Uint32 i= 0, n= 0; n < nNodes; i++){ + Uint32 i_db= 0, i_api_mgm= 0, n; + for (i= 0, n= 0; n < nNodes; i++){ const Properties * tmp; if(!props->get("Node", i, &tmp)) continue; n++; @@ -3205,7 +3208,7 @@ addNodeConnections(Vector§ions, Uint32 nodeId1, nodeId2, dummy; - for (Uint32 i= 0; p_db_nodes.get("", i, &nodeId1); i++){ + for (i= 0; p_db_nodes.get("", i, &nodeId1); 
i++){ for (Uint32 j= i+1;; j++){ if(!p_db_nodes.get("", j, &nodeId2)) break; if(!p_connections2.get("", nodeId1+nodeId2<<16, &dummy)) { @@ -3222,7 +3225,7 @@ addNodeConnections(Vector§ions, } } - for (Uint32 i= 0; p_api_mgm_nodes.get("", i, &nodeId1); i++){ + for (i= 0; p_api_mgm_nodes.get("", i, &nodeId1); i++){ if(!p_connections.get("", nodeId1, &dummy)) { for (Uint32 j= 0;; j++){ if(!p_db_nodes.get("", j, &nodeId2)) break; @@ -3241,3 +3244,5 @@ addNodeConnections(Vector§ions, return true; } + +template class Vector; diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp index fa72af12dac..645517a4b1a 100644 --- a/ndb/src/common/transporter/Packer.cpp +++ b/ndb/src/common/transporter/Packer.cpp @@ -391,6 +391,7 @@ Packer::pack(Uint32 * insertPtr, const SignalHeader * header, const Uint32 * theData, const LinearSectionPtr ptr[3]) const { + Uint32 i; Uint32 dataLen32 = header->theLength; Uint32 no_segs = header->m_noOfSections; @@ -400,7 +401,7 @@ Packer::pack(Uint32 * insertPtr, checksumUsed + signalIdUsed + (sizeof(Protocol6)/4); - for(Uint32 i = 0; itheLength; Uint32 no_segs = header->m_noOfSections; @@ -458,7 +460,7 @@ Packer::pack(Uint32 * insertPtr, dataLen32 + no_segs + checksumUsed + signalIdUsed + (sizeof(Protocol6)/4); - for(Uint32 i = 0; igetRemoteNodeId(); const int socket = t->getSocket(); @@ -896,7 +896,7 @@ TransporterRegistry::performSend(){ } #endif #ifdef NDB_TCP_TRANSPORTER - for (int i = x; i < nTCPTransporters; i++) { + for (i = x; i < nTCPTransporters; i++) { TCP_Transporter *t = theTCPTransporters[i]; if (t && (t->hasDataToSend()) && @@ -905,7 +905,7 @@ TransporterRegistry::performSend(){ t->doSend(); }//if }//for - for (int i = 0; i < x && i < nTCPTransporters; i++) { + for (i = 0; i < x && i < nTCPTransporters; i++) { TCP_Transporter *t = theTCPTransporters[i]; if (t && (t->hasDataToSend()) && @@ -921,7 +921,7 @@ TransporterRegistry::performSend(){ #ifdef NDB_SCI_TRANSPORTER //scroll through the SCI 
transporters, // get each transporter, check if connected, send data - for (int i=0; igetRemoteNodeId(); diff --git a/ndb/src/common/util/BaseString.cpp b/ndb/src/common/util/BaseString.cpp index d15249adf72..8b7df485f77 100644 --- a/ndb/src/common/util/BaseString.cpp +++ b/ndb/src/common/util/BaseString.cpp @@ -412,3 +412,6 @@ int main() } #endif + +template class Vector; +template class Vector; diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp index 7fc99bc526c..b4cf6c9a919 100644 --- a/ndb/src/common/util/ConfigValues.cpp +++ b/ndb/src/common/util/ConfigValues.cpp @@ -578,11 +578,11 @@ ConfigValues::getPackedSize() const { Uint32 ConfigValues::pack(void * _dst, Uint32 _len) const { - + Uint32 i; char * dst = (char*)_dst; memcpy(dst, Magic, sizeof(Magic)); dst += sizeof(Magic); - for(Uint32 i = 0; i < 2 * m_size; i += 2){ + for(i = 0; i < 2 * m_size; i += 2){ Uint32 key = m_values[i]; Uint32 val = m_values[i+1]; if(key != CFV_KEY_FREE){ @@ -621,7 +621,7 @@ ConfigValues::pack(void * _dst, Uint32 _len) const { const Uint32 * sum = (Uint32*)_dst; const Uint32 len = ((Uint32*)dst) - sum; Uint32 chk = 0; - for(Uint32 i = 0; i*>; diff --git a/ndb/src/common/util/Properties.cpp b/ndb/src/common/util/Properties.cpp index 3e41056ac18..8db7b075d1b 100644 --- a/ndb/src/common/util/Properties.cpp +++ b/ndb/src/common/util/Properties.cpp @@ -169,6 +169,7 @@ put(PropertiesImpl * impl, const char * name, T value, bool replace){ return tmp->put(new PropertyImpl(short_name, value)); } + bool Properties::put(const char * name, Uint32 value, bool replace){ return ::put(impl, name, value, replace); @@ -1120,3 +1121,8 @@ bool Properties::getCaseInsensitiveNames() const { return impl->m_insensitive; } + +template bool put(PropertiesImpl *, const char *, Uint32, bool); +template bool put(PropertiesImpl *, const char *, Uint64, bool); +template bool put(PropertiesImpl *, const char *, const char *, bool); +template bool put(PropertiesImpl *, const 
char *, const Properties*, bool); diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index a0ec0aaa676..7c9585ae022 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -36,10 +36,11 @@ SocketServer::SocketServer(int maxSessions) : } SocketServer::~SocketServer() { - for(unsigned i = 0; i=0; i--) + int i; + for(i = m_sessions.size() - 1; i>=0; i--) m_sessions[i].m_session->m_stop = true; - for(int i = m_services.size() - 1; i>=0; i--) + for(i = m_services.size() - 1; i>=0; i--) m_services[i].m_service->stopSessions(); if(wait){ diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index 4342a9d6d94..52a543dbcdc 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -3332,7 +3332,8 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) req->transId1 = 0; req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - for(unsigned int i = 0; iclientOpPtr[i] = filePtr.i; }//for @@ -3350,7 +3351,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) signal->theData[7] = 0; Uint32 dataPos = 8; - for(Uint32 i = 0; iu_int16_value[i] = Twiddle16(attr_data->u_int16_value[i]); } return true; case 32: - for(unsigned i = 0; iu_int32_value[i] = Twiddle32(attr_data->u_int32_value[i]); } return true; case 64: - for(unsigned i = 0; iu_int64_value[i] = Twiddle64(attr_data->u_int64_value[i]); } return true; @@ -333,8 +334,8 @@ RestoreDataIterator::getNextTuple(int & res) Uint32 *buf_ptr = (Uint32*)_buf_ptr, *ptr = buf_ptr; ptr += m_currentTable->m_nullBitmaskSize; - - for(Uint32 i= 0; i < m_currentTable->m_fixedKeys.size(); i++){ + Uint32 i; + for(i= 0; i < m_currentTable->m_fixedKeys.size(); i++){ assert(ptr < buf_ptr + dataLength); const Uint32 attrId = m_currentTable->m_fixedKeys[i]->attrId; @@ -355,7 +356,7 @@ RestoreDataIterator::getNextTuple(int & res) ptr += sz; } - for(Uint32 i = 0; i < 
m_currentTable->m_fixedAttribs.size(); i++){ + for(i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){ assert(ptr < buf_ptr + dataLength); const Uint32 attrId = m_currentTable->m_fixedAttribs[i]->attrId; @@ -377,7 +378,7 @@ RestoreDataIterator::getNextTuple(int & res) ptr += sz; } - for(Uint32 i = 0; i < m_currentTable->m_variableAttribs.size(); i++){ + for(i = 0; i < m_currentTable->m_variableAttribs.size(); i++){ const Uint32 attrId = m_currentTable->m_variableAttribs[i]->attrId; AttributeData * attr_data = m_tuple.getData(attrId); diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.hpp b/ndb/src/kernel/blocks/backup/restore/Restore.hpp index e9149e38e44..5a705740c69 100644 --- a/ndb/src/kernel/blocks/backup/restore/Restore.hpp +++ b/ndb/src/kernel/blocks/backup/restore/Restore.hpp @@ -301,9 +301,10 @@ public: } ~LogEntry() { - for(Uint32 i= 0; i< m_values.size(); i++) + Uint32 i; + for(i= 0; i< m_values.size(); i++) delete m_values[i]; - for(Uint32 i= 0; i< m_values_e.size(); i++) + for(i= 0; i< m_values_e.size(); i++) delete m_values_e[i]; } Uint32 size() const { return m_values.size(); } diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp index 99deeb3115c..be58d72ff72 100644 --- a/ndb/src/kernel/blocks/backup/restore/main.cpp +++ b/ndb/src/kernel/blocks/backup/restore/main.cpp @@ -250,8 +250,8 @@ main(int argc, const char** argv) return -1; } - - for(Uint32 i= 0; i < g_consumers.size(); i++) + Uint32 i; + for(i= 0; i < g_consumers.size(); i++) { if (!g_consumers[i]->init()) { @@ -261,7 +261,7 @@ main(int argc, const char** argv) } - for(Uint32 i = 0; igetTableName())) { @@ -345,7 +345,7 @@ main(int argc, const char** argv) return -1; } logIter.validateFooter(); //not implemented - for (Uint32 i= 0; i < g_consumers.size(); i++) + for (i= 0; i < g_consumers.size(); i++) g_consumers[i]->endOfLogEntrys(); } } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp 
index 694007c8508..a63be370d15 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -1100,14 +1100,15 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal) } if (dumpState->args[0] == DumpStateOrd::CmvmiTestLongSigWithDelay) { + unsigned i; Uint32 loopCount = dumpState->args[1]; const unsigned len0 = 11; const unsigned len1 = 123; Uint32 sec0[len0]; Uint32 sec1[len1]; - for (unsigned i = 0; i < len0; i++) + for (i = 0; i < len0; i++) sec0[i] = i; - for (unsigned i = 0; i < len1; i++) + for (i = 0; i < len1; i++) sec1[i] = 16 * i; Uint32* sig = signal->getDataPtrSend(); sig[0] = reference(); @@ -1160,6 +1161,7 @@ static LinearSectionPtr g_test[3]; void Cmvmi::execTESTSIG(Signal* signal){ + Uint32 i; /** * Test of SafeCounter */ @@ -1184,14 +1186,14 @@ Cmvmi::execTESTSIG(Signal* signal){ getOwnNodeId(), true); ndbout_c("-- Fixed section --"); - for(Uint32 i = 0; ilength(); i++){ + for(i = 0; ilength(); i++){ fprintf(stdout, "H'0x%.8x ", signal->theData[i]); if(((i + 1) % 6) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); - for(Uint32 i = 0; iheader.m_noOfSections; i++){ + for(i = 0; iheader.m_noOfSections; i++){ SegmentedSectionPtr ptr; ndbout_c("-- Section %d --", i); signal->getSection(ptr, i); @@ -1204,7 +1206,7 @@ Cmvmi::execTESTSIG(Signal* signal){ /** * Validate length:s */ - for(Uint32 i = 0; iheader.m_noOfSections; i++){ + for(i = 0; iheader.m_noOfSections; i++){ SegmentedSectionPtr ptr; signal->getSection(ptr, i); ndbrequire(ptr.p != 0); @@ -1249,7 +1251,7 @@ Cmvmi::execTESTSIG(Signal* signal){ case 4:{ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); - for(Uint32 i = 0; igetSection(sptr, i); ptr[i].sz = sptr.sz; @@ -1298,7 +1300,7 @@ Cmvmi::execTESTSIG(Signal* signal){ case 8:{ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); - for(Uint32 i = 0; igetSection(sptr, i); ptr[i].sz = sptr.sz; @@ -1332,7 +1334,7 @@ Cmvmi::execTESTSIG(Signal* signal){ 
sendNextLinearFragment(signal, fragSend); } - for(Uint32 i = 0; igetNoOfSections(); memset(g_test, 0, sizeof(g_test)); - for(Uint32 i = 0; igetSection(sptr, i); g_test[i].sz = sptr.sz; @@ -1408,7 +1410,7 @@ Cmvmi::execTESTSIG(Signal* signal){ case 14:{ Uint32 count = signal->theData[8]; signal->theData[10] = count * rg.m_nodes.count(); - for(Uint32 i = 0; ilength(), JBB); } return; diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 933ee2cf8e1..44df20633ec 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -9199,8 +9199,8 @@ void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr) for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) { regFragPtr.p->datapages[i] = RNIL; }//for - for (Uint32 i = 0; i < 4; i++) { - regFragPtr.p->longKeyPageArray[i] = RNIL; + for (Uint32 j = 0; j < 4; j++) { + regFragPtr.p->longKeyPageArray[j] = RNIL; }//for }//Dbacc::initFragGeneral() diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 2ef9e721e22..22c943c5648 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -5734,6 +5734,7 @@ void Dbdict::execLIST_TABLES_REQ(Signal* signal) { jamEntry(); + Uint32 i; ListTablesReq * req = (ListTablesReq*)signal->getDataPtr(); Uint32 senderRef = req->senderRef; Uint32 senderData = req->senderData; @@ -5747,7 +5748,7 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) conf->senderData = senderData; conf->counter = 0; Uint32 pos = 0; - for (Uint32 i = 0; i < c_tableRecordPool.getSize(); i++) { + for (i = 0; i < c_tableRecordPool.getSize(); i++) { TableRecordPtr tablePtr; c_tableRecordPool.getPtr(tablePtr, i); // filter @@ -5827,12 +5828,12 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) conf->counter++; pos = 0; } - Uint32 i = 0; - while (i < size) { + Uint32 k = 0; + while (k < size) { char* p = (char*)&conf->tableData[pos]; for (Uint32 j = 0; j < 4; j++) { - if 
(i < size) - *p++ = tablePtr.p->tableName[i++]; + if (k < size) + *p++ = tablePtr.p->tableName[k++]; else *p++ = 0; } @@ -5846,7 +5847,7 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) } } // XXX merge with above somehow - for (Uint32 i = 0; i < c_triggerRecordPool.getSize(); i++) { + for (i = 0; i < c_triggerRecordPool.getSize(); i++) { if (reqListIndexes) break; TriggerRecordPtr triggerPtr; @@ -5890,12 +5891,12 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) conf->counter++; pos = 0; } - Uint32 i = 0; - while (i < size) { + Uint32 k = 0; + while (k < size) { char* p = (char*)&conf->tableData[pos]; for (Uint32 j = 0; j < 4; j++) { - if (i < size) - *p++ = triggerPtr.p->triggerName[i++]; + if (k < size) + *p++ = triggerPtr.p->triggerName[k++]; else *p++ = 0; } @@ -6132,6 +6133,7 @@ Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr) void Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) { + Uint32 k; jam(); const CreateIndxReq* const req = &opPtr.p->m_request; // signal data writer @@ -6201,7 +6203,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) } // hash index attributes must currently be in table order Uint32 prevAttrId = RNIL; - for (Uint32 k = 0; k < opPtr.p->m_attrList.sz; k++) { + for (k = 0; k < opPtr.p->m_attrList.sz; k++) { jam(); bool found = false; for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) { @@ -6261,7 +6263,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) // write index key attributes AttributeRecordPtr aRecPtr; c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute); - for (Uint32 k = 0; k < opPtr.p->m_attrList.sz; k++) { + for (k = 0; k < opPtr.p->m_attrList.sz; k++) { jam(); for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) { AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr); diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp index 
df47237ae59..595d15b62e9 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp @@ -50,17 +50,18 @@ void Dbdih::initData() nodeRecord = (NodeRecord*) allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES); - for(Uint32 i = 0; itheData[0]; Uint32 nodeId = startMe->startingNodeId; const Uint32 startWord = startMe->startWord; + Uint32 i; CRASH_INSERTION(7130); ndbrequire(nodeId == cownNodeId); arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4); - for(Uint32 i = 0; i < StartMeConf::DATA_SIZE; i++) + for(i = 0; i < StartMeConf::DATA_SIZE; i++) cdata[startWord+i] = startMe->data[i]; if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){ @@ -1556,12 +1557,12 @@ void Dbdih::execSTART_MECONF(Signal* signal) * But dont copy lastCompletedGCI:s */ Uint32 tempGCP[MAX_NDB_NODES]; - for(Uint32 i = 0; i < MAX_NDB_NODES; i++) + for(i = 0; i < MAX_NDB_NODES; i++) tempGCP[i] = SYSFILE->lastCompletedGCI[i]; - for(Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++) + for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++) sysfileData[i] = cdata[i]; - for(Uint32 i = 0; i < MAX_NDB_NODES; i++) + for(i = 0; i < MAX_NDB_NODES; i++) SYSFILE->lastCompletedGCI[i] = tempGCP[i]; setNodeActiveStatus(); @@ -3599,6 +3600,7 @@ void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr) /*---------------------------------------------------------------------------*/ void Dbdih::execNODE_FAILREP(Signal* signal) { + Uint32 i; Uint32 failedNodes[MAX_NDB_NODES]; jamEntry(); NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; @@ -3611,7 +3613,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal) // The first step is to convert from a bit mask to an array of failed nodes. 
/*-------------------------------------------------------------------------*/ Uint32 index = 0; - for (Uint32 i = 1; i < MAX_NDB_NODES; i++) { + for (i = 1; i < MAX_NDB_NODES; i++) { jam(); if(NodeBitmask::get(nodeFail->theNodes, i)){ jam(); @@ -3629,7 +3631,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal) // We also set certain state variables ensuring that the node no longer is // used in transactions and also mark that we received this signal. /*-------------------------------------------------------------------------*/ - for (Uint32 i = 0; i < noOfFailedNodes; i++) { + for (i = 0; i < noOfFailedNodes; i++) { jam(); NodeRecordPtr TNodePtr; TNodePtr.i = failedNodes[i]; @@ -3671,7 +3673,7 @@ void Dbdih::execNODE_FAILREP(Signal* signal) const bool masterTakeOver = (oldMasterId != newMasterId); - for(Uint32 i = 0; i < noOfFailedNodes; i++) { + for(i = 0; i < noOfFailedNodes; i++) { NodeRecordPtr failedNodePtr; failedNodePtr.i = failedNodes[i]; ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); @@ -6882,8 +6884,9 @@ void Dbdih::releaseFragments(TabRecordPtr tabPtr) void Dbdih::initialiseFragstore() { + Uint32 i; FragmentstorePtr fragPtr; - for (Uint32 i = 0; i < cfragstoreFileSize; i++) { + for (i = 0; i < cfragstoreFileSize; i++) { fragPtr.i = i; ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); initFragstore(fragPtr); @@ -6892,7 +6895,7 @@ void Dbdih::initialiseFragstore() fragPtr.i = 0; cfirstfragstore = RNIL; cremainingfrags = 0; - for (Uint32 i = 0; i < noOfChunks; i++) { + for (i = 0; i < noOfChunks; i++) { ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); fragPtr.p->nextFragmentChunk = cfirstfragstore; cfirstfragstore = fragPtr.i; @@ -10231,11 +10234,12 @@ void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr, ReplicaRecordPtr& newReplicaPtr, Uint32 nodeId) { + Uint32 i; ReplicaRecordPtr arrReplicaPtr; ReplicaRecordPtr arrPrevReplicaPtr; seizeReplicaRec(newReplicaPtr); - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { + for (i = 0; i < 
MAX_LCP_STORED; i++) { newReplicaPtr.p->maxGciCompleted[i] = 0; newReplicaPtr.p->maxGciStarted[i] = 0; newReplicaPtr.p->lcpId[i] = 0; @@ -10243,7 +10247,7 @@ void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr, }//for newReplicaPtr.p->noCrashedReplicas = 0; newReplicaPtr.p->initialGci = currentgcp; - for (Uint32 i = 0; i < 8; i++) { + for (i = 0; i < 8; i++) { newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1; newReplicaPtr.p->createGci[i] = 0; }//for @@ -10354,7 +10358,8 @@ void Dbdih::checkEscalation() { Uint32 TnodeGroup[MAX_NDB_NODES]; NodeRecordPtr nodePtr; - for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { + Uint32 i; + for (i = 0; i < MAX_NDB_NODES; i++) { TnodeGroup[i] = ZFALSE; }//for for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { @@ -10366,7 +10371,7 @@ void Dbdih::checkEscalation() TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE; } } - for (Uint32 i = 0; i < cnoOfNodeGroups; i++) { + for (i = 0; i < cnoOfNodeGroups; i++) { jam(); if (TnodeGroup[i] == ZFALSE) { jam(); @@ -10929,7 +10934,8 @@ void Dbdih::initNodeState(NodeRecordPtr nodePtr) /*************************************************************************/ void Dbdih::initRestartInfo() { - for (int i = 0; i < MAX_NDB_NODES; i++) { + Uint32 i; + for (i = 0; i < MAX_NDB_NODES; i++) { SYSFILE->lastCompletedGCI[i] = 0; }//for NodeRecordPtr nodePtr; @@ -10950,10 +10956,10 @@ void Dbdih::initRestartInfo() SYSFILE->oldestRestorableGCI = 1; SYSFILE->newestRestorableGCI = 1; SYSFILE->systemRestartBits = 0; - for (Uint32 i = 0; i < NodeBitmask::Size; i++) { + for (i = 0; i < NodeBitmask::Size; i++) { SYSFILE->lcpActive[0] = 0; }//for - for (Uint32 i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) { + for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) { SYSFILE->takeOver[i] = 0; }//for Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits); @@ -11032,10 +11038,11 @@ void Dbdih::initTable(TabRecordPtr tabPtr) tabPtr.p->tabFile[1] = RNIL; tabPtr.p->m_dropTab.tabUserRef = 0; tabPtr.p->m_dropTab.tabUserPtr = RNIL; 
- for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { + Uint32 i; + for (i = 0; i < MAX_NDB_NODES; i++) { tabPtr.p->startFid[i] = RNIL; }//for - for (Uint32 i = 0; i < 8; i++) { + for (i = 0; i < 8; i++) { tabPtr.p->pageRef[i] = RNIL; }//for tabPtr.p->tableType = DictTabInfo::UndefTableType; @@ -11367,6 +11374,7 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[]) Uint32 tmngNodeGroup; Uint32 tmngReplica; Uint32 tmngLimit; + Uint32 i; /**----------------------------------------------------------------------- * ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. HOT SPARE NODES ARE ASSIGNED @@ -11376,7 +11384,7 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[]) tmngReplica = 0; tmngLimit = csystemnodes - cnoHotSpare; ndbrequire(tmngLimit < MAX_NDB_NODES); - for (Uint32 i = 0; i < tmngLimit; i++) { + for (i = 0; i < tmngLimit; i++) { NodeGroupRecordPtr NGPtr; jam(); tmngNode = nodeArray[i]; @@ -11396,14 +11404,14 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[]) }//for cnoOfNodeGroups = tmngNodeGroup; ndbrequire(csystemnodes < MAX_NDB_NODES); - for (Uint32 i = tmngLimit + 1; i < csystemnodes; i++) { + for (i = tmngLimit + 1; i < csystemnodes; i++) { jam(); tmngNode = nodeArray[i]; mngNodeptr.i = tmngNode; ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord); mngNodeptr.p->nodeGroup = ZNIL; }//for - for(int i = 0; i < MAX_NDB_NODES; i++){ + for(i = 0; i < MAX_NDB_NODES; i++){ jam(); Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID); }//for @@ -11690,12 +11698,13 @@ Uint32 Dbdih::readPageWord(RWFragment* rf) void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr) { + Uint32 i; readReplicaPtr.p->procNode = readPageWord(rf); readReplicaPtr.p->initialGci = readPageWord(rf); readReplicaPtr.p->noCrashedReplicas = readPageWord(rf); readReplicaPtr.p->nextLcp = readPageWord(rf); - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { + for (i = 0; i < MAX_LCP_STORED; i++) { readReplicaPtr.p->maxGciCompleted[i] = readPageWord(rf); readReplicaPtr.p->maxGciStarted[i] = 
readPageWord(rf); readReplicaPtr.p->lcpId[i] = readPageWord(rf); @@ -11703,13 +11712,13 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr) }//for const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas; ndbrequire(noCrashedReplicas < 8); - for (Uint32 i = 0; i < noCrashedReplicas; i++) { + for (i = 0; i < noCrashedReplicas; i++) { readReplicaPtr.p->createGci[i] = readPageWord(rf); readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf); ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1); ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1); }//for - for(Uint32 i = noCrashedReplicas; i<8; i++){ + for(i = noCrashedReplicas; i<8; i++){ readReplicaPtr.p->createGci[i] = readPageWord(rf); readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf); // They are not initialized... @@ -11732,7 +11741,7 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr) /* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */ /* INVALIDATED BY MOVING BACK THE RESTART GCI. 
*/ /* ---------------------------------------------------------------------- */ - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { + for (i = 0; i < MAX_LCP_STORED; i++) { jam(); if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) && (readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) { @@ -11764,6 +11773,7 @@ void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr) void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr) { + Uint32 i; ReplicaRecordPtr newReplicaPtr; Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas; Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas; @@ -11775,7 +11785,7 @@ void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr) fragPtr.p->noOldStoredReplicas = 0; Uint32 replicaIndex = 0; ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS); - for (Uint32 i = 0; i < noStoredReplicas; i++) { + for (i = 0; i < noStoredReplicas; i++) { seizeReplicaRec(newReplicaPtr); readReplica(rf, newReplicaPtr); if (checkNodeAlive(newReplicaPtr.p->procNode)) { @@ -11790,7 +11800,7 @@ void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr) }//if }//for fragPtr.p->fragReplicas = noStoredReplicas; - for (Uint32 i = 0; i < noOldStoredReplicas; i++) { + for (i = 0; i < noOldStoredReplicas; i++) { jam(); seizeReplicaRec(newReplicaPtr); readReplica(rf, newReplicaPtr); @@ -12640,11 +12650,11 @@ void Dbdih::setNodeRestartInfoBits() NodeRecordPtr nodePtr; Uint32 tsnrNodeGroup; Uint32 tsnrNodeActiveStatus; - - for(int i = 1; i < MAX_NDB_NODES; i++){ + Uint32 i; + for(i = 1; i < MAX_NDB_NODES; i++){ Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active); }//for - for(Uint32 i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){ + for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){ SYSFILE->nodeGroups[i] = 0; }//for NdbNodeBitmask::clear(SYSFILE->lcpActive); @@ -12786,13 +12796,14 @@ void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex) writePageWord(wf, wfReplicaPtr.p->initialGci); 
writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas); writePageWord(wf, wfReplicaPtr.p->nextLcp); - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { + Uint32 i; + for (i = 0; i < MAX_LCP_STORED; i++) { writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]); writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]); writePageWord(wf, wfReplicaPtr.p->lcpId[i]); writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]); }//if - for (Uint32 i = 0; i < 8; i++) { + for (i = 0; i < 8; i++) { writePageWord(wf, wfReplicaPtr.p->createGci[i]); writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]); }//if @@ -13003,7 +13014,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) } if(signal->theData[0] == 7012){ - char buf[c_lcpState.m_participatingDIH.TextLength+1]; + char buf[8*_NDB_NODE_BITMASK_SIZE+1]; infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf)); infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf)); infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s", @@ -13020,8 +13031,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) jam(); ptrAss(nodePtr, nodeRecord); if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){ - - for(Uint32 i = 0; inoOfStartedChkpt; i++){ + Uint32 i; + for(i = 0; inoOfStartedChkpt; i++){ infoEvent("Node %d: started: table=%d fragment=%d replica=%d", nodePtr.i, nodePtr.p->startedChkpt[i].tableId, @@ -13029,7 +13040,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal) nodePtr.p->startedChkpt[i].replicaPtr); } - for(Uint32 i = 0; inoOfQueuedChkpt; i++){ + for(i = 0; inoOfQueuedChkpt; i++){ infoEvent("Node %d: queued: table=%d fragment=%d replica=%d", nodePtr.i, nodePtr.p->queuedChkpt[i].tableId, diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 1abf4b3a7e9..807ac206f31 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -6307,12 +6307,13 @@ void Dblqh::execNODE_FAILREP(Signal* signal) UintR TfoundNodes = 0; UintR TnoOfNodes; UintR 
Tdata[MAX_NDB_NODES]; + Uint32 i; NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; TnoOfNodes = nodeFail->noOfNodes; UintR index = 0; - for (Uint32 i = 1; i < MAX_NDB_NODES; i++) { + for (i = 1; i < MAX_NDB_NODES; i++) { jam(); if(NodeBitmask::get(nodeFail->theNodes, i)){ jam(); @@ -6326,7 +6327,7 @@ void Dblqh::execNODE_FAILREP(Signal* signal) ndbrequire(index == TnoOfNodes); ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES); - for (Uint32 i = 0; i < TnoOfNodes; i++) { + for (i = 0; i < TnoOfNodes; i++) { const Uint32 nodeId = Tdata[i]; lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId); @@ -6524,7 +6525,7 @@ Dblqh::scanMarkers(Signal* signal, } const Uint32 RT_BREAK = 256; - for(Uint32 i = 0; inoLocFrag; ndbrequire(noLocFrag == 2); Uint32 fragid[2]; - for (Uint32 i = 0; i < noLocFrag; i++) { + Uint32 i; + for (i = 0; i < noLocFrag; i++) { fragid[i] = srFragidConf->fragId[i]; }//for - for (Uint32 i = 0; i < noLocFrag; i++) { + for (i = 0; i < noLocFrag; i++) { jam(); Uint32 fragId = fragid[i]; /* ---------------------------------------------------------------------- @@ -16040,17 +16042,18 @@ void Dblqh::initialisePageRef(Signal* signal) void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data, Uint32 retRef, Uint32 retData) { + Uint32 i; switch (data) { case 0: jam(); - for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { + for (i = 0; i < MAX_NDB_NODES; i++) { cnodeSrState[i] = ZSTART_SR; cnodeExecSrState[i] = ZSTART_SR; }//for - for (Uint32 i = 0; i < 1024; i++) { + for (i = 0; i < 1024; i++) { ctransidHash[i] = RNIL; }//for - for (Uint32 i = 0; i < 4; i++) { + for (i = 0; i < 4; i++) { cactiveCopy[i] = RNIL; }//for cnoActiveCopy = 0; @@ -18004,7 +18007,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) infoEvent(" lcpQueued=%d reportEmpty=%d", TlcpPtr.p->lcpQueued, TlcpPtr.p->reportEmpty); - char buf[TlcpPtr.p->m_EMPTY_LCP_REQ.TextLength+1]; + char buf[8*_NDB_NODE_BITMASK_SIZE+1]; infoEvent(" m_EMPTY_LCP_REQ=%d", TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf)); diff --git 
a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 5afd79687a1..326a1afd34f 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -6725,7 +6725,8 @@ void Dbtc::execNODE_FAILREP(Signal* signal) tcNodeFailptr.i = 0; ptrAss(tcNodeFailptr, tcFailRecord); - for (Uint32 tindex = 0; tindex < tnoOfNodes; tindex++) { + Uint32 tindex; + for (tindex = 0; tindex < tnoOfNodes; tindex++) { jam(); hostptr.i = cdata[tindex]; ptrCheckGuard(hostptr, chostFilesize, hostRecord); @@ -6842,8 +6843,7 @@ void Dbtc::execNODE_FAILREP(Signal* signal) }//if }//for }//if - - for (Uint32 tindex = 0; tindex < tnoOfNodes; tindex++) { + for (tindex = 0; tindex < tnoOfNodes; tindex++) { jam(); hostptr.i = cdata[tindex]; ptrCheckGuard(hostptr, chostFilesize, hostRecord); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp index c38fde23404..930faf6d24a 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp @@ -238,11 +238,12 @@ void Dbtup::execMEMCHECKREQ(Signal* signal) ljamEntry(); BlockReference blockref = signal->theData[0]; - for (Uint32 i = 0; i < 25; i++) { + Uint32 i; + for (i = 0; i < 25; i++) { ljam(); data[i] = 0; }//for - for (Uint32 i = 0; i < 16; i++) { + for (i = 0; i < 16; i++) { regPagePtr.i = cfreepageList[i]; ljam(); while (regPagePtr.i != RNIL) { diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp index 580d764c96f..30701bdbe39 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp @@ -494,16 +494,17 @@ void Dbtup::readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, Uint32 dataPages[16]; ndbrequire(dbsiPtr.p->pdxFilePage > 0); ndbrequire(dbsiPtr.p->pdxFilePage <= ZUB_SEGMENT_SIZE); - for (Uint32 i = 0; i < dbsiPtr.p->pdxFilePage; i++) { + Uint32 i; + 
for (i = 0; i < dbsiPtr.p->pdxFilePage; i++) { ljam(); dataPages[i] = dbsiPtr.p->pdxDataPage[i + ZUB_SEGMENT_SIZE]; }//for - for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) { + for (i = 0; i < ZUB_SEGMENT_SIZE; i++) { ljam(); dataPages[i + dbsiPtr.p->pdxFilePage] = dbsiPtr.p->pdxDataPage[i]; }//for Uint32 limitLoop = ZUB_SEGMENT_SIZE + dbsiPtr.p->pdxFilePage; - for (Uint32 i = 0; i < limitLoop; i++) { + for (i = 0; i < limitLoop; i++) { ljam(); dbsiPtr.p->pdxDataPage[i] = dataPages[i]; }//for @@ -977,7 +978,8 @@ void Dbtup::allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoP seizeDiskBufferSegmentRecord(dbsiPtr); dbsiPtr.p->pdxBuffertype = UNDO_RESTART_PAGES; dbsiPtr.p->pdxUndoBufferSet[0] = undoPagePtr.i; - for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) { + Uint32 i; + for (i = 0; i < ZUB_SEGMENT_SIZE; i++) { dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + i; }//for @@ -994,7 +996,7 @@ void Dbtup::allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoP undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL; dbsiPtr.p->pdxUndoBufferSet[1] = undoPagePtr.i; // lliPtr.p->lliUndoPage = undoPagePtr.i; - for (Uint32 i = ZUB_SEGMENT_SIZE; i < (2 * ZUB_SEGMENT_SIZE); i++) { + for (i = ZUB_SEGMENT_SIZE; i < (2 * ZUB_SEGMENT_SIZE); i++) { dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + (i - ZUB_SEGMENT_SIZE); }//for return; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp index 0612f191830..ca6a3e69931 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp @@ -405,14 +405,15 @@ Dbtux::freeDescEnt(IndexPtr indexPtr) index2.m_descPage == pagePtr.i && index2.m_descOff == off + size); // move the entry (overlapping copy if size < size2) - for (unsigned i = 0; i < size2; i++) { + unsigned i; + for (i = 0; i < size2; i++) { jam(); data[off + i] = data[off + size + i]; } off += size2; // adjust page offset in index and all fragments index2.m_descOff -= size; - for 
(unsigned i = 0; i < index2.m_numFrags; i++) { + for (i = 0; i < index2.m_numFrags; i++) { jam(); Frag& frag2 = *c_fragPool.getPtr(index2.m_fragPtrI[i]); frag2.m_descOff -= size; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp index 7c3f5fa36b8..02ed9739f3c 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp @@ -283,7 +283,8 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos) nodePopDown(signal, node, pos, ent); ndbrequire(node.getChilds() <= 1); // handle half-leaf - for (unsigned i = 0; i <= 1; i++) { + unsigned i; + for (i = 0; i <= 1; i++) { jam(); TupLoc childLoc = node.getLink(i); if (childLoc != NullTupLoc) { @@ -297,7 +298,7 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos) // get parent if any TupLoc parentLoc = node.getLink(2); NodeHandle parentNode(frag); - unsigned i = node.getSide(); + i = node.getSide(); // move all that fits into parent if (parentLoc != NullTupLoc) { jam(); diff --git a/ndb/src/kernel/blocks/grep/Grep.cpp b/ndb/src/kernel/blocks/grep/Grep.cpp index ee506ce922a..8b93ef9cd20 100644 --- a/ndb/src/kernel/blocks/grep/Grep.cpp +++ b/ndb/src/kernel/blocks/grep/Grep.cpp @@ -73,7 +73,7 @@ Grep::getNodeGroupMembers(Signal* signal) { c_noNodesInGroup++; } } - ndbrequire(c_noNodesInGroup >= 0); // at least 1 node in the nodegroup + ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup #ifdef NODEFAIL_DEBUG for (Uint32 i = 0; i < c_noNodesInGroup; i++) { @@ -253,7 +253,8 @@ Grep::execREAD_NODESCONF(Signal* signal) /****************************** * Check which REP nodes exist ******************************/ - for (Uint32 i = 1; i < MAX_NODES; i++) + Uint32 i; + for (i = 1; i < MAX_NODES; i++) { jam(); #if 0 @@ -279,7 +280,7 @@ Grep::execREAD_NODESCONF(Signal* signal) m_aliveNodes.clear(); Uint32 count = 0; - for(Uint32 i = 0; iallNodes, i)) { diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp 
b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index f2d2edb615d..ed4e7f48d6f 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2138,7 +2138,8 @@ void Qmgr::execPREP_FAILREQ(Signal* signal) Uint16 TfailureNr = prepFail->failNo; cnoPrepFailedNodes = prepFail->noOfNodes; UintR arrayIndex = 0; - for (Uint32 Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) { + Uint32 Tindex; + for (Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) { if (NodeBitmask::get(prepFail->theNodes, Tindex)){ cprepFailedNodes[arrayIndex] = Tindex; arrayIndex++; @@ -2166,7 +2167,7 @@ void Qmgr::execPREP_FAILREQ(Signal* signal) guard0 = cnoPrepFailedNodes - 1; arrGuard(guard0, MAX_NDB_NODES); - for (Uint32 Tindex = 0; Tindex <= guard0; Tindex++) { + for (Tindex = 0; Tindex <= guard0; Tindex++) { jam(); failReport(signal, cprepFailedNodes[Tindex], diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp index 9718845de43..ec9dc4a3766 100644 --- a/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/ndb/src/kernel/blocks/suma/Suma.cpp @@ -3972,3 +3972,6 @@ Suma::execSUMA_HANDOVER_CONF(Signal* signal) { } } } + +template void append(DataBuffer<11>&,SegmentedSectionPtr,SectionSegmentPool&); + diff --git a/ndb/src/kernel/blocks/trix/Trix.cpp b/ndb/src/kernel/blocks/trix/Trix.cpp index f058433840c..6cbc7a9b371 100644 --- a/ndb/src/kernel/blocks/trix/Trix.cpp +++ b/ndb/src/kernel/blocks/trix/Trix.cpp @@ -965,3 +965,5 @@ void Trix::checkParallelism(Signal* signal, SubscriptionRecord* subRec) } BLOCK_FUNCTIONS(Trix); + +template void append(DataBuffer<15>&,SegmentedSectionPtr,SectionSegmentPool&); diff --git a/ndb/src/kernel/vm/DataBuffer.hpp b/ndb/src/kernel/vm/DataBuffer.hpp index 7dc89aa638c..7f553898eb5 100644 --- a/ndb/src/kernel/vm/DataBuffer.hpp +++ b/ndb/src/kernel/vm/DataBuffer.hpp @@ -33,7 +33,7 @@ public: Uint32 data[sz]; NdbOut& print(NdbOut& out){ out << "[DataBuffer<" << sz << ">::Segment this=" - << hex << (Uint32)this << dec << " 
nextPool= " + << this << dec << " nextPool= " << nextPool << " ]"; return out; } diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp index a6a8a6242cd..22354b17db7 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -1005,7 +1005,8 @@ SimulatedBlock::assembleFragments(Signal * signal){ /** * FragInfo == 2 or 3 */ - for(Uint32 i = 0; im_sectionPtr[i].i; @@ -1027,7 +1028,6 @@ SimulatedBlock::assembleFragments(Signal * signal){ /** * fragInfo = 3 */ - Uint32 i; for(i = 0; i<3; i++){ Uint32 ptrI = fragPtr.p->m_sectionPtrI[i]; if(ptrI != RNIL){ diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index bb4b6be8221..c4b4c11825c 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -63,7 +63,7 @@ 0, \ 0, 0 } -class ParserDummy : SocketServer::Session +class ParserDummy : private SocketServer::Session { public: ParserDummy(NDB_SOCKET_TYPE sock); @@ -491,11 +491,12 @@ extern "C" const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status) { - for(int i = 0; i*>; diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp index 004fc463b70..316b6d5795e 100644 --- a/ndb/src/mgmsrv/CommandInterpreter.cpp +++ b/ndb/src/mgmsrv/CommandInterpreter.cpp @@ -378,7 +378,8 @@ void CommandInterpreter::executeHelp(char* parameters) { (void)parameters; // Don't want compiler warning if (emptyString(parameters)) { - for (int i = 0; i = "; - for(Uint32 i = 0; itheClusterMgr->getNodeInfo(i); @@ -1003,7 +1003,7 @@ MgmtSrvr::version(int * stopCount, bool abort, } } - for(Uint32 i = 0; igetDataPtrSend()); - for(Uint32 i = 0; itheCategories[i] = ll.theCategories[i]; dst->theLevels[i] = ll.theLevels[i]; } @@ -1523,7 +1524,8 @@ int MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll, bool isResend) { - for(Uint32 i = 0; igetDataPtrSend()); - for(Uint32 i = 0; itheCategories[i] = ll.theCategories[i]; dst->theLevels[i] = 
ll.theLevels[i]; } diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 739eef90c52..2309a1ccd81 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -1119,7 +1119,8 @@ void MgmStatService::println_statistics(const BaseString &line){ MutexVector copy(m_sockets.size()); m_sockets.lock(); - for(int i = m_sockets.size() - 1; i >= 0; i--){ + int i; + for(i = m_sockets.size() - 1; i >= 0; i--){ if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){ copy.push_back(m_sockets[i]); m_sockets.erase(i, false); @@ -1127,7 +1128,7 @@ MgmStatService::println_statistics(const BaseString &line){ } m_sockets.unlock(); - for(int i = copy.size() - 1; i >= 0; i--){ + for(i = copy.size() - 1; i >= 0; i--){ NDB_CLOSE_SOCKET(copy[i]); copy.erase(i); } diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index f6f2106f2aa..5f620f77906 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -157,6 +157,7 @@ GlobalDictCache::put(const char * name, NdbTableImpl * tab) void GlobalDictCache::drop(NdbTableImpl * tab) { + unsigned i; const Uint32 len = strlen(tab->m_internalName.c_str()); Vector * vers = m_tableHash.getData(tab->m_internalName.c_str(), len); @@ -173,7 +174,7 @@ GlobalDictCache::drop(NdbTableImpl * tab) abort(); } - for(unsigned i = 0; i < sz; i++){ + for(i = 0; i < sz; i++){ TableVersion & ver = (* vers)[i]; if(ver.m_impl == tab){ if(ver.m_refCount == 0 || ver.m_status == RETREIVING || @@ -193,7 +194,7 @@ GlobalDictCache::drop(NdbTableImpl * tab) } } - for(unsigned i = 0; im_internalName.c_str()); Vector * vers = m_tableHash.getData(tab->m_internalName.c_str(), len); @@ -220,7 +222,7 @@ GlobalDictCache::release(NdbTableImpl * tab){ abort(); } - for(unsigned i = 0; i < sz; i++){ + for(i = 0; i < sz; i++){ TableVersion & ver = (* vers)[i]; if(ver.m_impl == tab){ if(ver.m_refCount == 0 || ver.m_status == RETREIVING || @@ -235,7 +237,7 @@ 
GlobalDictCache::release(NdbTableImpl * tab){ } } - for(unsigned i = 0; i; diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index fe7260c4693..8dcb5af3bd8 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -973,13 +973,13 @@ Ndb::StartTransactionNodeSelectionData::init(Uint32 noOfNodes, */ { fragment2PrimaryNodeMap = new Uint32[noOfFragments]; - - for(Uint32 i = 0; i fragment2PrimaryNodeMap[j]){ Uint32 tmp = fragment2PrimaryNodeMap[i]; @@ -987,7 +987,7 @@ Ndb::StartTransactionNodeSelectionData::init(Uint32 noOfNodes, fragment2PrimaryNodeMap[j] = tmp; } - for(Uint32 i = 0; i= 0; i--){ + size_t i; + for(i = 31; i >= 0; i--){ if(((1 << i) & size) != 0){ m_columnHashMask = (1 << (i + 1)) - 1; break; @@ -396,7 +397,7 @@ NdbTableImpl::buildColumnHash(){ Vector hashValues; Vector > chains; chains.fill(size, hashValues); - for(size_t i = 0; igetName()) & 0xFFFE; Uint32 bucket = hv & m_columnHashMask; bucket = (bucket < size ? bucket : bucket - size); @@ -410,7 +411,7 @@ NdbTableImpl::buildColumnHash(){ m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining Uint32 pos = 0; // In overflow vector - for(size_t i = 0; i NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY){ m_error.code = 4317; return -1; @@ -1340,7 +1342,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, bool haveAutoIncrement = false; Uint64 autoIncrementValue; - for(unsigned i = 0; i MAX_TAB_NAME_SIZE) { @@ -1828,7 +1830,7 @@ NdbDictInterface::createIndex(Ndb & ndb, req->setOnline(true); AttributeList attributeList; attributeList.sz = impl.m_columns.size(); - for(unsigned i = 0; im_name.c_str()); if(col == 0){ @@ -1853,7 +1855,7 @@ NdbDictInterface::createIndex(Ndb & ndb, } if (it == DictTabInfo::UniqueHashIndex) { // Sort index attributes according to primary table (using insertion sort) - for(unsigned i = 1; i < attributeList.sz; i++) { + for(i = 1; i < attributeList.sz; i++) { unsigned int temp = attributeList.id[i]; unsigned int j = i; while((j > 0) && (attributeList.id[j - 1] > 
temp)) { @@ -1863,7 +1865,7 @@ NdbDictInterface::createIndex(Ndb & ndb, attributeList.id[j] = temp; } // Check for illegal duplicate attributes - for(unsigned i = 0; iaddColumn(*(col_impl->m_facade)); @@ -2086,7 +2089,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) int pk_count = 0; evnt.m_attrListBitmask.clear(); - for(int i = 0; im_name.c_str()); if(col == 0){ @@ -2104,7 +2107,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) } // Sort index attributes according to primary table (using insertion sort) - for(int i = 1; i < attributeList_sz; i++) { + for(i = 1; i < attributeList_sz; i++) { NdbColumnImpl* temp = evnt.m_columns[i]; unsigned int j = i; while((j > 0) && (evnt.m_columns[j - 1]->m_attrId > temp->m_attrId)) { @@ -2114,7 +2117,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) evnt.m_columns[j] = temp; } // Check for illegal duplicate attributes - for(int i = 1; im_attrId == evnt.m_columns[i]->m_attrId) { m_error.code = 4258; return -1; @@ -2810,3 +2813,6 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal, m_waiter.signal(NO_WAIT); } } + +template class Vector; +template class Vector >; diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp index b73a58d97c4..af84492564b 100644 --- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -93,11 +93,12 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N, NdbEventOperationImpl::~NdbEventOperationImpl() { + int i; if (sdata) NdbMem_Free(sdata); - for (int i=0 ; i<3; i++) { + for (i=0 ; i<3; i++) { if (ptr[i].p) NdbMem_Free(ptr[i].p); } - for (int i=0 ; i<2; i++) { + for (i=0 ; i<2; i++) { NdbRecAttr *p = theFirstRecAttrs[i]; while (p) { NdbRecAttr *p_next = p->next(); @@ -1233,8 +1234,9 @@ NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h, int aMillisecondNumber) { // check if there are anything in any of the buffers + int i; int n = 0; - for (int i = 0; i < h->m_nids; 
i++) + for (i = 0; i < h->m_nids; i++) n += hasData(h->m_bufferIds[i]); if (n) return n; @@ -1243,7 +1245,9 @@ NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h, return -1; n = 0; - for (int i = 0; i < h->m_nids; i++) + for (i = 0; i < h->m_nids; i++) n += hasData(h->m_bufferIds[i]); return n; } + +template class Vector; diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp index f67d4e60200..5d0d52a31d8 100644 --- a/ndb/src/ndbapi/NdbLinHash.hpp +++ b/ndb/src/ndbapi/NdbLinHash.hpp @@ -165,13 +165,14 @@ NdbLinHash::createHashTable() { max = SEGMENTSIZE - 1; slack = SEGMENTSIZE * MAXLOADFCTR; directory[0] = new Segment_t(); - + int i; + /* The first segment cleared before used */ - for(int i = 0; i < SEGMENTSIZE; i++ ) + for(i = 0; i < SEGMENTSIZE; i++ ) directory[0]->elements[i] = 0; /* clear the rest of the directory */ - for( int i = 1; i < DIRECTORYSIZE; i++) + for(i = 1; i < DIRECTORYSIZE; i++) directory[i] = 0; } @@ -203,7 +204,8 @@ NdbLinHash::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data ) * chain=chainp will copy the contents of HASH_T into chain */ NdbElement_t * oldChain = 0; - for(NdbElement_t * chain = *chainp; chain != 0; chain = chain->next){ + NdbElement_t * chain; + for(chain = *chainp; chain != 0; chain = chain->next){ if(chain->len == len && !memcmp(chain->str, str, len)) return -1; /* Element already exists */ else @@ -211,7 +213,7 @@ NdbLinHash::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data ) } /* New entry */ - NdbElement_t * chain = new NdbElement_t(); + chain = new NdbElement_t(); chain->len = len; chain->hash = hash; chain->localkey1 = lkey1; diff --git a/ndb/src/ndbapi/NdbOperationScan.cpp b/ndb/src/ndbapi/NdbOperationScan.cpp index 299e6f2adea..929db9a6ea6 100644 --- a/ndb/src/ndbapi/NdbOperationScan.cpp +++ b/ndb/src/ndbapi/NdbOperationScan.cpp @@ -137,6 +137,7 @@ int NdbOperation::openScan(Uint32 aParallelism, bool lockMode, bool lockHoldMode, bool readCommitted) { + Uint32 
i; aParallelism = checkParallelism(aParallelism); if(aParallelism == 0){ return 0; @@ -178,7 +179,7 @@ NdbOperation::openScan(Uint32 aParallelism, return -1; } - for (Uint32 i = 0; i < aParallelism; i ++) { + for (i = 0; i < aParallelism; i ++) { tScanRec = theNdb->getNdbScanRec(); if (tScanRec == NULL) { setErrorCodeAbort(4000); @@ -213,7 +214,7 @@ NdbOperation::openScan(Uint32 aParallelism, scanTabReq->transId1 = (Uint32) transId; scanTabReq->transId2 = (Uint32) (transId >> 32); - for (Uint32 i = 0; i < 16 && i < aParallelism ; i++) { + for (i = 0; i < 16 && i < aParallelism ; i++) { scanTabReq->apiOperationPtr[i] = theScanReceiversArray[i]->ptr2int(); }//for @@ -241,7 +242,7 @@ NdbOperation::openScan(Uint32 aParallelism, tSignal = theFirstSCAN_TABINFO_Send; while (tSignal != NULL) { tSignal->setData(theNdbCon->theTCConPtr, 1); - for (int i = 0; i < 16 ; i++) { + for (i = 0; i < 16 ; i++) { tSignal->setData(theScanReceiversArray[i + tParallelism]->ptr2int(), i + 2); }//for tSignal = tSignal->next(); diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index f451ba885d4..641919d771b 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -111,12 +111,13 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) : theCurrentConnectCounter = 1; theCurrentConnectIndex = 0; - for (int i = 0; i < MAX_NDB_NODES ; i++) { + int i; + for (i = 0; i < MAX_NDB_NODES ; i++) { theConnectionArray[i] = NULL; the_release_ind[i] = 0; theDBnodes[i] = 0; }//forg - for (int i = 0; i < 2048 ; i++) { + for (i = 0; i < 2048 ; i++) { theFirstTupleId[i] = 0; theLastTupleId[i] = 0; }//for diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index e725144a8f8..4ae292f352e 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -992,3 +992,6 @@ TransporterFacade::ThreadData::close(int number){ m_statusFunction[number] = 0; return 0; } + +template class Vector; +template class Vector; 
From 63fca63e5869d20b08aabcb53c84f49aa0c0ea03 Mon Sep 17 00:00:00 2001 From: "mysqldev@o2k.irixworld.net" <> Date: Fri, 9 Jul 2004 13:28:52 +0200 Subject: [PATCH 03/93] Irix64 mipspro ndb compile fixes --- ndb/src/cw/cpcd/CPCD.cpp | 5 +++-- ndb/src/cw/cpcd/Process.cpp | 8 ++++---- ndb/src/mgmclient/CommandInterpreter.cpp | 21 ++++++++++++--------- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 4 ++++ ndb/tools/desc.cpp | 5 +++-- 5 files changed, 26 insertions(+), 17 deletions(-) diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp index f2878b7dea1..40a5fd49493 100644 --- a/ndb/src/cw/cpcd/CPCD.cpp +++ b/ndb/src/cw/cpcd/CPCD.cpp @@ -351,8 +351,9 @@ CPCD::loadProcessList(){ sess.loadFile(); loadingProcessList = false; + size_t i; Vector temporary; - for(size_t i = 0; ireadPid(); if(proc->m_processType == TEMPORARY){ @@ -360,7 +361,7 @@ CPCD::loadProcessList(){ } } - for(size_t i = 0; i ulimit; m_ulimit.split(ulimit); - for(size_t i = 0; i 0 && set_ulimit(ulimit[i]) != 0){ _exit(1); } @@ -286,7 +286,7 @@ CPCD::Process::do_exec() { BaseString * redirects[] = { &m_stdin, &m_stdout, &m_stderr }; int fds[3]; - for(int i = 0; i<3; i++){ + for(i = 0; i<3; i++){ if(redirects[i]->empty()){ #ifndef DEBUG dup2(fd, i); @@ -319,7 +319,7 @@ CPCD::Process::do_exec() { } /* Close all filedescriptors */ - for(int i = STDERR_FILENO+1; i < getdtablesize(); i++) + for(i = STDERR_FILENO+1; i < getdtablesize(); i++) close(i); execv(m_path.c_str(), argv); diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 061ae3be8f0..fe9be9bcd44 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -634,7 +634,8 @@ CommandInterpreter::executeHelp(char* parameters) void CommandInterpreter::executeShow(char* parameters) -{ +{ + int i; connect(); if (emptyString(parameters)) { ndbout << "Cluster Configuration" << endl @@ -652,7 +653,7 @@ CommandInterpreter::executeShow(char* parameters) api_nodes = 0, 
mgm_nodes = 0; - for(int i=0; i < state->no_of_nodes; i++) { + for(i=0; i < state->no_of_nodes; i++) { switch(state->node_states[i].node_type) { case NDB_MGM_NODE_TYPE_API: api_nodes++; @@ -673,7 +674,7 @@ CommandInterpreter::executeShow(char* parameters) << " NDB Node(s)" << endl; - for(int i=0; i < state->no_of_nodes; i++) { + for(i=0; i < state->no_of_nodes; i++) { if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB) { ndbout << "DB node:\t" << state->node_states[i].node_id; if(state->node_states[i].version != 0) { @@ -695,7 +696,7 @@ CommandInterpreter::executeShow(char* parameters) << " API Node(s)" << endl; - for(int i=0; i < state->no_of_nodes; i++) { + for(i=0; i < state->no_of_nodes; i++) { if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) { ndbout << "API node:\t" << state->node_states[i].node_id; if(state->node_states[i].version != 0) { @@ -716,7 +717,7 @@ CommandInterpreter::executeShow(char* parameters) << " MGM Node(s)" << endl; - for(int i=0; i < state->no_of_nodes; i++) { + for(i=0; i < state->no_of_nodes; i++) { if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) { ndbout << "MGM node:\t" << state->node_states[i].node_id; if(state->node_states[i].version != 0) { @@ -759,6 +760,7 @@ CommandInterpreter::executeShow(char* parameters) void CommandInterpreter::executeClusterLog(char* parameters) { + int i; connect(); if (parameters != 0 && strlen(parameters) != 0) { enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL; @@ -846,10 +848,10 @@ CommandInterpreter::executeClusterLog(char* parameters) ndbout << "Cluster logging is disabled." 
<< endl; - for(int i = 0; i<7;i++) + for(i = 0; i<7;i++) printf("enabled[%d] = %d\n", i, enabled[i]); ndbout << "Severities enabled: "; - for(int i = 1; i < 7; i++) { + for(i = 1; i < 7; i++) { if(enabled[i]) ndbout << names[i] << " "; } @@ -1298,14 +1300,15 @@ CommandInterpreter::executeLog(int processId, return; } int len=0; - for(Uint32 i=0; i; template class Vector >; +template class Vector; +template class Vector; +template class Bitmask<4>; + diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index a5ff11edca9..e5b98c4c8e9 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -73,7 +73,8 @@ int main(int argc, const char** argv){ ndbout << "-- Indexes -- " << endl; ndbout << "PRIMARY KEY("; - for (unsigned j= 0; j < pTab->getNoOfPrimaryKeys(); j++) + unsigned j; + for (j= 0; j < pTab->getNoOfPrimaryKeys(); j++) { const NdbDictionary::Column * col = pTab->getColumn(j); ndbout << col->getName(); @@ -82,7 +83,7 @@ int main(int argc, const char** argv){ } ndbout << ") - UniqueHashIndex" << endl; - for (unsigned j= 0; j < list.count; j++) { + for (j= 0; j < list.count; j++) { NdbDictionary::Dictionary::List::Element& elt = list.elements[j]; const NdbDictionary::Index *pIdx = dict->getIndex(elt.name, argv[i]); if (!pIdx){ From cba5218a1dcf61d7e72c26f60142f01779905fd2 Mon Sep 17 00:00:00 2001 From: "mysqldev@o2k.irixworld.net" <> Date: Fri, 9 Jul 2004 15:10:24 +0200 Subject: [PATCH 04/93] Irix64 mipspro ndb compile fixes --- ndb/src/common/util/ConfigValues.cpp | 20 +++++++++------- ndb/src/mgmsrv/MgmtSrvr.cpp | 7 ++---- ndb/src/ndbapi/NdbScanFilter.cpp | 9 +++++-- ndb/test/src/HugoCalculator.cpp | 10 ++++---- ndb/test/src/NDBT_Tables.cpp | 7 +++--- ndb/test/src/NDBT_Test.cpp | 35 ++++++++++++++-------------- ndb/tools/waiter.cpp | 5 ++-- 7 files changed, 51 insertions(+), 42 deletions(-) diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp index b4cf6c9a919..7060c44f1eb 100644 --- a/ndb/src/common/util/ConfigValues.cpp +++ 
b/ndb/src/common/util/ConfigValues.cpp @@ -105,19 +105,19 @@ ConfigValues::getByPos(Uint32 pos, Entry * result) const { Uint64 * ConfigValues::get64(Uint32 index) const { assert(index < m_int64Count); - const Uint32 * data = m_values + (m_size << 1); + const Uint32 * data = m_values + (m_size << 1); Uint64 * ptr = (Uint64*)data; - ptr += index; + ptr += index; return ptr; } char ** ConfigValues::getString(Uint32 index) const { assert(index < m_stringCount); - const Uint32 * data = m_values + (m_size << 1); - char * ptr = (char*)data; + const Uint32 * data = m_values + (m_size << 1); + char * ptr = (char*)data; ptr += m_dataSize; - ptr -= (index * sizeof(char *)); + ptr -= (index * sizeof(char *)); return (char**)ptr; } @@ -261,9 +261,9 @@ directory(Uint32 sz){ ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){ m_sectionCounter = (1 << KP_SECTION_SHIFT); m_freeKeys = directory(keys); - m_freeData = data; + m_freeData = ((data + 7) & ~7); m_currentSection = 0; - m_cfg = create(m_freeKeys, data); + m_cfg = create(m_freeKeys, m_freeData); } ConfigValuesFactory::ConfigValuesFactory(ConfigValues * cfg){ @@ -316,7 +316,8 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){ m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size); m_freeData = (m_freeData >= fs ? 
m_cfg->m_dataSize : fs + m_cfg->m_dataSize); m_freeKeys = directory(m_freeKeys); - + m_freeData = ((m_freeData + 7) & ~7); + ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); put(* m_tmp); @@ -333,6 +334,7 @@ ConfigValuesFactory::shrink(){ m_freeKeys = m_cfg->m_size - m_freeKeys; m_freeData = m_cfg->m_dataSize - m_freeData; m_freeKeys = directory(m_freeKeys); + m_freeData = ((m_freeData + 7) & ~7); ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); @@ -462,7 +464,7 @@ ConfigValuesFactory::put(const ConfigValues::Entry & entry){ case ConfigValues::StringType:{ Uint32 index = m_cfg->m_stringCount++; m_cfg->m_values[pos+1] = index; - char ** ref = m_cfg->getString(index); + char ** ref = m_cfg->getString(index); * ref = strdup(entry.m_string ? entry.m_string : ""); m_freeKeys--; m_freeData -= sizeof(char *); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index b51644b3940..717dc5083f0 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -243,20 +243,17 @@ MgmtSrvr::startEventLog() char clusterLog[MAXPATHLEN]; NdbConfig_ClusterLogFileName(clusterLog, sizeof(clusterLog)); - if(ndb_mgm_get_string_parameter(iter, CFG_LOG_DESTINATION, &tmp) == 0){ logdest.assign(tmp); } ndb_mgm_destroy_iterator(iter); - if(logdest.length()==0) { + if(logdest.length() == 0 || logdest == "") { logdest.assfmt("FILE:filename=%s,maxsize=1000000,maxfiles=6", clusterLog); } - if(!g_EventLogger.addHandler(logdest)) { - ndbout << "ERROR: cannot parse \"" << logdest << "\"" << endl; - exit(1); + ndbout << "Warning: could not add log destination \"" << logdest.c_str() << "\"" << endl; } } diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp index 9542b226d7d..eace1a0acf5 100644 --- a/ndb/src/ndbapi/NdbScanFilter.cpp +++ b/ndb/src/ndbapi/NdbScanFilter.cpp @@ -337,7 +337,6 @@ static const tab2 table2[] = { const int tab_sz = sizeof(table)/sizeof(table[0]); const int tab2_sz = 
sizeof(table2)/sizeof(table2[0]); -template int matchType(const NdbDictionary::Column * col){ return 1; @@ -382,7 +381,7 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op, return -1; } - if(!matchType(col)){ + if(!matchType(col)){ /** * Code not reached */ @@ -777,3 +776,9 @@ main(void){ return 0; } #endif + +template class Vector; +template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32); +template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64); + + diff --git a/ndb/test/src/HugoCalculator.cpp b/ndb/test/src/HugoCalculator.cpp index 55aa96a4909..147c8b104d8 100644 --- a/ndb/test/src/HugoCalculator.cpp +++ b/ndb/test/src/HugoCalculator.cpp @@ -28,7 +28,8 @@ HugoCalculator::HugoCalculator(const NdbDictionary::Table& tab) : m_tab(tab) { // The "id" column of this table is found in the first integer column - for (int i=0; igetType() == NdbDictionary::Column::Unsigned){ m_idCol = i; @@ -37,7 +38,7 @@ HugoCalculator::HugoCalculator(const NdbDictionary::Table& tab) : m_tab(tab) { } // The "number of updates" column for this table is found in the last column - for (int i=m_tab.getNoOfColumns()-1; i>=0; i--){ + for (i=m_tab.getNoOfColumns()-1; i>=0; i--){ const NdbDictionary::Column* attr = m_tab.getColumn(i); if (attr->getType() == NdbDictionary::Column::Unsigned){ m_updatesCol = i; @@ -102,7 +103,8 @@ HugoCalculator::calcValue(int record, // Fill buf with some pattern so that we can detect // anomalies in the area that we don't fill with chars - for (int i = 0; igetLength(); i++) + int i; + for (i = 0; igetLength(); i++) buf[i] = ((i+2) % 255); // Calculate length of the string to create. 
We want the string @@ -116,7 +118,7 @@ HugoCalculator::calcValue(int record, else len++; } - for(int i=0; i < len; i++) + for(i=0; i < len; i++) buf[i] = a[((val^i)%25)]; buf[len] = 0; } diff --git a/ndb/test/src/NDBT_Tables.cpp b/ndb/test/src/NDBT_Tables.cpp index 548e755a3fb..2031ddf5f04 100644 --- a/ndb/test/src/NDBT_Tables.cpp +++ b/ndb/test/src/NDBT_Tables.cpp @@ -678,17 +678,18 @@ NdbDictionary::Table* NDBT_Tables::getTable(const char* _nam){ // Search tables list to find a table NDBT_Table* tab = NULL; - for (int i=0; igetName(), _nam) == 0){ return test_tables[i]; } } - for (int i=0; igetName(), _nam) == 0){ return fail_tables[i]; } } - for (int i=0; igetName(), _nam) == 0){ return util_tables[i]; } diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index 4cd2c96486b..1bb00138d3b 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -336,24 +336,24 @@ NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(NDBT_TestSuite* psuite, NDBT_TestCaseImpl1::~NDBT_TestCaseImpl1(){ NdbCondition_Destroy(waitThreadsCondPtr); NdbMutex_Destroy(waitThreadsMutexPtr); - - for(size_t i = 0; i < initializers.size(); i++) + size_t i; + for(i = 0; i < initializers.size(); i++) delete initializers[i]; initializers.clear(); - for(size_t i = 0; i < verifiers.size(); i++) + for(i = 0; i < verifiers.size(); i++) delete verifiers[i]; verifiers.clear(); - for(size_t i = 0; i < finalizers.size(); i++) + for(i = 0; i < finalizers.size(); i++) delete finalizers[i]; finalizers.clear(); - for(size_t i = 0; i < steps.size(); i++) + for(i = 0; i < steps.size(); i++) delete steps[i]; steps.clear(); results.clear(); - for(size_t i = 0; i < testTables.size(); i++) + for(i = 0; i < testTables.size(); i++) delete testTables[i]; testTables.clear(); - for(size_t i = 0; i < testResults.size(); i++) + for(i = 0; i < testResults.size(); i++) delete testResults[i]; testResults.clear(); @@ -487,7 +487,8 @@ void NDBT_TestCaseImpl1::waitSteps(){ waitThreadsMutexPtr); unsigned 
completedSteps = 0; - for(unsigned i=0; iprint(); } - for(unsigned i=0; iprint(); } - for(unsigned i=0; iprint(); } - for(unsigned i=0; iprint(); } diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index 7ce2739a157..f3312b895c0 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -121,7 +121,8 @@ getStatus(){ retries++; continue; } - for (int i = 0; i < status->no_of_nodes; i++){ + int count = status->no_of_nodes; + for (int i = 0; i < count; i++){ node = &status->node_states[i]; switch(node->node_type){ case NDB_MGM_NODE_TYPE_NDB: @@ -142,7 +143,7 @@ getStatus(){ apiNodes.clear(); free(status); status = NULL; - i = status->no_of_nodes; + count = 0; ndbout << "kalle"<< endl; break; From 03f430206f00294f4558eb6a000e6b71048db198 Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Sat, 17 Jul 2004 19:31:16 +0200 Subject: [PATCH 05/93] Added new testcase for Bug #4479 testBasic -n MassiveTransaction Inserts as many records as defined in one transaction using loadTable --- ndb/test/include/HugoTransactions.hpp | 3 +- ndb/test/ndbapi/testBasic.cpp | 18 ++++++++-- ndb/test/src/HugoTransactions.cpp | 47 +++++++++++++++++---------- 3 files changed, 48 insertions(+), 20 deletions(-) diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp index 5ff1fef16bc..3d373652cbc 100644 --- a/ndb/test/include/HugoTransactions.hpp +++ b/ndb/test/include/HugoTransactions.hpp @@ -34,7 +34,8 @@ public: int records, int batch = 512, bool allowConstraintViolation = true, - int doSleep = 0); + int doSleep = 0, + bool oneTrans = false); int scanReadRecords(Ndb*, int records, int abort = 0, diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp index 64dfe492c2c..af25a36dde2 100644 --- a/ndb/test/ndbapi/testBasic.cpp +++ b/ndb/test/ndbapi/testBasic.cpp @@ -29,9 +29,18 @@ * delete should be visible to same transaction * */ +int runLoadTable2(NDBT_Context* ctx, NDBT_Step* step) +{ + int records = 
ctx->getNumRecords(); + HugoTransactions hugoTrans(*ctx->getTab()); + if (hugoTrans.loadTable(GETNDB(step), records, 512, false, 0, true) != 0){ + return NDBT_FAILED; + } + return NDBT_OK; +} -int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){ - +int runLoadTable(NDBT_Context* ctx, NDBT_Step* step) +{ int records = ctx->getNumRecords(); HugoTransactions hugoTrans(*ctx->getTab()); if (hugoTrans.loadTable(GETNDB(step), records) != 0){ @@ -1255,6 +1264,11 @@ TESTCASE("MassiveRollback2", INITIALIZER(runMassiveRollback2); FINALIZER(runClearTable2); } +TESTCASE("MassiveTransaction", + "Test very large insert transaction"){ + INITIALIZER(runLoadTable2); + FINALIZER(runClearTable2); +} NDBT_TESTSUITE_END(testBasic); int main(int argc, const char** argv){ diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 7f12484ddc8..994ad3284bb 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -819,12 +819,14 @@ HugoTransactions::loadTable(Ndb* pNdb, int records, int batch, bool allowConstraintViolation, - int doSleep){ + int doSleep, + bool oneTrans){ int check; int retryAttempt = 0; int retryMax = 5; NdbConnection *pTrans; NdbOperation *pOp; + bool first_batch = true; const int org = batch; const int cols = tab.getNoOfColumns(); @@ -833,7 +835,7 @@ HugoTransactions::loadTable(Ndb* pNdb, batch = (batch * 256); // -> 512 -> 65536k per commit batch = batch/bytes; // batch = batch == 0 ? 1 : batch; - + if(batch != org){ g_info << "batch = " << org << " rowsize = " << bytes << " -> rows/commit = " << batch << endl; @@ -841,7 +843,7 @@ HugoTransactions::loadTable(Ndb* pNdb, g_info << "|- Inserting records..." 
<< endl; for (int c=0 ; c= retryMax){ g_info << "Record " << c << " could not be inserted, has retried " << retryAttempt << " times " << endl; @@ -852,19 +854,22 @@ HugoTransactions::loadTable(Ndb* pNdb, if (doSleep > 0) NdbSleep_MilliSleep(doSleep); - pTrans = pNdb->startTransaction(); + if (first_batch || !oneTrans) { + first_batch = false; + pTrans = pNdb->startTransaction(); + + if (pTrans == NULL) { + const NdbError err = pNdb->getNdbError(); - if (pTrans == NULL) { - const NdbError err = pNdb->getNdbError(); - - if (err.status == NdbError::TemporaryError){ - ERR(err); - NdbSleep_MilliSleep(50); - retryAttempt++; - continue; + if (err.status == NdbError::TemporaryError){ + ERR(err); + NdbSleep_MilliSleep(50); + retryAttempt++; + continue; + } + ERR(err); + return NDBT_FAILED; } - ERR(err); - return NDBT_FAILED; } for(int b = 0; b < batch && c+bexecute( Commit ); + if (!oneTrans || (c + batch) >= records) { + closeTrans = true; + check = pTrans->execute( Commit ); + } else { + closeTrans = false; + check = pTrans->execute( NoCommit ); + } if(check == -1 ) { const NdbError err = pTrans->getNdbError(); pNdb->closeTransaction(pTrans); @@ -937,8 +948,10 @@ HugoTransactions::loadTable(Ndb* pNdb, break; } } - else{ - pNdb->closeTransaction(pTrans); + else{ + if (closeTrans) { + pNdb->closeTransaction(pTrans); + } } // Step to next record From 73360e558228c8d8d1869d54e62f46b0f77a324f Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Tue, 20 Jul 2004 00:23:49 +0200 Subject: [PATCH 06/93] Bug #4479 Ensures that the node doesn't crash by overflowing the UNDO log buffer at local checkpoints. Inserts a real-time break after 512 operations and when low on UNDO log buffer. 
--- ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 2 + ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 106 ++++++++++++++++------ 2 files changed, 78 insertions(+), 30 deletions(-) diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp index 6ba2d083e58..5185e91caac 100644 --- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp @@ -218,6 +218,7 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: " #define ZREL_FRAG 6 #define ZREL_DIR 7 #define ZREPORT_MEMORY_USAGE 8 +#define ZLCP_OP_WRITE_RT_BREAK 9 /* ------------------------------------------------------------------------- */ /* ERROR CODES */ @@ -1190,6 +1191,7 @@ private: void zpagesize_error(const char* where); void reportMemoryUsage(Signal* signal, int gth); + void lcp_write_op_to_undolog(Signal* signal); // Initialisation diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 933ee2cf8e1..ccc1acdd273 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -46,13 +46,17 @@ Dbacc::remainingUndoPages(){ ndbrequire(HeadPage>=TailPage); Uint32 UsedPages = HeadPage - TailPage; - Uint32 Remaining = cundopagesize - UsedPages; + Int32 Remaining = cundopagesize - UsedPages; // There can not be more than cundopagesize remaining - ndbrequire(Remaining<=cundopagesize); - + if (Remaining <= 0){ + // No more undolog, crash node + progError(__LINE__, + ERR_NO_MORE_UNDOLOG, + "There are more than 1Mbyte undolog writes outstanding"); + } return Remaining; -}//Dbacc::remainingUndoPages() +} void Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){ @@ -193,6 +197,17 @@ void Dbacc::execCONTINUEB(Signal* signal) return; } + case ZLCP_OP_WRITE_RT_BREAK: + { + operationRecPtr.i= signal->theData[1]; + fragrecptr.i= signal->theData[2]; + lcpConnectptr.i= signal->theData[3]; + ptrCheckGuard(operationRecPtr, coprecsize, operationrec); 
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); + ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec); + lcp_write_op_to_undolog(signal); + return; + } default: ndbrequire(false); break; @@ -7697,32 +7712,70 @@ void Dbacc::execACC_LCPREQ(Signal* signal) fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex; fragrecptr.p->createLcp = ZTRUE; operationRecPtr.i = fragrecptr.p->lockOwnersList; - while (operationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); + lcp_write_op_to_undolog(signal); +} - if ((operationRecPtr.p->operation == ZINSERT) || - (operationRecPtr.p->elementIsDisappeared == ZTRUE)){ +void +Dbacc::lcp_write_op_to_undolog(Signal* signal) +{ + bool delay_continueb= false; + Uint32 i, j; + for (i= 0; i < 16; i++) { + jam(); + if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) { + jam(); + delay_continueb= true; + break; + } + for (j= 0; j < 32; j++) { + if (operationRecPtr.i == RNIL) { + jam(); + break; + } + jam(); + ptrCheckGuard(operationRecPtr, coprecsize, operationrec); + + if ((operationRecPtr.p->operation == ZINSERT) || + (operationRecPtr.p->elementIsDisappeared == ZTRUE)){ /******************************************************************* * Only log inserts and elements that are marked as dissapeared. 
* All other operations update the element header and that is handled * when pages are written to disk ********************************************************************/ - undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1); - ptrAss(undopageptr, undopage); - theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK; - tundoindex = theadundoindex + ZUNDOHEADSIZE; + undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1); + ptrAss(undopageptr, undopage); + theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK; + tundoindex = theadundoindex + ZUNDOHEADSIZE; - writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/ - /* IN OP REC, IS WRITTEN AT UNDO PAGES */ - cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */ - writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */ - checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */ + writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/ + /* IN OP REC, IS WRITTEN AT UNDO PAGES */ + cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */ + writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */ + checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */ /* UNDO PAGES,CURRENTLY 8, IS FILLED */ - }//if - - operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp; - }//while + } + operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp; + } + if (operationRecPtr.i == RNIL) { + jam(); + break; + } + } + if (operationRecPtr.i != RNIL) { + jam(); + signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK; + signal->theData[1]= operationRecPtr.i; + signal->theData[2]= fragrecptr.i; + signal->theData[3]= lcpConnectptr.i; + if (delay_continueb) { + jam(); + sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4); + } else { + jam(); + sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB); + } + return; + } signal->theData[0] = 
fragrecptr.p->lcpLqhPtr; sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED, @@ -7735,8 +7788,7 @@ void Dbacc::execACC_LCPREQ(Signal* signal) signal->theData[0] = lcpConnectptr.i; signal->theData[1] = fragrecptr.i; sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB); - return; -}//Dbacc::execACC_LCPREQ() +} /* ******************--------------------------------------------------------------- */ /* ACC_SAVE_PAGES A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */ @@ -8595,12 +8647,6 @@ void Dbacc::checkUndoPages(Signal* signal) * RECORDS IN */ Uint16 nextUndoPageId = tundoPageId + 1; - if (nextUndoPageId > (clastUndoPageIdWritten + cundopagesize)){ - // No more undolog, crash node - progError(__LINE__, - ERR_NO_MORE_UNDOLOG, - "There are more than 1Mbyte undolog writes outstanding"); - } updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS); if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) { From 0449396a674c88a0f8bee17e21c33d3b80fcc38a Mon Sep 17 00:00:00 2001 From: "jan@hundin.mysql.fi" <> Date: Tue, 20 Jul 2004 14:15:38 +0300 Subject: [PATCH 07/93] Added innodb_locks_unsafe_for_binlog option. This option turns off Innodb next-key locking. Using this option the locks InnoDB sets on index records do not affect the ``gap'' before that index record. Thus, this option allows phantom problem. 
--- BitKeeper/etc/logging_ok | 1 + innobase/include/srv0srv.h | 1 + innobase/row/row0sel.c | 83 ++++++++++++++++++++++++++++++++++---- innobase/srv/srv0srv.c | 4 ++ sql/ha_innodb.cc | 2 + sql/ha_innodb.h | 2 +- sql/mysqld.cc | 5 +++ sql/set_var.cc | 1 + 8 files changed, 91 insertions(+), 8 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 398a9295579..a9cb6429a35 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -61,6 +61,7 @@ hf@genie.(none) igor@hundin.mysql.fi igor@rurik.mysql.com ingo@mysql.com +jan@hundin.mysql.fi jani@a80-186-24-72.elisa-laajakaista.fi jani@dsl-jkl1657.dial.inet.fi jani@dsl-kpogw4gb5.dial.inet.fi diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h index c527d40bc79..b98223cff68 100644 --- a/innobase/include/srv0srv.h +++ b/innobase/include/srv0srv.h @@ -42,6 +42,7 @@ extern char* srv_arch_dir; #endif /* UNIV_LOG_ARCHIVE */ extern ibool srv_file_per_table; +extern ibool srv_locks_unsafe_for_binlog; extern ulint srv_n_data_files; extern char** srv_data_file_names; diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c index 8a0da2851a7..bf7f6f1fc3a 100644 --- a/innobase/row/row0sel.c +++ b/innobase/row/row0sel.c @@ -631,10 +631,24 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - + + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + if ( srv_locks_unsafe_for_binlog ) + { + err = lock_clust_rec_read_check_and_lock(0, clust_rec, + index,node->row_lock_mode, LOCK_REC_NOT_GAP, thr); + } + else + { err = lock_clust_rec_read_check_and_lock(0, clust_rec, index, node->row_lock_mode, LOCK_ORDINARY, thr); - if (err != DB_SUCCESS) { + + } + + if (err != DB_SUCCESS) { return(err); } @@ -1184,9 +1198,23 @@ rec_loop: search result set, resulting in the phantom problem. 
*/ if (!consistent_read) { + + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + + if ( srv_locks_unsafe_for_binlog ) + { + err = sel_set_rec_lock(page_rec_get_next(rec), index, + node->row_lock_mode, LOCK_REC_NOT_GAP, thr); + } + else + { err = sel_set_rec_lock(page_rec_get_next(rec), index, node->row_lock_mode, LOCK_ORDINARY, thr); - if (err != DB_SUCCESS) { + } + if (err != DB_SUCCESS) { /* Note that in this case we will store in pcur the PREDECESSOR of the record we are waiting the lock for */ @@ -1211,8 +1239,22 @@ rec_loop: if (!consistent_read) { /* Try to place a lock on the index record */ - err = sel_set_rec_lock(rec, index, node->row_lock_mode, + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + + if ( srv_locks_unsafe_for_binlog ) + { + err = sel_set_rec_lock(rec, index, node->row_lock_mode, + LOCK_REC_NOT_GAP, thr); + } + else + { + err = sel_set_rec_lock(rec, index, node->row_lock_mode, LOCK_ORDINARY, thr); + } + if (err != DB_SUCCESS) { goto lock_wait_or_error; @@ -3144,10 +3186,24 @@ rec_loop: /* Try to place a lock on the index record */ - err = sel_set_rec_lock(rec, index, + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. + */ + if ( srv_locks_unsafe_for_binlog ) + { + err = sel_set_rec_lock(rec, index, + prebuilt->select_lock_type, + LOCK_REC_NOT_GAP, thr); + } + else + { + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_ORDINARY, thr); - if (err != DB_SUCCESS) { + } + + if (err != DB_SUCCESS) { goto lock_wait_or_error; } @@ -3300,9 +3356,22 @@ rec_loop: prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr); } else { - err = sel_set_rec_lock(rec, index, + /* If innodb_locks_unsafe_for_binlog option is used, + we lock only the record, i.e. next-key locking is + not used. 
+ */ + if ( srv_locks_unsafe_for_binlog ) + { + err = sel_set_rec_lock(rec, index, + prebuilt->select_lock_type, + LOCK_REC_NOT_GAP, thr); + } + else + { + err = sel_set_rec_lock(rec, index, prebuilt->select_lock_type, LOCK_ORDINARY, thr); + } } if (err != DB_SUCCESS) { diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c index fc46c95a8a6..4c305a76547 100644 --- a/innobase/srv/srv0srv.c +++ b/innobase/srv/srv0srv.c @@ -74,6 +74,10 @@ ibool srv_file_per_table = FALSE; /* store to its own file each table created by an user; data dictionary tables are in the system tablespace 0 */ +ibool srv_locks_unsafe_for_binlog = FALSE; /* Place locks to records only + i.e. do not use next-key locking + except on duplicate key checking and + foreign key checking */ ulint srv_n_data_files = 0; char** srv_data_file_names = NULL; ulint* srv_data_file_sizes = NULL; /* size in database pages */ diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 6eae315e443..a7dce3a6ab8 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -117,6 +117,7 @@ my_bool innobase_log_archive = FALSE;/* unused */ my_bool innobase_use_native_aio = FALSE; my_bool innobase_fast_shutdown = TRUE; my_bool innobase_file_per_table = FALSE; +my_bool innobase_locks_unsafe_for_binlog = FALSE; static char *internal_innobase_data_file_path = NULL; @@ -908,6 +909,7 @@ innobase_init(void) srv_fast_shutdown = (ibool) innobase_fast_shutdown; srv_file_per_table = (ibool) innobase_file_per_table; + srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; srv_max_n_open_files = (ulint) innobase_open_files; diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e09697f7ce6..6815bdd632d 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -189,7 +189,7 @@ extern char *innobase_unix_file_flush_method; /* The following variables have to be my_bool for SHOW VARIABLES to work */ extern my_bool innobase_log_archive, innobase_use_native_aio, innobase_fast_shutdown, - innobase_file_per_table; + 
innobase_file_per_table, innobase_locks_unsafe_for_binlog; extern "C" { extern ulong srv_max_buf_pool_modified_pct; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 4fd13d33bab..869048cee93 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3880,6 +3880,7 @@ enum options_mysqld OPT_INNODB_FLUSH_METHOD, OPT_INNODB_FAST_SHUTDOWN, OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, + OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_SKIP_SAFEMALLOC, OPT_TEMP_POOL, OPT_TX_ISOLATION, @@ -4156,6 +4157,10 @@ Disable with --skip-bdb (will save memory).", "Stores each InnoDB table to an .ibd file in the database dir.", (gptr*) &innobase_file_per_table, (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, + "Force Innodb not to use next-key locking. Instead use only row-level locking", + (gptr*) &innobase_locks_unsafe_for_binlog, + (gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif /* End HAVE_INNOBASE_DB */ {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, diff --git a/sql/set_var.cc b/sql/set_var.cc index e1cfb77d297..fb9ff285859 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -673,6 +673,7 @@ struct show_var_st init_vars[]= { {"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL}, {"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG }, {"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL}, + {"innodb_locks_unsafe_for_binlog", (char*) &innobase_locks_unsafe_for_binlog, SHOW_MY_BOOL}, {"innodb_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_INT}, {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR}, {"innodb_force_recovery", (char*) 
&innobase_force_recovery, SHOW_LONG }, From 569d3c848884b09f30ec0e8ef8bafb73aa167e3e Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Sat, 24 Jul 2004 03:30:11 -0700 Subject: [PATCH 08/93] WL#1518, "make bundled zlib usable for unix builds": required autotools macro written and deployed in all apropriate Makefile.ams. Use cases checked: - linux, standard location of zlib, no ndb - linux, standard locatoin of zlib, with ndb - linux, non-standard location of zlib, no ndb - hpux11, use of bundled zlib, no ndb The only non-checked case is non-standard location of zlib (or use of bundled zlib) + ndb. I wasn't able to check it as ndb/ just won't compile on beasts like AIX52 or HPUX11, where such a check is possible. It didn't compile there before as these systems dont't have installed zlib, so nothing got broken ;) --- Makefile.am | 11 ++++- acinclude.m4 | 108 ++++++++++++++++++++++++++++++++--------- configure.in | 23 ++------- libmysql/Makefile.am | 2 +- libmysql_r/Makefile.am | 3 +- libmysqld/Makefile.am | 2 +- myisam/Makefile.am | 7 ++- mysys/Makefile.am | 3 +- sql/Makefile.am | 19 ++++---- tools/Makefile.am | 22 ++++++++- zlib/Makefile.am | 29 +++++++++++ 11 files changed, 167 insertions(+), 62 deletions(-) create mode 100644 zlib/Makefile.am diff --git a/Makefile.am b/Makefile.am index f8efb247c95..e2d61e56b60 100644 --- a/Makefile.am +++ b/Makefile.am @@ -19,8 +19,15 @@ AUTOMAKE_OPTIONS = foreign # These are built from source in the Docs directory -EXTRA_DIST = INSTALL-SOURCE README COPYING zlib -SUBDIRS = . include @docs_dirs@ \ +EXTRA_DIST = INSTALL-SOURCE README COPYING +SUBDIRS = . include @docs_dirs@ @zlib_dir@ \ + @readline_topdir@ sql-common \ + @thread_dirs@ pstack @sql_client_dirs@ \ + @sql_server_dirs@ scripts man tests \ + netware @libmysqld_dirs@ \ + @bench_dirs@ support-files @fs_dirs@ @tools_dirs@ + +DIST_SUBDIRS = . 
include @docs_dirs@ zlib \ @readline_topdir@ sql-common \ @thread_dirs@ pstack @sql_client_dirs@ \ @sql_server_dirs@ scripts man tests SSL\ diff --git a/acinclude.m4 b/acinclude.m4 index 0e6dab052ab..bcfa7b55e9b 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -167,32 +167,94 @@ then fi ]) -AC_DEFUN(MYSQL_CHECK_ZLIB_WITH_COMPRESS, [ -save_LIBS="$LIBS" -LIBS="-l$1 $LIBS" -AC_CACHE_CHECK([if libz with compress], mysql_cv_compress, -[AC_TRY_RUN([#include -#ifdef __cplusplus -extern "C" -#endif -int main(int argv, char **argc) -{ - return 0; -} -int link_test() -{ - return compress(0, (unsigned long*) 0, "", 0); -} -], mysql_cv_compress=yes, mysql_cv_compress=no)]) -if test "$mysql_cv_compress" = "yes" -then - AC_DEFINE([HAVE_COMPRESS], [1], [ZLIB and compress]) -else - LIBS="$save_LIBS" -fi +dnl MYSQL_CHECK_ZLIB_WITH_COMPRESS +dnl ------------------------------------------------------------------------ +dnl @synopsis MYSQL_CHECK_ZLIB_WITH_COMPRESS +dnl +dnl Provides the following configure options: +dnl --with-zlib-dir - custom location of compression library. +dnl MySQL needs both header file (zlib.h) and the library +dnl (libz.a). Given location prefix, the macro expects +dnl to find the library headers in $prefix/include, +dnl and binaries in $prefix/lib. If DIR is "no", +dnl compression and all dependent functions will be +dnl disabled. +dnl The call checks presense of 'zlib' compression library in default or +dnl given location. If there is no default library, the macro falls +dnl back to use zlib bundled along with MySQL sources. But if configure is +dnl called with custom name/path, and there is no library at given place, +dnl the macro bails out with error. +dnl +dnl If the library was found, this function #defines HAVE_COMPRESS +dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include) and +dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz). +dnl +dnl Exception is Novell Netware, where we assume zlib is always present. 
+ +AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [ +AC_MSG_CHECKING([for zlib compression library]) +case $SYSTEM_TYPE in + *netware* | *modesto*) + AC_MSG_RESULT(ok) + ;; + *) + AC_ARG_WITH([zlib-dir], + AC_HELP_STRING([--with-zlib-dir=DIR], + [Provide MySQL with a custom location of + compression library. Given DIR, zlib binary is + assumed to be in $DIR/lib and header files + in $DIR/include.]), + [mysql_zlib_dir=${withval}], + [mysql_zlib_dir=""]) + if test "$mysql_zlib_dir" = "no"; then + mysql_cv_compress="no" + AC_MSG_RESULT([disabled]) + else + if test "$mysql_zlib_dir" = ""; then + ZLIB_INCLUDES="" + ZLIB_LIBS="-lz" + else + if test -f "$mysql_zlib_dir/lib/libz.a" -a \ + -f "$mysql_zlib_dir/include/zlib.h"; then + true + else + AC_MSG_ERROR([headers or binaries were not found in $mysql_zlib_dir/{include,lib}]) + fi + ZLIB_INCLUDES="-I$mysql_zlib_dir/include" + ZLIB_LIBS="-L$mysql_zlib_dir/lib -lz" + fi + save_INCLUDES="$INCLUDES" + save_LIBS="$LIBS" + INCLUDES="$ZLIB_INCLUDES" + LIBS="$ZLIB_LIBS" + AC_CACHE_VAL([mysql_cv_compress], + [AC_TRY_LINK([#include ], + [int link_test() { return compress(0, (unsigned long*) 0, "", 0); }], + [mysql_cv_compress="yes" + AC_MSG_RESULT(ok)], + [if test "$mysql_zlib_dir" = ""; then + AC_MSG_RESULT([system-wide zlib not found, using one bundled with MySQL]) + ZLIB_INCLUDES="-I\$(top_srcdir)/zlib" + ZLIB_LIBS="-L\$(top_builddir)/zlib -lz" + zlib_dir="zlib" + AC_SUBST([zlib_dir]) + mysql_cv_compress="yes" + else + AC_MSG_ERROR([not found in $mysql_zlib_dir]) + fi])]) + INCLUDES="$save_INCLUDES" + LIBS="$save_LIBS" + AC_DEFINE([HAVE_COMPRESS], [1], [Define if zlib is present]) + AC_SUBST([ZLIB_LIBS]) + AC_SUBST([ZLIB_INCLUDES]) + fi + ;; +esac ]) +dnl ------------------------------------------------------------------------ + #---START: Used in for client configure AC_DEFUN(MYSQL_CHECK_ULONG, [AC_MSG_CHECKING(for type ulong) diff --git a/configure.in b/configure.in index 080c2bcc823..7309d73970e 100644 --- a/configure.in +++ 
b/configure.in @@ -664,15 +664,6 @@ AC_ARG_WITH(named-curses-libs, [ with_named_curses=no ] ) -# Force use of a zlib (compress) -AC_ARG_WITH(named-z-libs, - [ --with-named-z-libs=ARG - Use specified zlib libraries instead of - those automatically found by configure.], - [ with_named_zlib=$withval ], - [ with_named_zlib=z ] - ) - # Make thread safe client AC_ARG_ENABLE(thread-safe-client, [ --enable-thread-safe-client @@ -806,16 +797,7 @@ AC_CHECK_FUNC(crypt, AC_DEFINE([HAVE_CRYPT], [1], [crypt])) # For sem_xxx functions on Solaris 2.6 AC_CHECK_FUNC(sem_init, , AC_CHECK_LIB(posix4, sem_init)) - -# For compress in zlib -case $SYSTEM_TYPE in - *netware* | *modesto*) - AC_DEFINE(HAVE_COMPRESS, [1]) - ;; - *) - MYSQL_CHECK_ZLIB_WITH_COMPRESS($with_named_zlib) - ;; -esac +MYSQL_CHECK_ZLIB_WITH_COMPRESS #-------------------------------------------------------------------- # Check for TCP wrapper support @@ -945,7 +927,7 @@ then fi # We make a special variable for client library's to avoid including # thread libs in the client. 
-NON_THREADED_CLIENT_LIBS="$LIBS" +NON_THREADED_CLIENT_LIBS="$LIBS $ZLIB_LIBS" AC_MSG_CHECKING([for int8]) case $SYSTEM_TYPE in @@ -3082,6 +3064,7 @@ AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl include/mysql_version.h dnl cmd-line-utils/Makefile dnl cmd-line-utils/libedit/Makefile dnl + zlib/Makefile dnl cmd-line-utils/readline/Makefile) AC_CONFIG_COMMANDS([default], , test -z "$CONFIG_HEADERS" || echo timestamp > stamp-h) AC_OUTPUT diff --git a/libmysql/Makefile.am b/libmysql/Makefile.am index 3e026fe589a..7e43ff751f9 100644 --- a/libmysql/Makefile.am +++ b/libmysql/Makefile.am @@ -20,7 +20,7 @@ target = libmysqlclient.la target_defs = -DUNDEF_THREADS_HACK -DDONT_USE_RAID @LIB_EXTRA_CCFLAGS@ LIBS = @CLIENT_LIBS@ -INCLUDES = -I$(top_srcdir)/include $(openssl_includes) +INCLUDES = -I$(top_srcdir)/include $(openssl_includes) @ZLIB_INCLUDES@ include $(srcdir)/Makefile.shared diff --git a/libmysql_r/Makefile.am b/libmysql_r/Makefile.am index b75f65b6f78..5329c2cf18f 100644 --- a/libmysql_r/Makefile.am +++ b/libmysql_r/Makefile.am @@ -21,7 +21,8 @@ target = libmysqlclient_r.la target_defs = -DDONT_USE_RAID -DMYSQL_CLIENT @LIB_EXTRA_CCFLAGS@ LIBS = @LIBS@ @openssl_libs@ -INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes) +INCLUDES = @MT_INCLUDES@ \ + -I$(top_srcdir)/include $(openssl_includes) @ZLIB_INCLUDES@ ## automake barfs if you don't use $(srcdir) or $(top_srcdir) in include include $(top_srcdir)/libmysql/Makefile.shared diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am index a0825a6a4fd..75a5ef7ff91 100644 --- a/libmysqld/Makefile.am +++ b/libmysqld/Makefile.am @@ -27,7 +27,7 @@ DEFS = -DEMBEDDED_LIBRARY -DMYSQL_SERVER \ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" INCLUDES= @MT_INCLUDES@ @bdb_includes@ -I$(top_srcdir)/include \ -I$(top_srcdir)/sql -I$(top_srcdir)/regex \ - $(openssl_includes) + $(openssl_includes) @ZLIB_INCLUDES@ noinst_LIBRARIES = libmysqld_int.a pkglib_LIBRARIES = libmysqld.a diff --git a/myisam/Makefile.am 
b/myisam/Makefile.am index 5aa0740261e..9f4eef348a3 100644 --- a/myisam/Makefile.am +++ b/myisam/Makefile.am @@ -18,8 +18,11 @@ EXTRA_DIST = mi_test_all.sh mi_test_all.res pkgdata_DATA = mi_test_all mi_test_all.res INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include -LDADD = @CLIENT_EXTRA_LDFLAGS@ libmyisam.a ../mysys/libmysys.a \ - ../dbug/libdbug.a ../strings/libmystrings.a +LDADD = @CLIENT_EXTRA_LDFLAGS@ libmyisam.a \ + $(top_builddir)/mysys/libmysys.a \ + $(top_builddir)/dbug/libdbug.a \ + @ZLIB_LIBS@ \ + $(top_builddir)/strings/libmystrings.a pkglib_LIBRARIES = libmyisam.a bin_PROGRAMS = myisamchk myisamlog myisampack myisam_ftdump myisamchk_DEPENDENCIES= $(LIBRARIES) diff --git a/mysys/Makefile.am b/mysys/Makefile.am index d4290bbc49b..3ffeeab0411 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -17,7 +17,8 @@ MYSQLDATAdir = $(localstatedir) MYSQLSHAREdir = $(pkgdatadir) MYSQLBASEdir= $(prefix) -INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir) +INCLUDES = @MT_INCLUDES@ \ + @ZLIB_INCLUDES@ -I$(top_srcdir)/include -I$(srcdir) pkglib_LIBRARIES = libmysys.a LDADD = libmysys.a ../dbug/libdbug.a \ ../strings/libmystrings.a diff --git a/sql/Makefile.am b/sql/Makefile.am index 007239f2e8c..9859f1ef841 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -19,7 +19,7 @@ MYSQLDATAdir = $(localstatedir) MYSQLSHAREdir = $(pkgdatadir) MYSQLBASEdir= $(prefix) -INCLUDES = @MT_INCLUDES@ \ +INCLUDES = @MT_INCLUDES@ @ZLIB_INCLUDES@ \ @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \ -I$(top_srcdir)/include -I$(top_srcdir)/regex \ -I$(srcdir) $(openssl_includes) @@ -30,14 +30,15 @@ noinst_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ LDADD = @isam_libs@ \ - ../myisam/libmyisam.a \ - ../myisammrg/libmyisammrg.a \ - ../heap/libheap.a \ - ../vio/libvio.a \ - ../mysys/libmysys.a \ - ../dbug/libdbug.a \ - ../regex/libregex.a \ - ../strings/libmystrings.a + @ZLIB_LIBS@ \ + $(top_builddir)/myisam/libmyisam.a 
\ + $(top_builddir)/myisammrg/libmyisammrg.a \ + $(top_builddir)/heap/libheap.a \ + $(top_builddir)/vio/libvio.a \ + $(top_builddir)/mysys/libmysys.a \ + $(top_builddir)/dbug/libdbug.a \ + $(top_builddir)/regex/libregex.a \ + $(top_builddir)/strings/libmystrings.a mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @bdb_libs@ @innodb_libs@ @pstack_libs@ \ diff --git a/tools/Makefile.am b/tools/Makefile.am index 0dc0b90c60e..50d1c8af56a 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -1,5 +1,23 @@ -INCLUDES = @MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes) -LDADD= @CLIENT_EXTRA_LDFLAGS@ ../libmysql_r/libmysqlclient_r.la @openssl_libs@ +# Copyright (C) 2004 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# Process this file with automake to create Makefile.in + +INCLUDES=@MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes) +LDADD= @CLIENT_EXTRA_LDFLAGS@ @openssl_libs@ @ZLIB_LIBS@ \ + $(top_builddir)/libmysql_r/libmysqlclient_r.la \ bin_PROGRAMS= mysqlmanager mysqlmanager_SOURCES= mysqlmanager.c mysqlmanager_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) diff --git a/zlib/Makefile.am b/zlib/Makefile.am new file mode 100644 index 00000000000..81d0f26082d --- /dev/null +++ b/zlib/Makefile.am @@ -0,0 +1,29 @@ +# Copyright (C) 2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +# Process this file with automake to create Makefile.in + +noinst_LIBRARIES=libz.a + +noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \ + inftrees.h trees.h zconf.h zlib.h zutil.h + +libz_a_SOURCES= adler32.c compress.c crc32.c deflate.c gzio.c \ + infback.c inffast.c inflate.c inftrees.c trees.c \ + uncompr.c zutil.c + +EXTRA_DIST= README FAQ INDEX ChangeLog algorithm.txt zlib.3 + From 3de8784b17c03eed88dcc8b9c1ad285241dd702c Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Mon, 26 Jul 2004 21:33:42 +0200 Subject: [PATCH 09/93] safemalloc always resets the free'd memory, not only when PEDANTIC_SAFEMALLOC --- sql/field.h | 2 +- sql/sql_list.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/field.h b/sql/field.h index 24faee9d314..7f35b006c03 100644 --- a/sql/field.h +++ b/sql/field.h @@ -38,7 +38,7 @@ class Field public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } static void operator delete(void *ptr_arg, size_t size) { -#ifdef PEDANTIC_SAFEMALLOC +#ifdef SAFEMALLOC bfill(ptr_arg, size, 0x8F); #endif } diff --git a/sql/sql_list.h b/sql/sql_list.h index 22e9ed37386..c3b9c7f87ea 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -19,9 +19,9 @@ #pragma interface /* gcc class implementation */ #endif -/* mysql standard class memoryallocator */ +/* mysql standard class memory allocator */ -#ifdef PEDANTIC_SAFEMALLOC +#ifdef SAFEMALLOC #define TRASH(XX,YY) bfill((XX), (YY), 0x8F) #else #define TRASH(XX,YY) /* no-op */ From 7f58bb793e0e15f45ceee9244f20c1b9a9adf1ed Mon Sep 17 00:00:00 2001 From: "bar@mysql.com" <> Date: Tue, 27 Jul 2004 18:43:40 +0500 Subject: [PATCH 10/93] A small fix to understand 4.1.0 format. 
--- sql/sql_db.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/sql/sql_db.cc b/sql/sql_db.cc index ef180b58ee0..82fef3f7c7b 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -320,10 +320,17 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) { if (!strncmp(buf,"default-character-set", (pos-buf))) { + /* + Try character set name, and if it fails + try collation name, probably it's an old + 4.1.0 db.opt file, which didn't have + separate default-character-set and + default-collation commands. + */ if (!(create->default_table_charset= - get_charset_by_csname(pos+1, - MY_CS_PRIMARY, - MYF(0)))) + get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && + !(create->default_table_charset= + get_charset_by_name(pos+1, MYF(0)))) { sql_print_error("Error while loading database options: '%s':",path); sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); From d68ed3f0f7bc0730c97f4b40fdc9d42d1b5c60ad Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jul 2004 10:05:55 -0700 Subject: [PATCH 11/93] WL#1518 "make bundled zlib usable for unix builds", post review fixes: - comment for AC_DEFINE(HAVE_COMPRESS) fixed - build convenience library from zlib: we need to compile it into both libmysqlclient.a and libmysqlclient.so - --with-zlib-dir=bundled configure option --- acinclude.m4 | 122 +++++++++++++++++++++++++++------------------- tools/Makefile.am | 2 +- zlib/Makefile.am | 10 ++-- 3 files changed, 78 insertions(+), 56 deletions(-) diff --git a/acinclude.m4 b/acinclude.m4 index bcfa7b55e9b..0df0eed85d7 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -168,36 +168,65 @@ fi ]) +dnl Define zlib paths to point at bundled zlib + +AC_DEFUN([MYSQL_USE_BUNDLED_ZLIB], [ +ZLIB_INCLUDES="-I\$(top_srcdir)/zlib" +ZLIB_LIBS="\$(top_builddir)/zlib/libz.la" +zlib_dir="zlib" +AC_SUBST([zlib_dir]) +mysql_cv_compress="yes" +]) + +dnl Auxilary macro to check for zlib at given path + +AC_DEFUN([MYSQL_CHECK_ZLIB_DIR], [ 
+save_INCLUDES="$INCLUDES" +save_LIBS="$LIBS" +INCLUDES="$ZLIB_INCLUDES" +LIBS="$ZLIB_LIBS" +AC_CACHE_VAL([mysql_cv_compress], + [AC_TRY_LINK([#include ], + [int link_test() { return compress(0, (unsigned long*) 0, "", 0); }], + [mysql_cv_compress="yes" + AC_MSG_RESULT([ok])], + [mysql_cv_compress="no"]) + ]) +INCLUDES="$save_INCLUDES" +LIBS="$save_LIBS" +]) + dnl MYSQL_CHECK_ZLIB_WITH_COMPRESS dnl ------------------------------------------------------------------------ dnl @synopsis MYSQL_CHECK_ZLIB_WITH_COMPRESS dnl dnl Provides the following configure options: -dnl --with-zlib-dir - custom location of compression library. -dnl MySQL needs both header file (zlib.h) and the library -dnl (libz.a). Given location prefix, the macro expects -dnl to find the library headers in $prefix/include, -dnl and binaries in $prefix/lib. If DIR is "no", -dnl compression and all dependent functions will be -dnl disabled. -dnl The call checks presense of 'zlib' compression library in default or -dnl given location. If there is no default library, the macro falls -dnl back to use zlib bundled along with MySQL sources. But if configure is -dnl called with custom name/path, and there is no library at given place, -dnl the macro bails out with error. +dnl --with-zlib-dir=DIR +dnl Possible DIR values are: +dnl - "no" - the macro will disable use of compression functions +dnl - "bundled" - means use zlib bundled along with MySQL sources +dnl - empty, or not specified - the macro will try default system +dnl library (if present), and in case of error will fall back to +dnl bundled zlib +dnl - zlib location prefix - given location prefix, the macro expects +dnl to find the library headers in $prefix/include, and binaries in +dnl $prefix/lib. If zlib headers or binaries weren't found at $prefix, the +dnl macro bails out with error. dnl dnl If the library was found, this function #defines HAVE_COMPRESS dnl and configure variables ZLIB_INCLUDES (i.e. 
-I/path/to/zlib/include) and dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz). -dnl -dnl Exception is Novell Netware, where we assume zlib is always present. AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [ AC_MSG_CHECKING([for zlib compression library]) case $SYSTEM_TYPE in - *netware* | *modesto*) - AC_MSG_RESULT(ok) - ;; +dnl This is a quick fix for Netware if AC_TRY_LINK for some reason +dnl won't work there. Uncomment in case of failure and on Netware +dnl we'll always assume that zlib is present +dnl *netware* | *modesto*) +dnl AC_MSG_RESULT(ok) +dnl AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support]) +dnl ;; *) AC_ARG_WITH([zlib-dir], AC_HELP_STRING([--with-zlib-dir=DIR], @@ -207,47 +236,40 @@ case $SYSTEM_TYPE in in $DIR/include.]), [mysql_zlib_dir=${withval}], [mysql_zlib_dir=""]) - if test "$mysql_zlib_dir" = "no"; then - mysql_cv_compress="no" - AC_MSG_RESULT([disabled]) - else - if test "$mysql_zlib_dir" = ""; then + case "$mysql_zlib_dir" in + "no") + mysql_cv_compress="no" + AC_MSG_RESULT([disabled]) + ;; + "bundled") + MYSQL_USE_BUNDLED_ZLIB + AC_MSG_RESULT([using bundled zlib]) + ;; + "") ZLIB_INCLUDES="" ZLIB_LIBS="-lz" - else + MYSQL_CHECK_ZLIB_DIR + if test "$mysql_cv_compress" = "no"; then + MYSQL_USE_BUNDLED_ZLIB + AC_MSG_RESULT([system-wide zlib not found, using one bundled with MySQL]) + fi + ;; + *) if test -f "$mysql_zlib_dir/lib/libz.a" -a \ -f "$mysql_zlib_dir/include/zlib.h"; then - true - else + ZLIB_INCLUDES="-I$mysql_zlib_dir/include" + ZLIB_LIBS="-L$mysql_zlib_dir/lib -lz" + MYSQL_CHECK_ZLIB_DIR + fi + if test "x$mysql_cv_compress" != "xyes"; then AC_MSG_ERROR([headers or binaries were not found in $mysql_zlib_dir/{include,lib}]) fi - ZLIB_INCLUDES="-I$mysql_zlib_dir/include" - ZLIB_LIBS="-L$mysql_zlib_dir/lib -lz" - fi - save_INCLUDES="$INCLUDES" - save_LIBS="$LIBS" - INCLUDES="$ZLIB_INCLUDES" - LIBS="$ZLIB_LIBS" - AC_CACHE_VAL([mysql_cv_compress], - [AC_TRY_LINK([#include ], - [int link_test() { return compress(0, 
(unsigned long*) 0, "", 0); }], - [mysql_cv_compress="yes" - AC_MSG_RESULT(ok)], - [if test "$mysql_zlib_dir" = ""; then - AC_MSG_RESULT([system-wide zlib not found, using one bundled with MySQL]) - ZLIB_INCLUDES="-I\$(top_srcdir)/zlib" - ZLIB_LIBS="-L\$(top_builddir)/zlib -lz" - zlib_dir="zlib" - AC_SUBST([zlib_dir]) - mysql_cv_compress="yes" - else - AC_MSG_ERROR([not found in $mysql_zlib_dir]) - fi])]) - INCLUDES="$save_INCLUDES" - LIBS="$save_LIBS" - AC_DEFINE([HAVE_COMPRESS], [1], [Define if zlib is present]) + ;; + esac + if test "$mysql_cv_compress" = "yes"; then AC_SUBST([ZLIB_LIBS]) AC_SUBST([ZLIB_INCLUDES]) + AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support]) fi ;; esac diff --git a/tools/Makefile.am b/tools/Makefile.am index 50d1c8af56a..0dc90a0d107 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -17,7 +17,7 @@ INCLUDES=@MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes) LDADD= @CLIENT_EXTRA_LDFLAGS@ @openssl_libs@ @ZLIB_LIBS@ \ - $(top_builddir)/libmysql_r/libmysqlclient_r.la \ + $(top_builddir)/libmysql_r/libmysqlclient_r.la bin_PROGRAMS= mysqlmanager mysqlmanager_SOURCES= mysqlmanager.c mysqlmanager_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) diff --git a/zlib/Makefile.am b/zlib/Makefile.am index 81d0f26082d..58d3811cd7c 100644 --- a/zlib/Makefile.am +++ b/zlib/Makefile.am @@ -16,14 +16,14 @@ # Process this file with automake to create Makefile.in -noinst_LIBRARIES=libz.a +noinst_LTLIBRARIES=libz.la noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \ - inftrees.h trees.h zconf.h zlib.h zutil.h + inftrees.h trees.h zconf.h zlib.h zutil.h -libz_a_SOURCES= adler32.c compress.c crc32.c deflate.c gzio.c \ - infback.c inffast.c inflate.c inftrees.c trees.c \ - uncompr.c zutil.c +libz_la_SOURCES= adler32.c compress.c crc32.c deflate.c gzio.c \ + infback.c inffast.c inflate.c inftrees.c trees.c \ + uncompr.c zutil.c EXTRA_DIST= README FAQ INDEX ChangeLog algorithm.txt zlib.3 From 
f6f1e1f47b9025ddff47640fc950833131588c8a Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jul 2004 10:05:57 -0700 Subject: [PATCH 12/93] acinclude.m4: another spelling mistake fixed --- acinclude.m4 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acinclude.m4 b/acinclude.m4 index 0df0eed85d7..4109ff39fdc 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -178,7 +178,7 @@ AC_SUBST([zlib_dir]) mysql_cv_compress="yes" ]) -dnl Auxilary macro to check for zlib at given path +dnl Auxiliary macro to check for zlib at given path AC_DEFUN([MYSQL_CHECK_ZLIB_DIR], [ save_INCLUDES="$INCLUDES" From 674bbf0825b4cc9262defd8007df1d0c30740477 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jul 2004 21:59:28 -0700 Subject: [PATCH 13/93] Order of libs is important when building an optimized library: put ZLIB last in the list. --- tools/Makefile.am | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Makefile.am b/tools/Makefile.am index 0dc90a0d107..5528df4dd68 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -16,8 +16,8 @@ # Process this file with automake to create Makefile.in INCLUDES=@MT_INCLUDES@ -I$(top_srcdir)/include $(openssl_includes) -LDADD= @CLIENT_EXTRA_LDFLAGS@ @openssl_libs@ @ZLIB_LIBS@ \ - $(top_builddir)/libmysql_r/libmysqlclient_r.la +LDADD= @CLIENT_EXTRA_LDFLAGS@ @openssl_libs@ \ + $(top_builddir)/libmysql_r/libmysqlclient_r.la @ZLIB_LIBS@ bin_PROGRAMS= mysqlmanager mysqlmanager_SOURCES= mysqlmanager.c mysqlmanager_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES) From d39fe2623f9461575f140f70d73e4a0cef5c8177 Mon Sep 17 00:00:00 2001 From: "dlenev@brandersnatch.localdomain" <> Date: Wed, 28 Jul 2004 10:49:21 +0400 Subject: [PATCH 14/93] Fix for bug #4492. TIMESTAMP columns should be unsigned to preserve compatibility with 4.0 (Or else InnoDB will return different internal TIMESTAMP values when user upgrades to 4.1). 
Altough this fix will introduce problems with early 4.1 -> 4.1 upgrades (tables with TIMESTAMP field should be reloaded using mysqldump) it will allow easy 4.0 -> 4.1 upgrade (which is more important since 4.1 is still beta). --- mysql-test/r/metadata.result | 2 +- mysql-test/r/ps_2myisam.result | 2 +- mysql-test/r/ps_3innodb.result | 2 +- mysql-test/r/ps_4heap.result | 2 +- mysql-test/r/ps_5merge.result | 4 ++-- sql/field.cc | 3 ++- 6 files changed, 8 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/metadata.result b/mysql-test/r/metadata.result index 80a01a0ca90..ced3ca61f80 100644 --- a/mysql-test/r/metadata.result +++ b/mysql-test/r/metadata.result @@ -21,7 +21,7 @@ def test t1 t1 g g 5 4 0 Y 32768 3 63 def test t1 t1 h h 0 7 0 Y 32768 4 63 def test t1 t1 i i 13 4 0 Y 32864 0 63 def test t1 t1 j j 10 10 0 Y 128 0 63 -def test t1 t1 k k 7 19 0 N 1217 0 63 +def test t1 t1 k k 7 19 0 N 1249 0 63 def test t1 t1 l l 12 19 0 Y 128 0 63 def test t1 t1 m m 254 1 0 Y 256 0 8 def test t1 t1 n n 254 3 0 Y 2048 0 8 diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index 23ce63cacc3..b49eedb4067 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index 8ec7caa311c..3a2708376fa 100644 --- 
a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index fae17eb2e23..4228d95677d 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -871,7 +871,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 5aedebe396f..03020ccc0f3 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -913,7 +913,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test 
t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 @@ -2106,7 +2106,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 diff --git a/sql/field.cc b/sql/field.cc index c96a5a6d809..8fba132738c 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2877,7 +2877,8 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg, :Field_str(ptr_arg, 19, (uchar*) 0,0, unireg_check_arg, field_name_arg, table_arg, cs) { - flags|=ZEROFILL_FLAG; /* 4.0 MYD compatibility */ + /* For 4.0 MYD and 4.0 InnoDB compatibility */ + flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; if (table && !table->timestamp_field && unireg_check != NONE) { From 10583f05c96c80f899e5817cdd3ba3c0feb31ee7 Mon Sep 17 00:00:00 2001 From: "magnus@neptunus.(none)" <> Date: Wed, 28 Jul 2004 10:28:30 +0200 Subject: [PATCH 15/93] Added order by to make the test output from ndb_basic and ndb_lock predicatble --- mysql-test/r/ndb_basic.result | 6 +++--- mysql-test/r/ndb_lock.result | 16 ++++++++-------- mysql-test/t/ndb_basic.test | 4 ++-- mysql-test/t/ndb_lock.test | 8 ++++---- 4 files 
changed, 17 insertions(+), 17 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 7675048ca3c..b7479d9543d 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -24,15 +24,15 @@ pk1 attr1 attr2 attr3 9410 1 NULL 9412 9411 9413 17 9413 UPDATE t1 SET pk1=2 WHERE attr1=1; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; pk1 attr1 attr2 attr3 2 1 NULL 9412 9411 9413 17 9413 UPDATE t1 SET pk1=pk1 + 1; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; pk1 attr1 attr2 attr3 -9412 9413 17 9413 3 1 NULL 9412 +9412 9413 17 9413 DELETE FROM t1; SELECT * FROM t1; pk1 attr1 attr2 attr3 diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result index 94ff5c25e6b..505eb054afd 100644 --- a/mysql-test/r/ndb_lock.result +++ b/mysql-test/r/ndb_lock.result @@ -1,25 +1,25 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; create table t1 (x integer not null primary key, y varchar(32)) engine = ndb; insert into t1 values (1,'one'), (2,'two'); -select * from t1; +select * from t1 order by x; x y -2 two 1 one -select * from t1; +2 two +select * from t1 order by x; x y -2 two 1 one +2 two start transaction; insert into t1 values (3,'three'); start transaction; -select * from t1; +select * from t1 order by x; x y -2 two 1 one +2 two commit; -select * from t1; +select * from t1 order by x; x y +1 one 2 two 3 three -1 one commit; diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 6c120e00942..08fbf913155 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -31,9 +31,9 @@ SELECT * FROM t1; # Update primary key UPDATE t1 SET pk1=2 WHERE attr1=1; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; UPDATE t1 SET pk1=pk1 + 1; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; # Delete the record DELETE FROM t1; diff --git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test index 431729516d6..852d641ed54 100644 --- a/mysql-test/t/ndb_lock.test +++ 
b/mysql-test/t/ndb_lock.test @@ -19,20 +19,20 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; connection con1; create table t1 (x integer not null primary key, y varchar(32)) engine = ndb; insert into t1 values (1,'one'), (2,'two'); -select * from t1; +select * from t1 order by x; connection con2; -select * from t1; +select * from t1 order by x; connection con1; start transaction; insert into t1 values (3,'three'); connection con2; -start transaction; select * from t1; +start transaction; select * from t1 order by x; connection con1; commit; connection con2; -select * from t1; +select * from t1 order by x; commit; From bac5d8f9b6992786826c7c0d47a6eea23f8c8e27 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Wed, 28 Jul 2004 11:05:00 +0200 Subject: [PATCH 16/93] Fix 64-bit issue in ConfigValues --- ndb/src/common/util/ConfigValues.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp index 7fc99bc526c..1dcb542e92c 100644 --- a/ndb/src/common/util/ConfigValues.cpp +++ b/ndb/src/common/util/ConfigValues.cpp @@ -261,9 +261,9 @@ directory(Uint32 sz){ ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){ m_sectionCounter = (1 << KP_SECTION_SHIFT); m_freeKeys = directory(keys); - m_freeData = data; + m_freeData = (data + 7) & ~7; m_currentSection = 0; - m_cfg = create(m_freeKeys, data); + m_cfg = create(m_freeKeys, m_freeData); } ConfigValuesFactory::ConfigValuesFactory(ConfigValues * cfg){ @@ -316,7 +316,8 @@ ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){ m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size); m_freeData = (m_freeData >= fs ? 
m_cfg->m_dataSize : fs + m_cfg->m_dataSize); m_freeKeys = directory(m_freeKeys); - + m_freeData = (m_freeData + 7) & ~7; + ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); put(* m_tmp); @@ -333,6 +334,7 @@ ConfigValuesFactory::shrink(){ m_freeKeys = m_cfg->m_size - m_freeKeys; m_freeData = m_cfg->m_dataSize - m_freeData; m_freeKeys = directory(m_freeKeys); + m_freeData = (m_freeData + 7) & ~7; ConfigValues * m_tmp = m_cfg; m_cfg = create(m_freeKeys, m_freeData); From 22b3e25a91bffa3e89687ec57dca27c4a9540625 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Wed, 28 Jul 2004 09:34:06 -0700 Subject: [PATCH 17/93] Fixing MYSQL_CHEKC_ZLIB_DIR to take into account user settings (in case there are such) --- acinclude.m4 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/acinclude.m4 b/acinclude.m4 index 4109ff39fdc..d2bbec82b75 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -183,8 +183,8 @@ dnl Auxiliary macro to check for zlib at given path AC_DEFUN([MYSQL_CHECK_ZLIB_DIR], [ save_INCLUDES="$INCLUDES" save_LIBS="$LIBS" -INCLUDES="$ZLIB_INCLUDES" -LIBS="$ZLIB_LIBS" +INCLUDES="$INCLUDES $ZLIB_INCLUDES" +LIBS="$LIBS $ZLIB_LIBS" AC_CACHE_VAL([mysql_cv_compress], [AC_TRY_LINK([#include ], [int link_test() { return compress(0, (unsigned long*) 0, "", 0); }], From b2b5e8715da21da8e6c1dd9eb0182c8b45aa646f Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Wed, 28 Jul 2004 18:55:17 +0200 Subject: [PATCH 18/93] MgmtSrvr.hpp: make subclass friend for gcc-2.95 et al [dup push] --- ndb/src/mgmsrv/MgmtSrvr.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 41a7a69e106..1145f4a5a6b 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -68,6 +68,9 @@ public: virtual void println_statistics(const BaseString &s) = 0; }; + // some compilers need all of this + class Allocated_resources; + friend class Allocated_resources; class 
Allocated_resources { public: Allocated_resources(class MgmtSrvr &m); From e25126c68a569bf72a823bd85ac09dc52d48315a Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Wed, 28 Jul 2004 21:46:22 +0200 Subject: [PATCH 19/93] Casted all macro arguments, used a common style with array subscripts, improved the line-up, wrapped long lines. --- include/myisampack.h | 353 ++++++++++++++++++++++--------------------- 1 file changed, 178 insertions(+), 175 deletions(-) diff --git a/include/myisampack.h b/include/myisampack.h index 06c94fea75f..c92429e4c01 100644 --- a/include/myisampack.h +++ b/include/myisampack.h @@ -22,215 +22,218 @@ */ /* these two are for uniformity */ -#define mi_sint1korr(A) (int8)(*A) -#define mi_uint1korr(A) (uint8)(*A) +#define mi_sint1korr(A) ((int8)(*A)) +#define mi_uint1korr(A) ((uint8)(*A)) -#define mi_sint2korr(A) (int16) (((int16) ((uchar) (A)[1])) +\ - ((int16) ((int16) (A)[0]) << 8)) -#define mi_sint3korr(A) ((int32) ((((uchar) (A)[0]) & 128) ? \ - (((uint32) 255L << 24) | \ - (((uint32) (uchar) (A)[0]) << 16) |\ - (((uint32) (uchar) (A)[1]) << 8) | \ - ((uint32) (uchar) (A)[2])) : \ - (((uint32) (uchar) (A)[0]) << 16) |\ - (((uint32) (uchar) (A)[1]) << 8) | \ - ((uint32) (uchar) (A)[2]))) -#define mi_sint4korr(A) (int32) (((int32) ((uchar) (A)[3])) +\ - (((int32) ((uchar) (A)[2]) << 8)) +\ - (((int32) ((uchar) (A)[1]) << 16)) +\ - (((int32) ((int16) (A)[0]) << 24))) -#define mi_sint8korr(A) (longlong) mi_uint8korr(A) -#define mi_uint2korr(A) (uint16) (((uint16) ((uchar) (A)[1])) +\ - ((uint16) ((uchar) (A)[0]) << 8)) -#define mi_uint3korr(A) (uint32) (((uint32) ((uchar) (A)[2])) +\ - (((uint32) ((uchar) (A)[1])) << 8) +\ - (((uint32) ((uchar) (A)[0])) << 16)) -#define mi_uint4korr(A) (uint32) (((uint32) ((uchar) (A)[3])) +\ - (((uint32) ((uchar) (A)[2])) << 8) +\ - (((uint32) ((uchar) (A)[1])) << 16) +\ - (((uint32) ((uchar) (A)[0])) << 24)) -#define mi_uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[4])) +\ - (((uint32) ((uchar) 
(A)[3])) << 8) +\ - (((uint32) ((uchar) (A)[2])) << 16) +\ - (((uint32) ((uchar) (A)[1])) << 24)) +\ - (((ulonglong) ((uchar) (A)[0])) << 32)) -#define mi_uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[5])) +\ - (((uint32) ((uchar) (A)[4])) << 8) +\ - (((uint32) ((uchar) (A)[3])) << 16) +\ - (((uint32) ((uchar) (A)[2])) << 24)) +\ - (((ulonglong) (((uint32) ((uchar) (A)[1])) +\ - (((uint32) ((uchar) (A)[0]) << 8)))) <<\ - 32)) -#define mi_uint7korr(A) ((ulonglong)(((uint32) ((uchar) (A)[6])) +\ - (((uint32) ((uchar) (A)[5])) << 8) +\ - (((uint32) ((uchar) (A)[4])) << 16) +\ - (((uint32) ((uchar) (A)[3])) << 24)) +\ - (((ulonglong) (((uint32) ((uchar) (A)[2])) +\ - (((uint32) ((uchar) (A)[1])) << 8) +\ - (((uint32) ((uchar) (A)[0])) << 16))) <<\ - 32)) -#define mi_uint8korr(A) ((ulonglong)(((uint32) ((uchar) (A)[7])) +\ - (((uint32) ((uchar) (A)[6])) << 8) +\ - (((uint32) ((uchar) (A)[5])) << 16) +\ - (((uint32) ((uchar) (A)[4])) << 24)) +\ - (((ulonglong) (((uint32) ((uchar) (A)[3])) +\ - (((uint32) ((uchar) (A)[2])) << 8) +\ - (((uint32) ((uchar) (A)[1])) << 16) +\ - (((uint32) ((uchar) (A)[0])) << 24))) <<\ - 32)) +#define mi_sint2korr(A) ((int16) (((int16) (((uchar*) (A))[1])) +\ + ((int16) ((int16) ((char*) (A))[0]) << 8))) +#define mi_sint3korr(A) ((int32) (((((uchar*) (A))[0]) & 128) ? 
\ + (((uint32) 255L << 24) | \ + (((uint32) ((uchar*) (A))[0]) << 16) |\ + (((uint32) ((uchar*) (A))[1]) << 8) | \ + ((uint32) ((uchar*) (A))[2])) : \ + (((uint32) ((uchar*) (A))[0]) << 16) |\ + (((uint32) ((uchar*) (A))[1]) << 8) | \ + ((uint32) ((uchar*) (A))[2]))) +#define mi_sint4korr(A) ((int32) (((int32) (((uchar*) (A))[3])) +\ + ((int32) (((uchar*) (A))[2]) << 8) +\ + ((int32) (((uchar*) (A))[1]) << 16) +\ + ((int32) ((int16) ((char*) (A))[0]) << 24))) +#define mi_sint8korr(A) ((longlong) mi_uint8korr(A)) +#define mi_uint2korr(A) ((uint16) (((uint16) (((uchar*) (A))[1])) +\ + ((uint16) (((uchar*) (A))[0]) << 8))) +#define mi_uint3korr(A) ((uint32) (((uint32) (((uchar*) (A))[2])) +\ + (((uint32) (((uchar*) (A))[1])) << 8) +\ + (((uint32) (((uchar*) (A))[0])) << 16))) +#define mi_uint4korr(A) ((uint32) (((uint32) (((uchar*) (A))[3])) +\ + (((uint32) (((uchar*) (A))[2])) << 8) +\ + (((uint32) (((uchar*) (A))[1])) << 16) +\ + (((uint32) (((uchar*) (A))[0])) << 24))) +#define mi_uint5korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[4])) +\ + (((uint32) (((uchar*) (A))[3])) << 8) +\ + (((uint32) (((uchar*) (A))[2])) << 16) +\ + (((uint32) (((uchar*) (A))[1])) << 24)) +\ + (((ulonglong) (((uchar*) (A))[0])) << 32)) +#define mi_uint6korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[5])) +\ + (((uint32) (((uchar*) (A))[4])) << 8) +\ + (((uint32) (((uchar*) (A))[3])) << 16) +\ + (((uint32) (((uchar*) (A))[2])) << 24)) +\ + (((ulonglong) (((uint32) (((uchar*) (A))[1])) +\ + (((uint32) (((uchar*) (A))[0]) << 8)))) <<\ + 32)) +#define mi_uint7korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[6])) +\ + (((uint32) (((uchar*) (A))[5])) << 8) +\ + (((uint32) (((uchar*) (A))[4])) << 16) +\ + (((uint32) (((uchar*) (A))[3])) << 24)) +\ + (((ulonglong) (((uint32) (((uchar*) (A))[2])) +\ + (((uint32) (((uchar*) (A))[1])) << 8) +\ + (((uint32) (((uchar*) (A))[0])) << 16))) <<\ + 32)) +#define mi_uint8korr(A) ((ulonglong)(((uint32) (((uchar*) (A))[7])) +\ + (((uint32) (((uchar*) (A))[6])) 
<< 8) +\ + (((uint32) (((uchar*) (A))[5])) << 16) +\ + (((uint32) (((uchar*) (A))[4])) << 24)) +\ + (((ulonglong) (((uint32) (((uchar*) (A))[3])) +\ + (((uint32) (((uchar*) (A))[2])) << 8) +\ + (((uint32) (((uchar*) (A))[1])) << 16) +\ + (((uint32) (((uchar*) (A))[0])) << 24))) <<\ + 32)) /* This one is for uniformity */ #define mi_int1store(T,A) *((uchar*)(T))= (uchar) (A) -#define mi_int2store(T,A) { uint def_temp= (uint) (A) ;\ - *((uchar*) ((T)+1))= (uchar)(def_temp); \ - *((uchar*) ((T)+0))= (uchar)(def_temp >> 8); } -#define mi_int3store(T,A) { /*lint -save -e734 */\ - ulong def_temp= (ulong) (A);\ - *(((T)+2))=(char) (def_temp);\ - *((T)+1)= (char) (def_temp >> 8);\ - *((T)+0)= (char) (def_temp >> 16);\ - /*lint -restore */} -#define mi_int4store(T,A) { ulong def_temp= (ulong) (A);\ - *((T)+3)=(char) (def_temp);\ - *((T)+2)=(char) (def_temp >> 8);\ - *((T)+1)=(char) (def_temp >> 16);\ - *((T)+0)=(char) (def_temp >> 24); } -#define mi_int5store(T,A) { ulong def_temp= (ulong) (A),\ - def_temp2= (ulong) ((A) >> 32);\ - *((T)+4)=(char) (def_temp);\ - *((T)+3)=(char) (def_temp >> 8);\ - *((T)+2)=(char) (def_temp >> 16);\ - *((T)+1)=(char) (def_temp >> 24);\ - *((T)+0)=(char) (def_temp2); } -#define mi_int6store(T,A) { ulong def_temp= (ulong) (A),\ - def_temp2= (ulong) ((A) >> 32);\ - *((T)+5)=(char) (def_temp);\ - *((T)+4)=(char) (def_temp >> 8);\ - *((T)+3)=(char) (def_temp >> 16);\ - *((T)+2)=(char) (def_temp >> 24);\ - *((T)+1)=(char) (def_temp2);\ - *((T)+0)=(char) (def_temp2 >> 8); } -#define mi_int7store(T,A) { ulong def_temp= (ulong) (A),\ - def_temp2= (ulong) ((A) >> 32);\ - *((T)+6)=(char) (def_temp);\ - *((T)+5)=(char) (def_temp >> 8);\ - *((T)+4)=(char) (def_temp >> 16);\ - *((T)+3)=(char) (def_temp >> 24);\ - *((T)+2)=(char) (def_temp2);\ - *((T)+1)=(char) (def_temp2 >> 8);\ - *((T)+0)=(char) (def_temp2 >> 16); } -#define mi_int8store(T,A) { ulong def_temp3= (ulong) (A), \ - def_temp4= (ulong) ((A) >> 32); \ - mi_int4store((T),def_temp4); \ - 
mi_int4store((T+4),def_temp3); \ - } +#define mi_int2store(T,A) { uint def_temp= (uint) (A) ;\ + ((uchar*) (T))[1]= (uchar) (def_temp);\ + ((uchar*) (T))[0]= (uchar) (def_temp >> 8); } +#define mi_int3store(T,A) { /*lint -save -e734 */\ + ulong def_temp= (ulong) (A);\ + ((uchar*) (T))[2]= (uchar) (def_temp);\ + ((uchar*) (T))[1]= (uchar) (def_temp >> 8);\ + ((uchar*) (T))[0]= (uchar) (def_temp >> 16);\ + /*lint -restore */} +#define mi_int4store(T,A) { ulong def_temp= (ulong) (A);\ + ((uchar*) (T))[3]= (uchar) (def_temp);\ + ((uchar*) (T))[2]= (uchar) (def_temp >> 8);\ + ((uchar*) (T))[1]= (uchar) (def_temp >> 16);\ + ((uchar*) (T))[0]= (uchar) (def_temp >> 24); } +#define mi_int5store(T,A) { ulong def_temp= (ulong) (A),\ + def_temp2= (ulong) ((A) >> 32);\ + ((uchar*) (T))[4]= (uchar) (def_temp);\ + ((uchar*) (T))[3]= (uchar) (def_temp >> 8);\ + ((uchar*) (T))[2]= (uchar) (def_temp >> 16);\ + ((uchar*) (T))[1]= (uchar) (def_temp >> 24);\ + ((uchar*) (T))[0]= (uchar) (def_temp2); } +#define mi_int6store(T,A) { ulong def_temp= (ulong) (A),\ + def_temp2= (ulong) ((A) >> 32);\ + ((uchar*) (T))[5]= (uchar) (def_temp);\ + ((uchar*) (T))[4]= (uchar) (def_temp >> 8);\ + ((uchar*) (T))[3]= (uchar) (def_temp >> 16);\ + ((uchar*) (T))[2]= (uchar) (def_temp >> 24);\ + ((uchar*) (T))[1]= (uchar) (def_temp2);\ + ((uchar*) (T))[0]= (uchar) (def_temp2 >> 8); } +#define mi_int7store(T,A) { ulong def_temp= (ulong) (A),\ + def_temp2= (ulong) ((A) >> 32);\ + ((uchar*) (T))[6]= (uchar) (def_temp);\ + ((uchar*) (T))[5]= (uchar) (def_temp >> 8);\ + ((uchar*) (T))[4]= (uchar) (def_temp >> 16);\ + ((uchar*) (T))[3]= (uchar) (def_temp >> 24);\ + ((uchar*) (T))[2]= (uchar) (def_temp2);\ + ((uchar*) (T))[1]= (uchar) (def_temp2 >> 8);\ + ((uchar*) (T))[0]= (uchar) (def_temp2 >> 16); } +#define mi_int8store(T,A) { ulong def_temp3= (ulong) (A),\ + def_temp4= (ulong) ((A) >> 32);\ + mi_int4store((uchar*) (T) + 0, def_temp4);\ + mi_int4store((uchar*) (T) + 4, def_temp3); } #ifdef WORDS_BIGENDIAN 
-#define mi_float4store(T,A) { *(T)= ((byte *) &A)[0];\ - *((T)+1)=(char) ((byte *) &A)[1];\ - *((T)+2)=(char) ((byte *) &A)[2];\ - *((T)+3)=(char) ((byte *) &A)[3]; } +#define mi_float4store(T,A) { ((uchar*) (T))[0]= ((uchar*) &A)[0];\ + ((uchar*) (T))[1]= ((uchar*) &A)[1];\ + ((uchar*) (T))[2]= ((uchar*) &A)[2];\ + ((uchar*) (T))[3]= ((uchar*) &A)[3]; } #define mi_float4get(V,M) { float def_temp;\ - ((byte*) &def_temp)[0]=(M)[0];\ - ((byte*) &def_temp)[1]=(M)[1];\ - ((byte*) &def_temp)[2]=(M)[2];\ - ((byte*) &def_temp)[3]=(M)[3];\ - (V)=def_temp; } + ((uchar*) &def_temp)[0]= ((uchar*) (M))[0];\ + ((uchar*) &def_temp)[1]= ((uchar*) (M))[1];\ + ((uchar*) &def_temp)[2]= ((uchar*) (M))[2];\ + ((uchar*) &def_temp)[3]= ((uchar*) (M))[3];\ + (V)= def_temp; } -#define mi_float8store(T,V) { *(T)= ((byte *) &V)[0];\ - *((T)+1)=(char) ((byte *) &V)[1];\ - *((T)+2)=(char) ((byte *) &V)[2];\ - *((T)+3)=(char) ((byte *) &V)[3];\ - *((T)+4)=(char) ((byte *) &V)[4];\ - *((T)+5)=(char) ((byte *) &V)[5];\ - *((T)+6)=(char) ((byte *) &V)[6];\ - *((T)+7)=(char) ((byte *) &V)[7]; } +#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[0];\ + ((uchar*) (T))[1]= ((uchar*) &V)[1];\ + ((uchar*) (T))[2]= ((uchar*) &V)[2];\ + ((uchar*) (T))[3]= ((uchar*) &V)[3];\ + ((uchar*) (T))[4]= ((uchar*) &V)[4];\ + ((uchar*) (T))[5]= ((uchar*) &V)[5];\ + ((uchar*) (T))[6]= ((uchar*) &V)[6];\ + ((uchar*) (T))[7]= ((uchar*) &V)[7]; } #define mi_float8get(V,M) { double def_temp;\ - ((byte*) &def_temp)[0]=(M)[0];\ - ((byte*) &def_temp)[1]=(M)[1];\ - ((byte*) &def_temp)[2]=(M)[2];\ - ((byte*) &def_temp)[3]=(M)[3];\ - ((byte*) &def_temp)[4]=(M)[4];\ - ((byte*) &def_temp)[5]=(M)[5];\ - ((byte*) &def_temp)[6]=(M)[6];\ - ((byte*) &def_temp)[7]=(M)[7]; \ - (V)=def_temp; } + ((uchar*) &def_temp)[0]= ((uchar*) (M))[0];\ + ((uchar*) &def_temp)[1]= ((uchar*) (M))[1];\ + ((uchar*) &def_temp)[2]= ((uchar*) (M))[2];\ + ((uchar*) &def_temp)[3]= ((uchar*) (M))[3];\ + ((uchar*) &def_temp)[4]= ((uchar*) 
(M))[4];\ + ((uchar*) &def_temp)[5]= ((uchar*) (M))[5];\ + ((uchar*) &def_temp)[6]= ((uchar*) (M))[6];\ + ((uchar*) &def_temp)[7]= ((uchar*) (M))[7]; \ + (V)= def_temp; } #else -#define mi_float4store(T,A) { *(T)= ((byte *) &A)[3];\ - *((T)+1)=(char) ((byte *) &A)[2];\ - *((T)+2)=(char) ((byte *) &A)[1];\ - *((T)+3)=(char) ((byte *) &A)[0]; } +#define mi_float4store(T,A) { ((uchar*) (T))[0]= ((uchar*) &A)[3];\ + ((uchar*) (T))[1]= ((uchar*) &A)[2];\ + ((uchar*) (T))[2]= ((uchar*) &A)[1];\ + ((uchar*) (T))[3]= ((uchar*) &A)[0]; } #define mi_float4get(V,M) { float def_temp;\ - ((byte*) &def_temp)[0]=(M)[3];\ - ((byte*) &def_temp)[1]=(M)[2];\ - ((byte*) &def_temp)[2]=(M)[1];\ - ((byte*) &def_temp)[3]=(M)[0];\ - (V)=def_temp; } + ((uchar*) &def_temp)[0]= ((uchar*) (M))[3];\ + ((uchar*) &def_temp)[1]= ((uchar*) (M))[2];\ + ((uchar*) &def_temp)[2]= ((uchar*) (M))[1];\ + ((uchar*) &def_temp)[3]= ((uchar*) (M))[0];\ + (V)= def_temp; } #if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) -#define mi_float8store(T,V) { *(T)= ((byte *) &V)[3];\ - *((T)+1)=(char) ((byte *) &V)[2];\ - *((T)+2)=(char) ((byte *) &V)[1];\ - *((T)+3)=(char) ((byte *) &V)[0];\ - *((T)+4)=(char) ((byte *) &V)[7];\ - *((T)+5)=(char) ((byte *) &V)[6];\ - *((T)+6)=(char) ((byte *) &V)[5];\ - *((T)+7)=(char) ((byte *) &V)[4];} +#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[3];\ + ((uchar*) (T))[1]= ((uchar*) &V)[2];\ + ((uchar*) (T))[2]= ((uchar*) &V)[1];\ + ((uchar*) (T))[3]= ((uchar*) &V)[0];\ + ((uchar*) (T))[4]= ((uchar*) &V)[7];\ + ((uchar*) (T))[5]= ((uchar*) &V)[6];\ + ((uchar*) (T))[6]= ((uchar*) &V)[5];\ + ((uchar*) (T))[7]= ((uchar*) &V)[4];} #define mi_float8get(V,M) { double def_temp;\ - ((byte*) &def_temp)[0]=(M)[3];\ - ((byte*) &def_temp)[1]=(M)[2];\ - ((byte*) &def_temp)[2]=(M)[1];\ - ((byte*) &def_temp)[3]=(M)[0];\ - ((byte*) &def_temp)[4]=(M)[7];\ - ((byte*) &def_temp)[5]=(M)[6];\ - ((byte*) &def_temp)[6]=(M)[5];\ - ((byte*) &def_temp)[7]=(M)[4];\ - 
(V)=def_temp; } + ((uchar*) &def_temp)[0]= ((uchar*) (M))[3];\ + ((uchar*) &def_temp)[1]= ((uchar*) (M))[2];\ + ((uchar*) &def_temp)[2]= ((uchar*) (M))[1];\ + ((uchar*) &def_temp)[3]= ((uchar*) (M))[0];\ + ((uchar*) &def_temp)[4]= ((uchar*) (M))[7];\ + ((uchar*) &def_temp)[5]= ((uchar*) (M))[6];\ + ((uchar*) &def_temp)[6]= ((uchar*) (M))[5];\ + ((uchar*) &def_temp)[7]= ((uchar*) (M))[4];\ + (V)= def_temp; } #else -#define mi_float8store(T,V) { *(T)= ((byte *) &V)[7];\ - *((T)+1)=(char) ((byte *) &V)[6];\ - *((T)+2)=(char) ((byte *) &V)[5];\ - *((T)+3)=(char) ((byte *) &V)[4];\ - *((T)+4)=(char) ((byte *) &V)[3];\ - *((T)+5)=(char) ((byte *) &V)[2];\ - *((T)+6)=(char) ((byte *) &V)[1];\ - *((T)+7)=(char) ((byte *) &V)[0];} +#define mi_float8store(T,V) { ((uchar*) (T))[0]= ((uchar*) &V)[7];\ + ((uchar*) (T))[1]= ((uchar*) &V)[6];\ + ((uchar*) (T))[2]= ((uchar*) &V)[5];\ + ((uchar*) (T))[3]= ((uchar*) &V)[4];\ + ((uchar*) (T))[4]= ((uchar*) &V)[3];\ + ((uchar*) (T))[5]= ((uchar*) &V)[2];\ + ((uchar*) (T))[6]= ((uchar*) &V)[1];\ + ((uchar*) (T))[7]= ((uchar*) &V)[0];} #define mi_float8get(V,M) { double def_temp;\ - ((byte*) &def_temp)[0]=(M)[7];\ - ((byte*) &def_temp)[1]=(M)[6];\ - ((byte*) &def_temp)[2]=(M)[5];\ - ((byte*) &def_temp)[3]=(M)[4];\ - ((byte*) &def_temp)[4]=(M)[3];\ - ((byte*) &def_temp)[5]=(M)[2];\ - ((byte*) &def_temp)[6]=(M)[1];\ - ((byte*) &def_temp)[7]=(M)[0];\ - (V)=def_temp; } + ((uchar*) &def_temp)[0]= ((uchar*) (M))[7];\ + ((uchar*) &def_temp)[1]= ((uchar*) (M))[6];\ + ((uchar*) &def_temp)[2]= ((uchar*) (M))[5];\ + ((uchar*) &def_temp)[3]= ((uchar*) (M))[4];\ + ((uchar*) &def_temp)[4]= ((uchar*) (M))[3];\ + ((uchar*) &def_temp)[5]= ((uchar*) (M))[2];\ + ((uchar*) &def_temp)[6]= ((uchar*) (M))[1];\ + ((uchar*) &def_temp)[7]= ((uchar*) (M))[0];\ + (V)= def_temp; } #endif /* __FLOAT_WORD_ORDER */ #endif /* WORDS_BIGENDIAN */ /* Fix to avoid warnings when sizeof(ha_rows) == sizeof(long) */ #ifdef BIG_TABLES -#define mi_rowstore(T,A) mi_int8store(T,A) 
-#define mi_rowkorr(T) mi_uint8korr(T) +#define mi_rowstore(T,A) mi_int8store(T, A) +#define mi_rowkorr(T) mi_uint8korr(T) #else -#define mi_rowstore(T,A) { mi_int4store(T,0); mi_int4store(((T)+4),A); } -#define mi_rowkorr(T) mi_uint4korr((T)+4) +#define mi_rowstore(T,A) { mi_int4store(T, 0);\ + mi_int4store(((uchar*) (T) + 4), A); } +#define mi_rowkorr(T) mi_uint4korr((uchar*) (T) + 4) #endif #if SIZEOF_OFF_T > 4 -#define mi_sizestore(T,A) mi_int8store(T,A) -#define mi_sizekorr(T) mi_uint8korr(T) +#define mi_sizestore(T,A) mi_int8store(T, A) +#define mi_sizekorr(T) mi_uint8korr(T) #else -#define mi_sizestore(T,A) { if ((A) == HA_OFFSET_ERROR) bfill((char*) (T),8,255); else { mi_int4store((T),0); mi_int4store(((T)+4),A); }} -#define mi_sizekorr(T) mi_uint4korr((T)+4) +#define mi_sizestore(T,A) { if ((A) == HA_OFFSET_ERROR)\ + bfill((char*) (T), 8, 255);\ + else { mi_int4store((T), 0);\ + mi_int4store(((T) + 4), A); }} +#define mi_sizekorr(T) mi_uint4korr((uchar*) (T) + 4) #endif From a3091294b4e1d4d01ee49390fdf5cac27bb3d17a Mon Sep 17 00:00:00 2001 From: "paul@kite-hub.kitebird.com" <> Date: Wed, 28 Jul 2004 14:52:04 -0500 Subject: [PATCH 20/93] Fix some variable misorderings. 
--- sql/mysqld.cc | 18 +++++++++--------- sql/set_var.cc | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 83eb8bb864b..4018294a61b 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5056,6 +5056,8 @@ struct show_var_st status_vars[]= { {"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG}, {"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG}, {"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG}, + {"Com_dealloc_sql", (char*) (com_stat+(uint) + SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, {"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG}, {"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG}, {"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG}, @@ -5064,6 +5066,8 @@ struct show_var_st status_vars[]= { {"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG}, {"Com_drop_table", (char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG}, {"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG}, + {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), + SHOW_LONG}, {"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG}, {"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG}, {"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG}, @@ -5079,6 +5083,8 @@ struct show_var_st status_vars[]= { {"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG}, {"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG}, {"Com_preload_keys", (char*) (com_stat+(uint) SQLCOM_PRELOAD_KEYS),SHOW_LONG}, + {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), + SHOW_LONG}, {"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG}, {"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG}, {"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG}, @@ -5125,12 
+5131,6 @@ struct show_var_st status_vars[]= { {"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG}, {"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG}, {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG}, - {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), - SHOW_LONG}, - {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), - SHOW_LONG}, - {"Com_dealloc_sql", (char*) (com_stat+(uint) - SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, {"Connections", (char*) &thread_id, SHOW_LONG_CONST}, {"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, @@ -5141,6 +5141,7 @@ struct show_var_st status_vars[]= { {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST}, {"Handler_commit", (char*) &ha_commit_count, SHOW_LONG}, {"Handler_delete", (char*) &ha_delete_count, SHOW_LONG}, + {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, {"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG}, {"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG}, {"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG}, @@ -5150,13 +5151,12 @@ struct show_var_st status_vars[]= { {"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG}, {"Handler_update", (char*) &ha_update_count, SHOW_LONG}, {"Handler_write", (char*) &ha_write_count, SHOW_LONG}, - {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG}, - {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, - SHOW_KEY_CACHE_CONST_LONG}, {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, SHOW_KEY_CACHE_CONST_LONG}, + {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, + SHOW_KEY_CACHE_CONST_LONG}, {"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests, SHOW_KEY_CACHE_LONG}, {"Key_reads", (char*) 
&dflt_key_cache_var.global_cache_read, diff --git a/sql/set_var.cc b/sql/set_var.cc index e1cfb77d297..47d9973495a 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -611,8 +611,8 @@ struct show_var_st init_vars[]= { #ifdef HAVE_BERKELEY_DB {"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG}, {"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR}, - {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR}, {"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG}, + {"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR}, {"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG}, {"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL}, {"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR}, @@ -652,9 +652,9 @@ struct show_var_st init_vars[]= { {"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE}, {"have_compress", (char*) &have_compress, SHOW_HAVE}, {"have_crypt", (char*) &have_crypt, SHOW_HAVE}, + {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_innodb", (char*) &have_innodb, SHOW_HAVE}, {"have_isam", (char*) &have_isam, SHOW_HAVE}, - {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE}, {"have_openssl", (char*) &have_openssl, SHOW_HAVE}, {"have_query_cache", (char*) &have_query_cache, SHOW_HAVE}, From fd1fad4371c0768576f80d20cfad433a189fa06e Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Thu, 29 Jul 2004 10:33:33 +0200 Subject: [PATCH 21/93] Fixed a copy-and-paste error: mysql_create_frm() should have its own enter string. 
--- sql/unireg.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/unireg.cc b/sql/unireg.cc index b5f6c3546a4..c82fcc4abef 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -75,7 +75,7 @@ bool mysql_create_frm(THD *thd, my_string file_name, uchar fileinfo[64],forminfo[288],*keybuff; TYPELIB formnames; uchar *screen_buff; - DBUG_ENTER("rea_create_table"); + DBUG_ENTER("mysql_create_frm"); formnames.type_names=0; if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0))) From 8bd205c8e2c31d9aee21d7cdbc6bc277d70a981d Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Thu, 29 Jul 2004 10:44:53 +0200 Subject: [PATCH 22/93] ha_ndbcluster.h, ha_ndbcluster.cc: compile fix for gcc-2.95 --- sql/ha_ndbcluster.cc | 8 ++++---- sql/ha_ndbcluster.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index ec8bd035c83..2c966aab73a 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -333,11 +333,11 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, - TODO allocate blob part aligned buffers */ -NdbBlob::ActiveHook get_ndb_blobs_value; +NdbBlob::ActiveHook g_get_ndb_blobs_value; -int get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) +int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) { - DBUG_ENTER("get_ndb_blobs_value [callback]"); + DBUG_ENTER("g_get_ndb_blobs_value"); if (ndb_blob->blobsNextBlob() != NULL) DBUG_RETURN(0); ha_ndbcluster *ha= (ha_ndbcluster *)arg; @@ -428,7 +428,7 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, { // Set callback void *arg= (void *)this; - DBUG_RETURN(ndb_blob->setActiveHook(::get_ndb_blobs_value, arg) != 0); + DBUG_RETURN(ndb_blob->setActiveHook(g_get_ndb_blobs_value, arg) != 0); } DBUG_RETURN(1); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index bd8d78ec00b..31dd9a52331 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -184,7 +184,7 @@ class ha_ndbcluster: 
public handler uint fieldnr, const byte* field_ptr); int set_ndb_value(NdbOperation*, Field *field, uint fieldnr); int get_ndb_value(NdbOperation*, Field *field, uint fieldnr); - friend int ::get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); + friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); int get_ndb_blobs_value(NdbBlob *last_ndb_blob); int set_primary_key(NdbOperation *op, const byte *key); int set_primary_key(NdbOperation *op); From 387a01ddc8caf376dd7b87aae3e255344c0b8f94 Mon Sep 17 00:00:00 2001 From: "pem@mysql.comhem.se" <> Date: Thu, 29 Jul 2004 15:28:40 +0200 Subject: [PATCH 23/93] Added --with-ndbcluster config option to compile-pentium-valgrind-max (since it's in the other -max builds already). --- BUILD/compile-pentium-max | 5 ----- BUILD/compile-pentium-valgrind-max | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/BUILD/compile-pentium-max b/BUILD/compile-pentium-max index 6eb71fcebb6..caf657a2049 100755 --- a/BUILD/compile-pentium-max +++ b/BUILD/compile-pentium-max @@ -7,11 +7,6 @@ extra_flags="$pentium_cflags $fast_cflags -g" extra_configs="$pentium_configs" #strip=yes -#extra_configs="$extra_configs --with-innodb --with-berkeley-db \ -# --with-embedded-server --enable-thread-safe-client \ -# --with-openssl --with-vio --with-raid --with-ndbcluster" -# removed per discussion with Brian and Sanja because it makes Bootstrap -# fail extra_configs="$extra_configs --with-innodb --with-berkeley-db \ --with-embedded-server --enable-thread-safe-client \ --with-openssl --with-vio --with-raid --with-ndbcluster" diff --git a/BUILD/compile-pentium-valgrind-max b/BUILD/compile-pentium-valgrind-max index ef035b3f023..fd9543163d6 100755 --- a/BUILD/compile-pentium-valgrind-max +++ b/BUILD/compile-pentium-valgrind-max @@ -9,7 +9,7 @@ cxx_warnings="$cxx_warnings $debug_extra_warnings" extra_configs="$pentium_configs $debug_configs" # We want to test isam when building with valgrind -extra_configs="$extra_configs --with-berkeley-db 
--with-innodb --with-isam --with-embedded-server --with-openssl" +extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-isam --with-embedded-server --with-openssl --with-vio --with-raid --with-ndbcluster" . "$path/FINISH.sh" From 0f3e279a05f99b2d05f22d27a52d4e82465a7cb4 Mon Sep 17 00:00:00 2001 From: "guilhem@mysql.com" <> Date: Thu, 29 Jul 2004 23:25:58 +0200 Subject: [PATCH 24/93] WL#1580: --start-datetime, --stop-datetime, --start-position (alias for --position) and --stop-position options for mysqlbinlog, with a test file. This enables user to say "recover my database to how it was this morning at 10:30" (mysqlbinlog "--stop-datetime=2003-07-29 10:30:00"). Using time functions into client/ made me move them out of sql/ into sql-common/. + (small) fix for BUG#4507 "mysqlbinlog --read-from-remote-server sometimes cannot accept 2 binlogs" (that is, on command line). --- client/client_priv.h | 3 +- client/mysqlbinlog.cc | 248 ++++++++++++----- include/my_time.h | 7 + include/mysql_time.h | 9 + mysql-test/r/mysqlbinlog2.result | 446 +++++++++++++++++++++++++++++++ mysql-test/t/mysqlbinlog2.test | 156 +++++++++++ sql-common/my_time.c | 155 +++++++++++ sql/mysql_priv.h | 5 +- sql/time.cc | 157 ----------- sql/tztime.h | 9 +- 10 files changed, 957 insertions(+), 238 deletions(-) create mode 100644 mysql-test/r/mysqlbinlog2.result create mode 100644 mysql-test/t/mysqlbinlog2.test diff --git a/client/client_priv.h b/client/client_priv.h index 854d205e585..ad08484b706 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -43,5 +43,6 @@ enum options_client OPT_PROMPT, OPT_IGN_LINES,OPT_TRANSACTION,OPT_MYSQL_PROTOCOL, OPT_SHARED_MEMORY_BASE_NAME, OPT_FRM, OPT_SKIP_OPTIMIZATION, OPT_COMPATIBLE, OPT_RECONNECT, OPT_DELIMITER, OPT_SECURE_AUTH, - OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_CREATE_OPTIONS + OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_CREATE_OPTIONS, + OPT_START_POSITION, OPT_STOP_POSITION, OPT_START_DATETIME, OPT_STOP_DATETIME }; diff 
--git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index ba030379792..97746a52b39 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -17,7 +17,7 @@ #define MYSQL_CLIENT #undef MYSQL_SERVER #include "client_priv.h" -#include +#include #include "log_event.h" #define BIN_LOG_HEADER_SIZE 4 @@ -53,10 +53,18 @@ static int port = MYSQL_PORT; static const char* sock= 0; static const char* user = 0; static char* pass = 0; -static ulonglong position = 0; + +static ulonglong start_position, stop_position; +#define start_position_mot ((my_off_t)start_position) +#define stop_position_mot ((my_off_t)stop_position) + +static char *start_datetime_str, *stop_datetime_str; +static my_time_t start_datetime= 0, stop_datetime= MY_TIME_T_MAX; +static ulonglong rec_count= 0; static short binlog_flags = 0; static MYSQL* mysql = NULL; static const char* dirname_for_local_load= 0; +static bool stop_passed= 0; static int dump_local_log_entries(const char* logname); static int dump_remote_log_entries(const char* logname); @@ -302,15 +310,36 @@ Create_file event for file_id: %u\n",ae->file_id); Load_log_processor load_processor; +/* + RETURN + 0 ok and continue + 1 error and terminate + -1 ok and terminate -int process_event(ulonglong *rec_count, char *last_db, Log_event *ev, - my_off_t pos, int old_format) + TODO + This function returns 0 even in some error cases. This should be changed. +*/ +int process_event(char *last_db, Log_event *ev, my_off_t pos, int old_format) { char ll_buff[21]; DBUG_ENTER("process_event"); - if ((*rec_count) >= offset) + if ((rec_count >= offset) && + ((my_time_t)(ev->when) >= start_datetime)) { + /* + We have found an event after start_datetime, from now on print + everything (in case the binlog has timestamps increasing and decreasing, + we do this to avoid cutting the middle). 
+ */ + start_datetime= 0; + offset= 0; // print everything and protect against cycling rec_count + if (((my_time_t)(ev->when) >= stop_datetime) + || (pos >= stop_position_mot)) + { + stop_passed= 1; // skip all next binlogs + DBUG_RETURN(-1); + } if (!short_form) fprintf(result_file, "# at %s\n",llstr(pos,ll_buff)); @@ -387,7 +416,7 @@ Create_file event for file_id: %u\n",exv->file_id); } end: - (*rec_count)++; + rec_count++; if (ev) delete ev; DBUG_RETURN(0); @@ -417,13 +446,14 @@ static struct my_option my_long_options[] = {"port", 'P', "Use port to connect to the remote server.", (gptr*) &port, (gptr*) &port, 0, GET_INT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, - {"position", 'j', "Start reading the binlog at position N.", - (gptr*) &position, (gptr*) &position, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, - 0, 0}, + {"position", 'j', "Deprecated. Use --start-position instead.", + (gptr*) &start_position, (gptr*) &start_position, 0, GET_ULL, + REQUIRED_ARG, BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE, + /* COM_BINLOG_DUMP accepts only 4 bytes for the position */ + (ulonglong)(~(uint32)0), 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol of connection (tcp,socket,pipe,memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"result-file", 'r', "Direct output to a given file.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"read-from-remote-server", 'R', "Read binary logs from a MySQL server", @@ -439,6 +469,35 @@ static struct my_option my_long_options[] = {"socket", 'S', "Socket file to use for connection.", (gptr*) &sock, (gptr*) &sock, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"start-datetime", OPT_START_DATETIME, + "Start reading the binlog at first event having a datetime equal or " + "posterior to the argument; the argument must be a date and time " + "in the local time zone, in any format accepted by the MySQL server " + "for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 " + "(you should probably use quotes for your 
shell to set it properly).", + (gptr*) &start_datetime_str, (gptr*) &start_datetime_str, + 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"stop-datetime", OPT_STOP_DATETIME, + "Stop reading the binlog at first event having a datetime equal or " + "posterior to the argument; the argument must be a date and time " + "in the local time zone, in any format accepted by the MySQL server " + "for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 " + "(you should probably use quotes for your shell to set it properly).", + (gptr*) &stop_datetime_str, (gptr*) &stop_datetime_str, + 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"start-position", OPT_START_POSITION, + "Start reading the binlog at position N. Applies to the first binlog " + "passed on the command line.", + (gptr*) &start_position, (gptr*) &start_position, 0, GET_ULL, + REQUIRED_ARG, BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE, + /* COM_BINLOG_DUMP accepts only 4 bytes for the position */ + (ulonglong)(~(uint32)0), 0, 0, 0}, + {"stop-position", OPT_STOP_POSITION, + "Stop reading the binlog at position N. Applies to the last binlog " + "passed on the command line.", + (gptr*) &stop_position, (gptr*) &stop_position, 0, GET_ULL, + REQUIRED_ARG, (ulonglong)(~(my_off_t)0), BIN_LOG_HEADER_SIZE, + (ulonglong)(~(my_off_t)0), 0, 0, 0}, {"to-last-log", 't', "Requires -R. Will not stop at the end of the \ requested binlog but rather continue printing until the end of the last \ binlog of the MySQL server. 
If you send the output to the same MySQL server, \ @@ -513,6 +572,29 @@ the mysql command line client\n\n"); my_print_variables(my_long_options); } + +static my_time_t convert_str_to_timestamp(const char* str) +{ + int was_cut; + MYSQL_TIME l_time; + long dummy_my_timezone; + bool dummy_in_dst_time_gap; + /* We require a total specification (date AND time) */ + if (str_to_datetime(str, strlen(str), &l_time, 0, &was_cut) != + MYSQL_TIMESTAMP_DATETIME || was_cut) + { + fprintf(stderr, "Incorrect date and time argument: %s\n", str); + exit(1); + } + /* + Note that Feb 30th, Apr 31st cause no error messages and are mapped to + the next existing day, like in mysqld. Maybe this could be changed when + mysqld is changed too (with its "strict" mode?). + */ + return + my_system_gmt_sec(&l_time, &dummy_my_timezone, &dummy_in_dst_time_gap); +} + #include extern "C" my_bool @@ -559,7 +641,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } break; } - break; + case OPT_START_DATETIME: + start_datetime= convert_str_to_timestamp(start_datetime_str); + break; + case OPT_STOP_DATETIME: + stop_datetime= convert_str_to_timestamp(stop_datetime_str); + break; case 'V': print_version(); exit(0); @@ -604,9 +691,8 @@ static MYSQL* safe_connect() static int dump_log_entries(const char* logname) { - if (remote_opt) - return dump_remote_log_entries(logname); - return dump_local_log_entries(logname); + return (remote_opt ? 
dump_remote_log_entries(logname) : + dump_local_log_entries(logname)); } @@ -663,21 +749,27 @@ static int dump_remote_log_entries(const char* logname) char buf[128]; char last_db[FN_REFLEN+1] = ""; uint len, logname_len; - NET* net = &mysql->net; + NET* net; int old_format; + int error= 0; + my_off_t old_off= start_position_mot; + char fname[FN_REFLEN+1]; DBUG_ENTER("dump_remote_log_entries"); + /* + Even if we already read one binlog (case of >=2 binlogs on command line), + we cannot re-use the same connection as before, because it is now dead + (COM_BINLOG_DUMP kills the thread when it finishes). + */ + mysql= safe_connect(); + net= &mysql->net; old_format = check_master_version(mysql); - if (!position) - position = BIN_LOG_HEADER_SIZE; // protect the innocent from spam - if (position < BIN_LOG_HEADER_SIZE) - { - position = BIN_LOG_HEADER_SIZE; - // warn the user - sql_print_error("Warning: The position in the binary log can't be less than %d.\nStarting from position %d\n", BIN_LOG_HEADER_SIZE, BIN_LOG_HEADER_SIZE); - } - int4store(buf, position); + /* + COM_BINLOG_DUMP accepts only 4 bytes for the position, so we are forced to + cast to uint32. 
+ */ + int4store(buf, (uint32)start_position); int2store(buf + BIN_LOG_HEADER_SIZE, binlog_flags); logname_len = (uint) strlen(logname); int4store(buf + 6, 0); @@ -685,33 +777,32 @@ static int dump_remote_log_entries(const char* logname) if (simple_command(mysql, COM_BINLOG_DUMP, buf, logname_len + 10, 1)) { fprintf(stderr,"Got fatal error sending the log dump command\n"); - DBUG_RETURN(1); + error= 1; + goto err; } - my_off_t old_off= position; - ulonglong rec_count= 0; - char fname[FN_REFLEN+1]; - for (;;) { - const char *error; + const char *error_msg; len = net_safe_read(mysql); if (len == packet_error) { fprintf(stderr, "Got error reading packet from server: %s\n", mysql_error(mysql)); - DBUG_RETURN(1); + error= 1; + goto err; } if (len < 8 && net->read_pos[0] == 254) break; // end of data DBUG_PRINT("info",( "len= %u, net->read_pos[5] = %d\n", len, net->read_pos[5])); Log_event *ev = Log_event::read_log_event((const char*) net->read_pos + 1 , - len - 1, &error, old_format); + len - 1, &error_msg, old_format); if (!ev) { fprintf(stderr, "Could not construct log event object\n"); - DBUG_RETURN(1); + error= 1; + goto err; } Log_event_type type= ev->get_type_code(); @@ -735,22 +826,32 @@ static int dump_remote_log_entries(const char* logname) which are about the binlogs, so which would trigger the end-detection below. */ - if ((rev->when == 0) && !to_last_remote_log) + if (rev->when == 0) { - if ((rev->ident_len != logname_len) || - memcmp(rev->new_log_ident, logname, logname_len)) - DBUG_RETURN(0); - /* - Otherwise, this is a fake Rotate for our log, at the very beginning - for sure. Skip it, because it was not in the original log. If we - are running with to_last_remote_log, we print it, because it serves - as a useful marker between binlogs then. 
- */ - continue; + if (!to_last_remote_log) + { + if ((rev->ident_len != logname_len) || + memcmp(rev->new_log_ident, logname, logname_len)) + { + error= 0; + goto err; + } + /* + Otherwise, this is a fake Rotate for our log, at the very + beginning for sure. Skip it, because it was not in the original + log. If we are running with to_last_remote_log, we print it, + because it serves as a useful marker between binlogs then. + */ + continue; + } + len= 1; // fake Rotate, so don't increment old_off } } - if (process_event(&rec_count,last_db,ev,old_off,old_format)) - DBUG_RETURN(1); + if ((error= process_event(last_db,ev,old_off,old_format))) + { + error= ((error < 0) ? 0 : 1); + goto err; + } } else { @@ -760,29 +861,35 @@ static int dump_remote_log_entries(const char* logname) File file; if ((file= load_processor.prepare_new_file_for_old_format(le,fname)) < 0) - DBUG_RETURN(1); + { + error= 1; + goto err; + } - if (process_event(&rec_count,last_db,ev,old_off,old_format)) + if ((error= process_event(last_db,ev,old_off,old_format))) { my_close(file,MYF(MY_WME)); - DBUG_RETURN(1); + error= ((error < 0) ? 0 : 1); + goto err; } if (load_processor.load_old_format_file(net,old_fname,old_len,file)) { my_close(file,MYF(MY_WME)); - DBUG_RETURN(1); + error= 1; + goto err; } my_close(file,MYF(MY_WME)); } /* Let's adjust offset for remote log as for local log to produce - similar text. As we don't print the fake Rotate event, all events are - real so we can simply add the length. + similar text. 
*/ old_off+= len-1; } - DBUG_RETURN(0); +err: + mysql_close(mysql); + DBUG_RETURN(error); } @@ -817,7 +924,6 @@ static int dump_local_log_entries(const char* logname) { File fd = -1; IO_CACHE cache,*file= &cache; - ulonglong rec_count = 0; char last_db[FN_REFLEN+1]; byte tmp_buff[BIN_LOG_HEADER_SIZE]; bool old_format = 0; @@ -829,7 +935,7 @@ static int dump_local_log_entries(const char* logname) { if ((fd = my_open(logname, O_RDONLY | O_BINARY, MYF(MY_WME))) < 0) return 1; - if (init_io_cache(file, fd, 0, READ_CACHE, (my_off_t) position, 0, + if (init_io_cache(file, fd, 0, READ_CACHE, start_position_mot, 0, MYF(MY_WME | MY_NABP))) { my_close(fd, MYF(MY_WME)); @@ -843,12 +949,12 @@ static int dump_local_log_entries(const char* logname) 0, MYF(MY_WME | MY_NABP | MY_DONT_CHECK_FILESIZE))) return 1; old_format = check_header(file); - if (position) + if (start_position) { - /* skip 'position' characters from stdout */ + /* skip 'start_position' characters from stdout */ byte buff[IO_SIZE]; my_off_t length,tmp; - for (length= (my_off_t) position ; length > 0 ; length-=tmp) + for (length= start_position_mot ; length > 0 ; length-=tmp) { tmp=min(length,sizeof(buff)); if (my_b_read(file, buff, (uint) tmp)) @@ -858,11 +964,11 @@ static int dump_local_log_entries(const char* logname) } } } - file->pos_in_file=position; + file->pos_in_file= start_position_mot; file->seek_not_done=0; } - if (!position) + if (!start_position) { // Skip header if (my_b_read(file, tmp_buff, BIN_LOG_HEADER_SIZE)) @@ -891,9 +997,10 @@ static int dump_local_log_entries(const char* logname) // file->error == 0 means EOF, that's OK, we break in this case break; } - if (process_event(&rec_count,last_db,ev,old_off,false)) + if ((error= process_event(last_db,ev,old_off,false))) { - error= 1; + if (error < 0) + error= 0; break; } } @@ -909,11 +1016,14 @@ end: int main(int argc, char** argv) { static char **defaults_argv; - int exit_value; + int exit_value= 0; + ulonglong save_stop_position; 
MY_INIT(argv[0]); DBUG_ENTER("main"); DBUG_PROCESS(argv[0]); + init_time(); // for time functions + parse_args(&argc, (char***)&argv); defaults_argv=argv; @@ -925,8 +1035,6 @@ int main(int argc, char** argv) } my_set_max_open_files(open_files_limit); - if (remote_opt) - mysql = safe_connect(); MY_TMPDIR tmpdir; tmpdir.list= 0; @@ -944,24 +1052,26 @@ int main(int argc, char** argv) else load_processor.init_by_cur_dir(); - exit_value= 0; fprintf(result_file, "/*!40019 SET @@session.max_insert_delayed_threads=0*/;\n"); - while (--argc >= 0) + for (save_stop_position= stop_position, stop_position= ~(my_off_t)0 ; + (--argc >= 0) && !stop_passed ; ) { + if (argc == 0) // last log, --stop-position applies + stop_position= save_stop_position; if (dump_log_entries(*(argv++))) { exit_value=1; break; } + // For next log, --start-position does not apply + start_position= BIN_LOG_HEADER_SIZE; } if (tmpdir.list) free_tmpdir(&tmpdir); if (result_file != stdout) my_fclose(result_file, MYF(0)); - if (remote_opt) - mysql_close(mysql); cleanup(); free_defaults(defaults_argv); my_free_open_file_info(); diff --git a/include/my_time.h b/include/my_time.h index e42f7e9e402..1212f0533e2 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -41,6 +41,13 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, bool str_to_time(const char *str,uint length, MYSQL_TIME *l_time, int *was_cut); +long calc_daynr(uint year,uint month,uint day); + +void init_time(void); + +my_time_t +my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap); + C_MODE_END #endif /* _my_time_h_ */ diff --git a/include/mysql_time.h b/include/mysql_time.h index 943d018fc14..32da27ba33e 100644 --- a/include/mysql_time.h +++ b/include/mysql_time.h @@ -34,4 +34,13 @@ typedef struct st_mysql_time enum enum_mysql_timestamp_type time_type; } MYSQL_TIME; + +/* + Portable time_t replacement. + Should be signed and hold seconds for 1902-2038 range. 
+*/ +typedef long my_time_t; +#define MY_TIME_T_MAX LONG_MAX +#define MY_TIME_T_MIN LONG_MIN + #endif /* _mysql_time_h_ */ diff --git a/mysql-test/r/mysqlbinlog2.result b/mysql-test/r/mysqlbinlog2.result new file mode 100644 index 00000000000..3c1b85e05a1 --- /dev/null +++ b/mysql-test/r/mysqlbinlog2.result @@ -0,0 +1,446 @@ +drop table if exists t1; +reset master; +set @a=UNIX_TIMESTAMP("2020-01-21 15:32:22"); +set timestamp=@a; +create table t1 (a int auto_increment not null primary key, b char(3)); +insert into t1 values(null, "a"); +insert into t1 values(null, "b"); +set timestamp=@a+2; +insert into t1 values(null, "c"); +set timestamp=@a+4; +insert into t1 values(null, "d"); +insert into t1 values(null, "e"); +flush logs; +set timestamp=@a+1; +insert into t1 values(null, "f"); + +--- Local -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- offset -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=1; +use test; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- start-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609946; +insert into t1 
values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- stop-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; + +--- start-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=3; +use test; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- stop-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); + +--- Local with 2 binlogs on command line -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- offset -- +/*!40019 SET 
@@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=1; +use test; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- start-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- stop-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; + +--- start-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=3; +use test; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- stop-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET 
TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); + +--- Remote -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- offset -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=1; +use test; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- start-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- stop-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET 
INSERT_ID=4; + +--- start-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=3; +use test; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); + +--- stop-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); + +--- Remote with 2 binlogs on command line -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- offset -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=1; +use test; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- start-position 
-- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- stop-position -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; + +--- start-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +SET INSERT_ID=3; +use test; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +use test; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- stop-datetime -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "b"); + +--- to-last-log -- +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +use test; +SET TIMESTAMP=1579609942; +create table t1 (a int auto_increment not null primary key, b char(3)); +SET INSERT_ID=1; +SET TIMESTAMP=1579609942; +insert into t1 values(null, "a"); +SET INSERT_ID=2; +SET TIMESTAMP=1579609942; 
+insert into t1 values(null, "b"); +SET INSERT_ID=3; +SET TIMESTAMP=1579609944; +insert into t1 values(null, "c"); +SET INSERT_ID=4; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "d"); +SET INSERT_ID=5; +SET TIMESTAMP=1579609946; +insert into t1 values(null, "e"); +SET INSERT_ID=6; +SET TIMESTAMP=1579609943; +insert into t1 values(null, "f"); + +--- end of test -- +drop table t1; diff --git a/mysql-test/t/mysqlbinlog2.test b/mysql-test/t/mysqlbinlog2.test new file mode 100644 index 00000000000..c6cff7558d4 --- /dev/null +++ b/mysql-test/t/mysqlbinlog2.test @@ -0,0 +1,156 @@ +# Test for the new options --start-datetime, stop-datetime, +# and a few others. + +--disable_warnings +drop table if exists t1; +--enable_warnings +reset master; + +# We need this for getting fixed timestamps inside of this test. +# I use a date in the future to keep a growing timestamp along the +# binlog (including the Start_log_event). This test will work +# unchanged everywhere, because mysql-test-run has fixed TZ, which it +# exports (so mysqlbinlog has same fixed TZ). +set @a=UNIX_TIMESTAMP("2020-01-21 15:32:22"); +set timestamp=@a; +create table t1 (a int auto_increment not null primary key, b char(3)); +insert into t1 values(null, "a"); +insert into t1 values(null, "b"); +set timestamp=@a+2; +insert into t1 values(null, "c"); +set timestamp=@a+4; +insert into t1 values(null, "d"); +insert into t1 values(null, "e"); + +flush logs; +set timestamp=@a+1; # this could happen on a slave +insert into t1 values(null, "f"); + +# delimiters are for easier debugging in future + +--disable_query_log +select "--- Local --" as ""; +--enable_query_log + +# +# We should use --short-form everywhere because in other case output will +# be time dependent (the Start events). Better than nothing. 
+# + +--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001 + +--disable_query_log +select "--- offset --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --offset=2 $MYSQL_TEST_DIR/var/log/master-bin.000001 +--disable_query_log +select "--- start-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --start-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001 +--disable_query_log +select "--- stop-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --stop-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001 +--disable_query_log +select "--- start-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 +--disable_query_log +select "--- stop-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 + +--disable_query_log +select "--- Local with 2 binlogs on command line --" as ""; +--enable_query_log + +# This is to verify that some options apply only to first, or last binlog + +--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 + +--disable_query_log +select "--- offset --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --offset=2 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 +--disable_query_log +select "--- start-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --start-position=497 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 +--disable_query_log +select "--- stop-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --stop-position=32 $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 +--disable_query_log +select "--- start-datetime --" as ""; 
+--enable_query_log +--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 +--disable_query_log +select "--- stop-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" $MYSQL_TEST_DIR/var/log/master-bin.000001 $MYSQL_TEST_DIR/var/log/master-bin.000002 + +--disable_query_log +select "--- Remote --" as ""; +--enable_query_log + +--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 + +--disable_query_log +select "--- offset --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--disable_query_log +select "--- start-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --start-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--disable_query_log +select "--- stop-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --stop-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--disable_query_log +select "--- start-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--start-datetime=2020-01-21 15:32:24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 +--disable_query_log +select "--- stop-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020-01-21 15:32:24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 + +--disable_query_log +select "--- Remote with 2 binlogs on command line --" as ""; +--enable_query_log + +--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 
--port=$MASTER_MYPORT master-bin.000001 master-bin.000002 + +--disable_query_log +select "--- offset --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--disable_query_log +select "--- start-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --start-position=497 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--disable_query_log +select "--- stop-position --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form --stop-position=32 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--disable_query_log +select "--- start-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--start-datetime=20200121153224" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 +--disable_query_log +select "--- stop-datetime --" as ""; +--enable_query_log +--exec $MYSQL_BINLOG --short-form "--stop-datetime=2020/01/21 15@32@24" --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002 + +--disable_query_log +select "--- to-last-log --" as ""; +--enable_query_log + +--exec $MYSQL_BINLOG --short-form --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --to-last-log master-bin.000001 + +# clean up +--disable_query_log +select "--- end of test --" as ""; +--enable_query_log +drop table t1; diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 46c84ac9ba7..24c19be47ba 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -35,6 +35,16 @@ static uchar internal_format_positions[]= static char time_separator=':'; +static ulong const days_at_timestart=719528; /* daynr at 1970.01.01 */ +uchar days_in_month[]= {31, 28, 31, 30, 
31, 30, 31, 31, 30, 31, 30, 31, 0}; + +/* + Offset of system time zone from UTC in seconds used to speed up + work of my_system_gmt_sec() function. +*/ +static long my_time_zone=0; + + /* Convert a timestamp string to a MYSQL_TIME value. @@ -559,3 +569,148 @@ fractional: } +/* + Prepare offset of system time zone from UTC for my_system_gmt_sec() func. + + SYNOPSIS + init_time() +*/ +void init_time(void) +{ + time_t seconds; + struct tm *l_time,tm_tmp;; + MYSQL_TIME my_time; + bool not_used; + + seconds= (time_t) time((time_t*) 0); + localtime_r(&seconds,&tm_tmp); + l_time= &tm_tmp; + my_time_zone= 3600; /* Comp. for -3600 in my_gmt_sec */ + my_time.year= (uint) l_time->tm_year+1900; + my_time.month= (uint) l_time->tm_mon+1; + my_time.day= (uint) l_time->tm_mday; + my_time.hour= (uint) l_time->tm_hour; + my_time.minute= (uint) l_time->tm_min; + my_time.second= (uint) l_time->tm_sec; + my_system_gmt_sec(&my_time, &my_time_zone, ¬_used); /* Init my_time_zone */ +} + + + /* Calculate nr of day since year 0 in new date-system (from 1615) */ + +long calc_daynr(uint year,uint month,uint day) +{ + long delsum; + int temp; + DBUG_ENTER("calc_daynr"); + + if (year == 0 && month == 0 && day == 0) + DBUG_RETURN(0); /* Skip errors */ + if (year < 200) + { + if ((year=year+1900) < 1900+YY_PART_YEAR) + year+=100; + } + delsum= (long) (365L * year+ 31*(month-1) +day); + if (month <= 2) + year--; + else + delsum-= (long) (month*4+23)/10; + temp=(int) ((year/100+1)*3)/4; + DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld", + year+(month <= 2),month,day,delsum+year/4-temp)); + DBUG_RETURN(delsum+(int) year/4-temp); +} /* calc_daynr */ + + +/* + Convert time in MYSQL_TIME representation in system time zone to its + my_time_t form (number of seconds in UTC since begginning of Unix Epoch). 
+ + SYNOPSIS + my_system_gmt_sec() + t - time value to be converted + my_timezone - pointer to long where offset of system time zone + from UTC will be stored for caching + in_dst_time_gap - set to true if time falls into spring time-gap + + NOTES + The idea is to cache the time zone offset from UTC (including daylight + saving time) for the next call to make things faster. But currently we + just calculate this offset during startup (by calling init_time() + function) and use it all the time. + Time value provided should be legal time value (e.g. '2003-01-01 25:00:00' + is not allowed). + + RETURN VALUE + Time in UTC seconds since Unix Epoch representation. +*/ +my_time_t +my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap) +{ + uint loop; + time_t tmp; + struct tm *l_time,tm_tmp; + long diff, current_timezone; + + /* + Calculate the gmt time based on current time and timezone + The -1 on the end is to ensure that if have a date that exists twice + (like 2002-10-27 02:00:0 MET), we will find the initial date. + + By doing -3600 we will have to call localtime_r() several times, but + I couldn't come up with a better way to get a repeatable result :( + + We can't use mktime() as it's buggy on many platforms and not thread safe. + */ + tmp=(time_t) (((calc_daynr((uint) t->year,(uint) t->month,(uint) t->day) - + (long) days_at_timestart)*86400L + (long) t->hour*3600L + + (long) (t->minute*60 + t->second)) + (time_t) my_time_zone - + 3600); + current_timezone= my_time_zone; + + localtime_r(&tmp,&tm_tmp); + l_time=&tm_tmp; + for (loop=0; + loop < 2 && + (t->hour != (uint) l_time->tm_hour || + t->minute != (uint) l_time->tm_min); + loop++) + { /* One check should be enough ? 
*/ + /* Get difference in days */ + int days= t->day - l_time->tm_mday; + if (days < -1) + days= 1; // Month has wrapped + else if (days > 1) + days= -1; + diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) + + (long) (60*((int) t->minute - (int) l_time->tm_min))); + current_timezone+= diff+3600; // Compensate for -3600 above + tmp+= (time_t) diff; + localtime_r(&tmp,&tm_tmp); + l_time=&tm_tmp; + } + /* + Fix that if we are in the not existing daylight saving time hour + we move the start of the next real hour + */ + if (loop == 2 && t->hour != (uint) l_time->tm_hour) + { + int days= t->day - l_time->tm_mday; + if (days < -1) + days=1; // Month has wrapped + else if (days > 1) + days= -1; + diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+ + (long) (60*((int) t->minute - (int) l_time->tm_min))); + if (diff == 3600) + tmp+=3600 - t->minute*60 - t->second; // Move to next hour + else if (diff == -3600) + tmp-=t->minute*60 + t->second; // Move to previous hour + + *in_dst_time_gap= 1; + } + *my_timezone= current_timezone; + + return (my_time_t) tmp; +} /* my_system_gmt_sec */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 72ac3af70ff..f68d0951ea1 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -823,7 +823,7 @@ extern Gt_creator gt_creator; extern Lt_creator lt_creator; extern Ge_creator ge_creator; extern Le_creator le_creator; -extern uchar *days_in_month; +extern uchar days_in_month[]; extern char language[LIBLEN],reg_ext[FN_EXTLEN]; extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN]; extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; @@ -989,12 +989,9 @@ void free_blobs(TABLE *table); int set_zone(int nr,int min_zone,int max_zone); ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); -long calc_daynr(uint year,uint month,uint day); uint calc_days_in_year(uint year); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); -void 
init_time(void); -my_time_t my_system_gmt_sec(const TIME *, long *current_timezone, bool *not_exist); my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist); bool str_to_time_with_warn(const char *str,uint length,TIME *l_time); timestamp_type str_to_datetime_with_warn(const char *str, uint length, diff --git a/sql/time.cc b/sql/time.cc index 132612e53c5..4421b6aa00f 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -20,166 +20,9 @@ #include "mysql_priv.h" #include -static ulong const days_at_timestart=719528; /* daynr at 1970.01.01 */ -uchar *days_in_month= (uchar*) "\037\034\037\036\037\036\037\037\036\037\036\037"; - - -/* - Offset of system time zone from UTC in seconds used to speed up - work of my_system_gmt_sec() function. -*/ -static long my_time_zone=0; - - -/* - Prepare offset of system time zone from UTC for my_system_gmt_sec() func. - - SYNOPSIS - init_time() -*/ -void init_time(void) -{ - time_t seconds; - struct tm *l_time,tm_tmp;; - TIME my_time; - bool not_used; - - seconds= (time_t) time((time_t*) 0); - localtime_r(&seconds,&tm_tmp); - l_time= &tm_tmp; - my_time_zone= 3600; /* Comp. for -3600 in my_gmt_sec */ - my_time.year= (uint) l_time->tm_year+1900; - my_time.month= (uint) l_time->tm_mon+1; - my_time.day= (uint) l_time->tm_mday; - my_time.hour= (uint) l_time->tm_hour; - my_time.minute= (uint) l_time->tm_min; - my_time.second= (uint) l_time->tm_sec; - my_system_gmt_sec(&my_time, &my_time_zone, ¬_used); /* Init my_time_zone */ -} - - -/* - Convert time in TIME representation in system time zone to its - my_time_t form (number of seconds in UTC since begginning of Unix Epoch). 
- - SYNOPSIS - my_system_gmt_sec() - t - time value to be converted - my_timezone - pointer to long where offset of system time zone - from UTC will be stored for caching - in_dst_time_gap - set to true if time falls into spring time-gap - - NOTES - The idea is to cache the time zone offset from UTC (including daylight - saving time) for the next call to make things faster. But currently we - just calculate this offset during startup (by calling init_time() - function) and use it all the time. - Time value provided should be legal time value (e.g. '2003-01-01 25:00:00' - is not allowed). - - RETURN VALUE - Time in UTC seconds since Unix Epoch representation. -*/ -my_time_t -my_system_gmt_sec(const TIME *t, long *my_timezone, bool *in_dst_time_gap) -{ - uint loop; - time_t tmp; - struct tm *l_time,tm_tmp; - long diff, current_timezone; - - /* - Calculate the gmt time based on current time and timezone - The -1 on the end is to ensure that if have a date that exists twice - (like 2002-10-27 02:00:0 MET), we will find the initial date. - - By doing -3600 we will have to call localtime_r() several times, but - I couldn't come up with a better way to get a repeatable result :( - - We can't use mktime() as it's buggy on many platforms and not thread safe. - */ - tmp=(time_t) (((calc_daynr((uint) t->year,(uint) t->month,(uint) t->day) - - (long) days_at_timestart)*86400L + (long) t->hour*3600L + - (long) (t->minute*60 + t->second)) + (time_t) my_time_zone - - 3600); - current_timezone= my_time_zone; - - localtime_r(&tmp,&tm_tmp); - l_time=&tm_tmp; - for (loop=0; - loop < 2 && - (t->hour != (uint) l_time->tm_hour || - t->minute != (uint) l_time->tm_min); - loop++) - { /* One check should be enough ? 
*/ - /* Get difference in days */ - int days= t->day - l_time->tm_mday; - if (days < -1) - days= 1; // Month has wrapped - else if (days > 1) - days= -1; - diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) + - (long) (60*((int) t->minute - (int) l_time->tm_min))); - current_timezone+= diff+3600; // Compensate for -3600 above - tmp+= (time_t) diff; - localtime_r(&tmp,&tm_tmp); - l_time=&tm_tmp; - } - /* - Fix that if we are in the not existing daylight saving time hour - we move the start of the next real hour - */ - if (loop == 2 && t->hour != (uint) l_time->tm_hour) - { - int days= t->day - l_time->tm_mday; - if (days < -1) - days=1; // Month has wrapped - else if (days > 1) - days= -1; - diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+ - (long) (60*((int) t->minute - (int) l_time->tm_min))); - if (diff == 3600) - tmp+=3600 - t->minute*60 - t->second; // Move to next hour - else if (diff == -3600) - tmp-=t->minute*60 + t->second; // Move to previous hour - - *in_dst_time_gap= 1; - } - *my_timezone= current_timezone; - - return (my_time_t) tmp; -} /* my_system_gmt_sec */ - /* Some functions to calculate dates */ - /* Calculate nr of day since year 0 in new date-system (from 1615) */ - -long calc_daynr(uint year,uint month,uint day) -{ - long delsum; - int temp; - DBUG_ENTER("calc_daynr"); - - if (year == 0 && month == 0 && day == 0) - DBUG_RETURN(0); /* Skip errors */ - if (year < 200) - { - if ((year=year+1900) < 1900+YY_PART_YEAR) - year+=100; - } - delsum= (long) (365L * year+ 31*(month-1) +day); - if (month <= 2) - year--; - else - delsum-= (long) (month*4+23)/10; - temp=(int) ((year/100+1)*3)/4; - DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld", - year+(month <= 2),month,day,delsum+year/4-temp)); - DBUG_RETURN(delsum+(int) year/4-temp); -} /* calc_daynr */ - - #ifndef TESTTIME /* Calc weekday from daynr */ /* Returns 0 for monday, 1 for tuesday .... 
*/ diff --git a/sql/tztime.h b/sql/tztime.h index 334b14f4fc4..9df5f965f34 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -19,15 +19,10 @@ #pragma interface /* gcc class interface */ #endif -/* - Portable time_t replacement. - Should be signed and hold seconds for 1902-2038 range. -*/ -typedef long my_time_t; -#define MY_TIME_T_MAX LONG_MAX -#define MY_TIME_T_MIN LONG_MIN +#include #if !defined(TESTTIME) && !defined(TZINFO2SQL) + /* This class represents abstract time zone and provides basic interface for TIME <-> my_time_t conversion. From 7a00a995831cac4b49f861fbace958dc7c5e6d1c Mon Sep 17 00:00:00 2001 From: "miguel@hegel.txg" <> Date: Thu, 29 Jul 2004 18:35:54 -0300 Subject: [PATCH 25/93] Fix for bug report #4737 and revert fix for bug #4375 (re-opened). --- mysys/my_lib.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/mysys/my_lib.c b/mysys/my_lib.c index 0207d9a3683..055e00d2efc 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -461,17 +461,6 @@ MY_DIR *my_dir(const char *path, myf MyFlags) else finfo.mystat= NULL; - /* - If the directory is the root directory of the drive, Windows sometimes - creates hidden or system files there (like RECYCLER); do not show - them. We would need to see how this can be achieved with a Borland - compiler. - */ -#ifndef __BORLANDC__ - if (attrib & (_A_HIDDEN | _A_SYSTEM)) - continue; -#endif - if (push_dynamic(dir_entries_storage, (gptr)&finfo)) goto error; From da55194eb49826e2f2f8c1f06b958da81f5ee516 Mon Sep 17 00:00:00 2001 From: "guilhem@mysql.com" <> Date: Fri, 30 Jul 2004 01:10:21 +0200 Subject: [PATCH 26/93] rpl_relayrotate.test requires InnoDB (because what we want to test is if slave resumes at BEGIN). 
--- mysql-test/t/rpl_relayrotate.test | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mysql-test/t/rpl_relayrotate.test b/mysql-test/t/rpl_relayrotate.test index 46e6f1bd157..1bc6b574663 100644 --- a/mysql-test/t/rpl_relayrotate.test +++ b/mysql-test/t/rpl_relayrotate.test @@ -8,8 +8,7 @@ # The slave is started with max_binlog_size=16384 bytes, # to force many rotations (approximately 30 rotations) -# If the master or slave does not support InnoDB, this test will pass - +source include/have_innodb.inc; source include/master-slave.inc; connection slave; stop slave; From 3a17ab4551874940b451a73e4ab01aa48dd08df0 Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Fri, 30 Jul 2004 09:47:56 +0200 Subject: [PATCH 27/93] auto_value_on_zero bug test --- mysql-test/r/auto_increment.result | 18 ++++++++++++++++-- mysql-test/t/auto_increment.test | 2 ++ sql/sql_base.cc | 4 ++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 6bc59d4771f..79bcff06f68 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -201,12 +201,23 @@ a b 202 5 203 6 204 7 +alter table t1 modify b mediumint; +select * from t1 order by b; +a b +1 1 +200 2 +205 3 +201 4 +202 5 +203 6 +204 7 delete from t1 where a=0; update t1 set a=0 where b=5; select * from t1 order by b; a b 1 1 200 2 +205 3 201 4 0 5 203 6 @@ -214,7 +225,7 @@ a b delete from t1 where a=0; update t1 set a=NULL where b=6; Warnings: -Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 4 +Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 5 update t1 set a=300 where b=7; SET SQL_MODE=''; insert into t1(a,b)values(NULL,8); @@ -228,6 +239,7 @@ select * from t1 order by b; a b 1 1 200 2 +205 3 201 4 0 6 300 7 @@ -244,6 +256,7 @@ select * from t1 order by b; a b 1 1 200 2 +205 3 201 4 300 7 301 8 @@ -256,12 +269,13 @@ a b delete from t1 where a=0; 
update t1 set a=NULL where b=13; Warnings: -Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 9 +Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 10 update t1 set a=500 where b=14; select * from t1 order by b; a b 1 1 200 2 +205 3 201 4 300 7 301 8 diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test index 73588a91aac..65770f32476 100644 --- a/mysql-test/t/auto_increment.test +++ b/mysql-test/t/auto_increment.test @@ -138,6 +138,8 @@ insert into t1(b)values(5); insert into t1(b)values(6); insert into t1(b)values(7); select * from t1 order by b; +alter table t1 modify b mediumint; +select * from t1 order by b; delete from t1 where a=0; update t1 set a=0 where b=5; select * from t1 order by b; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index b6d14092885..1a923b2410a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2587,7 +2587,7 @@ fill_record(List &fields,List &values, bool ignore_errors) Field *rfield= field->field; TABLE *table= rfield->table; if (rfield == table->next_number_field) - table->auto_increment_field_not_null= true; + table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) DBUG_RETURN(1); } @@ -2608,7 +2608,7 @@ fill_record(Field **ptr,List &values, bool ignore_errors) value=v++; TABLE *table= field->table; if (field == table->next_number_field) - table->auto_increment_field_not_null= true; + table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(field, 0) < 0) && !ignore_errors) DBUG_RETURN(1); } From 80e74c80aa8c953aa9d0503560f0b1eb15d2fd50 Mon Sep 17 00:00:00 2001 From: "guilhem@mysql.com" <> Date: Fri, 30 Jul 2004 10:08:37 +0200 Subject: [PATCH 28/93] syntax fix: superfluous ';' which caused a problem with gcc 2.95 --- sql-common/my_time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 24c19be47ba..df852ad8880 100644 --- 
a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -578,7 +578,7 @@ fractional: void init_time(void) { time_t seconds; - struct tm *l_time,tm_tmp;; + struct tm *l_time,tm_tmp; MYSQL_TIME my_time; bool not_used; From f0cd209373111ea60e78ff8d5f994dc728ac1778 Mon Sep 17 00:00:00 2001 From: "pem@mysql.comhem.se" <> Date: Fri, 30 Jul 2004 12:13:40 +0200 Subject: [PATCH 29/93] Updated ps_6bdb.results. --- mysql-test/r/ps_6bdb.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result index 1c6b309576c..b8730cce101 100644 --- a/mysql-test/r/ps_6bdb.result +++ b/mysql-test/r/ps_6bdb.result @@ -870,7 +870,7 @@ def test t_many_col_types t_many_col_types c11 c11 0 9 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c12 c12 0 10 6 Y 32768 4 63 def test t_many_col_types t_many_col_types c13 c13 10 10 10 Y 128 0 63 def test t_many_col_types t_many_col_types c14 c14 12 19 19 Y 128 0 63 -def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1217 0 63 +def test t_many_col_types t_many_col_types c15 c15 7 19 19 N 1249 0 63 def test t_many_col_types t_many_col_types c16 c16 11 8 8 Y 128 0 63 def test t_many_col_types t_many_col_types c17 c17 13 4 4 Y 32864 0 63 def test t_many_col_types t_many_col_types c18 c18 1 1 1 Y 32768 0 63 From 2973728a55e14e3ceb7b3d9e5296b77dd0379bca Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Fri, 30 Jul 2004 12:46:27 +0200 Subject: [PATCH 30/93] Removed an error check from debug mode that gets executed n**2 times in closeTransaction where n is number of signals sent in transaction. n can easily become 250.000 in a large transaction. 
--- ndb/src/ndbapi/Ndblist.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp index e557fdc0a5f..a27b911eb07 100644 --- a/ndb/src/ndbapi/Ndblist.cpp +++ b/ndb/src/ndbapi/Ndblist.cpp @@ -592,13 +592,14 @@ Ndb::releaseSignal(NdbApiSignal* aSignal) #if defined VM_TRACE // Check that signal is not null assert(aSignal != NULL); - +#if 0 // Check that signal is not already in list NdbApiSignal* tmp = theSignalIdleList; while (tmp != NULL){ assert(tmp != aSignal); tmp = tmp->next(); } +#endif #endif creleaseSignals++; aSignal->next(theSignalIdleList); From 453a079feb81a981e114ebf2f2a8a9d1c181ea0a Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Fri, 30 Jul 2004 13:52:44 +0200 Subject: [PATCH 31/93] - added the MYSQL_EXTRA_LDFLAGS to the mysql_tzinfo_to_sql link flags to enable static linking (to avoid having a shared lib dependency in the Linux RPMs) - Disabled OpenSSL in the Max RPM --- sql/Makefile.am | 2 +- support-files/mysql.spec.sh | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/sql/Makefile.am b/sql/Makefile.am index 9859f1ef841..4eaf6d5377e 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -96,7 +96,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc -mysql_tzinfo_to_sql_LDADD = $(LDADD) $(CXXLDFLAGS) +mysql_tzinfo_to_sql_LDADD = @MYSQLD_EXTRA_LDFLAGS@ $(LDADD) $(CXXLDFLAGS) DEFS = -DMYSQL_SERVER \ -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \ diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index d5c43e61f9d..35e8b647522 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -149,7 +149,7 @@ languages and applications need to dynamically load and use MySQL. 
%package Max Release: %{release} -Summary: MySQL - server with Berkeley DB, OpenSSL, RAID and UDF support +Summary: MySQL - server with Berkeley DB, RAID and UDF support Group: Applications/Databases Provides: mysql-Max Obsoletes: mysql-Max @@ -157,7 +157,7 @@ Requires: MySQL >= 4.0 %description Max Optional MySQL server binary that supports additional features like -Berkeley DB, OpenSSL, RAID and User Defined Functions (UDFs). +Berkeley DB, RAID and User Defined Functions (UDFs). To activate this binary, just install this package in addition to the standard MySQL package. @@ -269,7 +269,7 @@ then fi BuildMySQL "--enable-shared \ - --with-openssl \ + --without-openssl \ --with-berkeley-db \ --with-innodb \ --with-raid \ @@ -579,6 +579,11 @@ fi # The spec file changelog only includes changes made to the spec file # itself %changelog +* Thu Jul 29 2004 Lenz Grimmer + +- disabled OpenSSL in the Max binaries again (the RPM packages were the + only exception to this anyway) (BUG 1043) + * Wed Jun 30 2004 Lenz Grimmer - fixed server postinstall (mysql_install_db was called with the wrong From c5e2fbcc5321058cd0a8a6811c923fa7f61d945b Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Fri, 30 Jul 2004 14:17:12 +0200 Subject: [PATCH 32/93] no_auto_value_on_zero + alter table bug --- mysql-test/r/auto_increment.result | 18 +++++++++++------- mysql-test/t/auto_increment.test | 5 +++++ sql/sql_table.cc | 19 +++++++++++++------ 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 79bcff06f68..f5ec5f1f852 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -206,18 +206,25 @@ select * from t1 order by b; a b 1 1 200 2 -205 3 +0 3 201 4 202 5 203 6 204 7 +create table t2 (a int); +insert t2 values (1),(2); +alter table t2 add b int auto_increment primary key; +select * from t2; +a b +1 1 +2 2 +drop table t2; delete from t1 where a=0; update t1 set 
a=0 where b=5; select * from t1 order by b; a b 1 1 200 2 -205 3 201 4 0 5 203 6 @@ -225,7 +232,7 @@ a b delete from t1 where a=0; update t1 set a=NULL where b=6; Warnings: -Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 5 +Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 4 update t1 set a=300 where b=7; SET SQL_MODE=''; insert into t1(a,b)values(NULL,8); @@ -239,7 +246,6 @@ select * from t1 order by b; a b 1 1 200 2 -205 3 201 4 0 6 300 7 @@ -256,7 +262,6 @@ select * from t1 order by b; a b 1 1 200 2 -205 3 201 4 300 7 301 8 @@ -269,13 +274,12 @@ a b delete from t1 where a=0; update t1 set a=NULL where b=13; Warnings: -Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 10 +Warning 1263 Data truncated; NULL supplied to NOT NULL column 'a' at row 9 update t1 set a=500 where b=14; select * from t1 order by b; a b 1 1 200 2 -205 3 201 4 300 7 301 8 diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test index 65770f32476..e5986e6755d 100644 --- a/mysql-test/t/auto_increment.test +++ b/mysql-test/t/auto_increment.test @@ -140,6 +140,11 @@ insert into t1(b)values(7); select * from t1 order by b; alter table t1 modify b mediumint; select * from t1 order by b; +create table t2 (a int); +insert t2 values (1),(2); +alter table t2 add b int auto_increment primary key; +select * from t2; +drop table t2; delete from t1 where a=0; update t1 set a=0 where b=5; select * from t1 order by b; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 8d82ca44951..7afbe6d0b87 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3271,13 +3271,12 @@ copy_data_between_tables(TABLE *from,TABLE *to, ha_rows *deleted) { int error; - Copy_field *copy,*copy_end; + Copy_field *copy,*copy_end, *next_field; ulong found_count,delete_count; THD *thd= current_thd; uint length; SORT_FIELD *sortorder; READ_RECORD info; - Field *next_field; TABLE_LIST tables; List fields; List all_fields; @@ -3298,7 
+3297,12 @@ copy_data_between_tables(TABLE *from,TABLE *to, { def=it++; if (def->field) + { + if (*ptr == to->next_number_field) + next_field= copy_end; (copy_end++)->set(*ptr,def->field,0); + } + } found_count=delete_count=0; @@ -3334,7 +3338,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, error= 1; goto err; } - + /* Handler must be told explicitly to retrieve all columns, because this function does not set field->query_id in the columns to the current query id */ @@ -3343,7 +3347,6 @@ copy_data_between_tables(TABLE *from,TABLE *to, if (handle_duplicates == DUP_IGNORE || handle_duplicates == DUP_REPLACE) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - next_field=to->next_number_field; thd->row_count= 0; while (!(error=info.read_record(&info))) { @@ -3354,10 +3357,14 @@ copy_data_between_tables(TABLE *from,TABLE *to, break; } thd->row_count++; - if (next_field) - next_field->reset(); + if (to->next_number_field) + to->next_number_field->reset(); for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++) + { + if (copy_ptr == next_field) + to->auto_increment_field_not_null= TRUE; copy_ptr->do_copy(copy_ptr); + } if ((error=to->file->write_row((byte*) to->record[0]))) { if ((handle_duplicates != DUP_IGNORE && From 9edc3d8b9d87b0201a08b3acf792189592293fa8 Mon Sep 17 00:00:00 2001 From: "marko@hundin.mysql.fi" <> Date: Fri, 30 Jul 2004 16:08:19 +0300 Subject: [PATCH 33/93] ha_innodb.cc: ha_innobase::create(): pass the query string as UTF-8 to row_table_add_foreign_constraints() (Bug #4649) --- sql/ha_innodb.cc | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index c06448647d5..a8309d4f32c 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -3642,11 +3642,19 @@ ha_innobase::create( } if (current_thd->query != NULL) { - - error = row_table_add_foreign_constraints(trx, - current_thd->query, norm_name); - error = convert_error_code_to_mysql(error, NULL); + LEX_STRING q; + if 
(thd->convert_string(&q, system_charset_info, + current_thd->query, + current_thd->query_length, + current_thd->charset())) { + error = HA_ERR_OUT_OF_MEM; + } else { + error = row_table_add_foreign_constraints(trx, + q.str, norm_name); + + error = convert_error_code_to_mysql(error, NULL); + } if (error) { innobase_commit_low(trx); From b435b8d47ec4034ad9a2d4cdc8d96e8f0bd00ea0 Mon Sep 17 00:00:00 2001 From: "marko@hundin.mysql.fi" <> Date: Fri, 30 Jul 2004 17:04:56 +0300 Subject: [PATCH 34/93] fil0fil.c: Restore accidentally deleted comment of fil_create_directory_for_tablename() --- innobase/fil/fil0fil.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/innobase/fil/fil0fil.c b/innobase/fil/fil0fil.c index 59fbd6f785d..885738deae2 100644 --- a/innobase/fil/fil0fil.c +++ b/innobase/fil/fil0fil.c @@ -1513,6 +1513,8 @@ fil_decr_pending_ibuf_merges( mutex_exit(&(system->mutex)); } +/************************************************************ +Creates the database directory for a table if it does not exist yet. 
*/ static void fil_create_directory_for_tablename( From 2852862c68b29e418795b4ea0a8b1293f5919b3e Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Fri, 30 Jul 2004 22:05:08 +0200 Subject: [PATCH 35/93] apply in SET PASSWORD same checks as in GRANT, to let only valid hashes through --- mysql-test/r/connect.result | 2 ++ mysql-test/t/connect.test | 2 ++ sql/set_var.cc | 9 +++++---- sql/sql_acl.cc | 20 +++++++++++++++----- sql/sql_acl.h | 3 ++- 5 files changed, 26 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result index 10c5d6cc0b8..ae0def02399 100644 --- a/mysql-test/r/connect.result +++ b/mysql-test/r/connect.result @@ -40,6 +40,8 @@ show tables; Tables_in_test update mysql.user set password=old_password("gambling2") where user=_binary"test"; flush privileges; +set password='gambling3'; +ERROR HY000: Password hash should be a 41-digit hexadecimal number set password=old_password('gambling3'); show tables; Tables_in_mysql diff --git a/mysql-test/t/connect.test b/mysql-test/t/connect.test index 32c1479ae04..c1ecf176470 100644 --- a/mysql-test/t/connect.test +++ b/mysql-test/t/connect.test @@ -48,6 +48,8 @@ flush privileges; #connect (con1,localhost,test,gambling2,""); #show tables; connect (con1,localhost,test,gambling2,mysql); +--error 1105 +set password='gambling3'; set password=old_password('gambling3'); show tables; connect (con1,localhost,test,gambling3,test); diff --git a/sql/set_var.cc b/sql/set_var.cc index e70fdaedb29..bcebb62ae4d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2851,8 +2851,9 @@ int set_var_password::check(THD *thd) if (!user->host.str) user->host.str= (char*) thd->host_or_ip; /* Returns 1 as the function sends error to client */ - return check_change_password(thd, user->host.str, user->user.str) ? 1 : 0; -#else + return check_change_password(thd, user->host.str, user->user.str, password) ? 
+ 1 : 0; +#else return 0; #endif } @@ -2861,8 +2862,8 @@ int set_var_password::update(THD *thd) { #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Returns 1 as the function sends error to client */ - return (change_password(thd, user->host.str, user->user.str, password) ? - 1 : 0); + return change_password(thd, user->host.str, user->user.str, password) ? + 1 : 0; #else return 0; #endif diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index fddd5b70a2f..f316bca4876 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1127,13 +1127,14 @@ bool acl_check_host(const char *host, const char *ip) 1 ERROR ; In this case the error is sent to the client. */ -bool check_change_password(THD *thd, const char *host, const char *user) +bool check_change_password(THD *thd, const char *host, const char *user, + char *new_password) { if (!initialized) { net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, - "--skip-grant-tables"); /* purecov: inspected */ - return(1); /* purecov: inspected */ + "--skip-grant-tables"); + return(1); } if (!thd->slave_thread && (strcmp(thd->user,user) || @@ -1147,6 +1148,15 @@ bool check_change_password(THD *thd, const char *host, const char *user) send_error(thd, ER_PASSWORD_ANONYMOUS_USER); return(1); } + uint len=strlen(new_password); + if (len != SCRAMBLED_PASSWORD_CHAR_LENGTH && + len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) + { + net_printf(thd, 0, + "Password hash should be a %d-digit hexadecimal number", + SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } return(0); } @@ -1174,7 +1184,7 @@ bool change_password(THD *thd, const char *host, const char *user, host,user,new_password)); DBUG_ASSERT(host != 0); // Ensured by parent - if (check_change_password(thd, host, user)) + if (check_change_password(thd, host, user, new_password)) DBUG_RETURN(1); VOID(pthread_mutex_lock(&acl_cache->lock)); @@ -1433,7 +1443,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, if (combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH && combo.password.length != 
SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - my_printf_error(ER_PASSWORD_NO_MATCH, + my_printf_error(ER_UNKNOWN_ERROR, "Password hash should be a %d-digit hexadecimal number", MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); DBUG_RETURN(-1); diff --git a/sql/sql_acl.h b/sql/sql_acl.h index a237b45e29c..68cb1476eb5 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -142,7 +142,8 @@ ulong acl_get(const char *host, const char *ip, int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd, uint passwd_len); bool acl_check_host(const char *host, const char *ip); -bool check_change_password(THD *thd, const char *host, const char *user); +bool check_change_password(THD *thd, const char *host, const char *user, + char *password); bool change_password(THD *thd, const char *host, const char *user, char *password); int mysql_grant(THD *thd, const char *db, List &user_list, From 4665cec001f09c621c31b1970173cc90f69b92de Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Fri, 30 Jul 2004 22:15:52 +0200 Subject: [PATCH 36/93] bug#4817 catalog name is "def" --- libmysqld/lib_sql.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 0adf9aeb86a..f1404d12654 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -609,9 +609,9 @@ bool Protocol::send_fields(List *list, uint flag) client_field->org_table_length= strlen(client_field->org_table); client_field->charsetnr= server_field.charsetnr; - client_field->catalog= strdup_root(field_alloc, "std"); + client_field->catalog= strdup_root(field_alloc, "def"); client_field->catalog_length= 3; - + if (INTERNAL_NUM_FIELD(client_field)) client_field->flags|= NUM_FLAG; From 268c1a2edc3c8b692ff24335c1806d8e8e47c0f8 Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Sat, 31 Jul 2004 12:00:12 +0200 Subject: [PATCH 37/93] Bug fix for alter table and auto_increment --- mysql-test/r/ndb_alter_table.result | 2 +- mysql-test/r/ndb_basic.result | 16 ++++++++++------ 
mysql-test/r/ndb_lock.result | 5 +++++ mysql-test/t/ndb_alter_table.test | 2 +- mysql-test/t/ndb_basic.test | 13 +++++++------ mysql-test/t/ndb_lock.test | 7 +++++-- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2 +- 7 files changed, 30 insertions(+), 17 deletions(-) diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result index 6cc6a89d5ad..ce3c96b6f39 100644 --- a/mysql-test/r/ndb_alter_table.result +++ b/mysql-test/r/ndb_alter_table.result @@ -15,7 +15,7 @@ col2 varchar(30) not null, col3 varchar (20) not null, col4 varchar(4) not null, col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null, -col6 int not null, to_be_deleted int); +col6 int not null, to_be_deleted int) ENGINE=ndbcluster; insert into t1 values (2,4,3,5,"PENDING",1,7); alter table t1 add column col4_5 varchar(20) not null after col4, diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index b7479d9543d..3dc60b17754 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -6,20 +6,20 @@ attr2 INT, attr3 VARCHAR(10) ) ENGINE=ndbcluster; INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413'); -SELECT pk1 FROM t1; +SELECT pk1 FROM t1 ORDER BY pk1; pk1 9410 9411 -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; pk1 attr1 attr2 attr3 9410 9412 NULL 9412 9411 9413 17 9413 -SELECT t1.* FROM t1; +SELECT t1.* FROM t1 ORDER BY pk1; pk1 attr1 attr2 attr3 9410 9412 NULL 9412 9411 9413 17 9413 UPDATE t1 SET attr1=1 WHERE pk1=9410; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; pk1 attr1 attr2 attr3 9410 1 NULL 9412 9411 9413 17 9413 @@ -115,13 +115,17 @@ SELECT * FROM t1; id id2 1234 7890 DELETE FROM t1; -INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890); -SELECT * FROM t1; +INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890); +SELECT * FROM t1 ORDER BY id; id id2 +3454 7890 3456 7890 3456 7890 3456 7890 DELETE FROM t1 WHERE id = 3456; +SELECT * FROM t1 ORDER BY id; +id id2 +3454 7890 
DROP TABLE t1; CREATE TABLE t1 ( pk1 INT NOT NULL PRIMARY KEY, diff --git a/mysql-test/r/ndb_lock.result b/mysql-test/r/ndb_lock.result index 505eb054afd..56661913e22 100644 --- a/mysql-test/r/ndb_lock.result +++ b/mysql-test/r/ndb_lock.result @@ -11,6 +11,11 @@ x y 2 two start transaction; insert into t1 values (3,'three'); +select * from t1 order by x; +x y +1 one +2 two +3 three start transaction; select * from t1 order by x; x y diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test index f95aa82b7cc..cc92843eba7 100644 --- a/mysql-test/t/ndb_alter_table.test +++ b/mysql-test/t/ndb_alter_table.test @@ -29,7 +29,7 @@ col2 varchar(30) not null, col3 varchar (20) not null, col4 varchar(4) not null, col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null, -col6 int not null, to_be_deleted int); +col6 int not null, to_be_deleted int) ENGINE=ndbcluster; insert into t1 values (2,4,3,5,"PENDING",1,7); alter table t1 add column col4_5 varchar(20) not null after col4, diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 08fbf913155..c3c296113c3 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -21,13 +21,13 @@ CREATE TABLE t1 ( INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413'); -SELECT pk1 FROM t1; -SELECT * FROM t1; -SELECT t1.* FROM t1; +SELECT pk1 FROM t1 ORDER BY pk1; +SELECT * FROM t1 ORDER BY pk1; +SELECT t1.* FROM t1 ORDER BY pk1; # Update on record by primary key UPDATE t1 SET attr1=1 WHERE pk1=9410; -SELECT * FROM t1; +SELECT * FROM t1 ORDER BY pk1; # Update primary key UPDATE t1 SET pk1=2 WHERE attr1=1; @@ -85,9 +85,10 @@ UPDATE t1 SET id=1234 WHERE id2=7890; SELECT * FROM t1; DELETE FROM t1; -INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890); -SELECT * FROM t1; +INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890); +SELECT * FROM t1 ORDER BY id; DELETE FROM t1 WHERE id = 3456; +SELECT * FROM t1 ORDER BY id; DROP TABLE t1; diff 
--git a/mysql-test/t/ndb_lock.test b/mysql-test/t/ndb_lock.test index 852d641ed54..c0389dced44 100644 --- a/mysql-test/t/ndb_lock.test +++ b/mysql-test/t/ndb_lock.test @@ -25,10 +25,13 @@ connection con2; select * from t1 order by x; connection con1; -start transaction; insert into t1 values (3,'three'); +start transaction; +insert into t1 values (3,'three'); +select * from t1 order by x; connection con2; -start transaction; select * from t1 order by x; +start transaction; +select * from t1 order by x; connection con1; commit; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index f1091ad5fb3..004ad531b65 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -1452,7 +1452,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, alterTable(&tSignal, ptr) : createTable(&tSignal, ptr); - if (haveAutoIncrement) { + if (!alter && haveAutoIncrement) { // if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) { if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) { m_error.code = 4336; From 4312a7913235dd63f7ad5a80079ee47004da88e6 Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Sat, 31 Jul 2004 15:53:27 +0200 Subject: [PATCH 38/93] Bitmask.hpp: compile fix for gcc-3.4.x --- ndb/include/util/Bitmask.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/include/util/Bitmask.hpp b/ndb/include/util/Bitmask.hpp index 7435e351ddc..ee0140a2099 100644 --- a/ndb/include/util/Bitmask.hpp +++ b/ndb/include/util/Bitmask.hpp @@ -762,7 +762,7 @@ BitmaskPOD::overlaps(BitmaskPOD that) template class Bitmask : public BitmaskPOD { public: - Bitmask() { clear();} + Bitmask() { this->clear();} }; #endif From 466992fb1f72f6be01f64f5209e6cdbbbfdf2d5e Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 1 Aug 2004 09:07:08 +0200 Subject: [PATCH 39/93] New file missing in source distr. 
--- ndb/include/Makefile.am | 1 + 1 file changed, 1 insertion(+) diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am index 2565a78238b..b1b7951f216 100644 --- a/ndb/include/Makefile.am +++ b/ndb/include/Makefile.am @@ -23,6 +23,7 @@ ndbapi/NdbReceiver.hpp \ ndbapi/NdbResultSet.hpp \ ndbapi/NdbScanFilter.hpp \ ndbapi/NdbScanOperation.hpp \ +ndbapi/NdbIndexScanOperation.hpp \ ndbapi/ndberror.h mgmapiinclude_HEADERS = \ From fd11f4a1879aed07f0519661a400444f7cec2dbf Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 1 Aug 2004 13:41:13 +0200 Subject: [PATCH 40/93] Fix uninit var. Fix error printout --- ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 7 ++++++- ndb/src/mgmsrv/main.cpp | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index e7f370e9879..095ba9b0bbe 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -998,7 +998,12 @@ public: * It will receive max 16 tuples in each request */ struct ScanFragRec { - ScanFragRec(){} + ScanFragRec(){ + stopFragTimer(); + lqhBlockref = 0; + scanFragState = IDLE; + scanRec = RNIL; + } /** * ScanFragState * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index c546d142810..0bbf042fbd6 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -187,7 +187,7 @@ NDB_MAIN(mgmsrv){ "Please check if the port is already used,\n" "(perhaps a mgmtsrvr is already running),\n" "and if you are executing on the correct computer", - glob.interface_name, glob.port); + (glob.interface_name ? 
glob.interface_name : "*"), glob.port); goto error_end; } free(glob.interface_name); From 29b5faf06f4d7fba15a60269d43ee12c4491dcaf Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 1 Aug 2004 14:12:45 +0200 Subject: [PATCH 41/93] Fix mysql-test-run w.r.t NDB "export A=var" is bash feature instead do "A=var; export A" --- mysql-test/mysql-test-run.sh | 6 ++++-- mysql-test/ndb/ndbcluster.sh | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 853b6302f86..0c46fa17e1f 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -1450,9 +1450,11 @@ then then echo "Starting ndbcluster" ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1 - export NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT" + NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT" + export NDB_CONNECTSTRING else - export NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER" + NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER" + export NDB_CONNECTSTRING echo "Using ndbcluster at $NDB_CONNECTSTRING" fi fi diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index 3c5c715dde0..8b53c70fb72 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -86,7 +86,6 @@ fs_name_1=$fs_ndb/node-1-fs fs_name_2=$fs_ndb/node-2-fs NDB_HOME= -export NDB_CONNECTSTRING if [ ! 
-x $fsdir ]; then echo "$fsdir missing" exit 1 @@ -102,7 +101,8 @@ fi ndb_host="localhost" ndb_mgmd_port=$port_base -export NDB_CONNECTSTRING="host=$ndb_host:$ndb_mgmd_port" +NDB_CONNECTSTRING="host=$ndb_host:$ndb_mgmd_port" +export NDB_CONNECTSTRING start_default_ndbcluster() { From a6e2c98d0282c563410b17d40e333e2ba5067156 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 1 Aug 2004 15:53:31 +0200 Subject: [PATCH 42/93] Compile fixes for ccc (& cxx) on linux/alpha Mainly explicit template instantiations --- .../common/debugger/signaldata/SignalDataPrint.cpp | 4 +++- ndb/src/common/util/SocketServer.cpp | 3 +++ ndb/src/kernel/blocks/backup/BackupInit.cpp | 3 +++ ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 2 ++ ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 3 +++ ndb/src/kernel/vm/ArrayPool.hpp | 2 ++ ndb/src/mgmclient/CpcClient.cpp | 3 ++- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 1 - ndb/test/include/NDBT_Table.hpp | 2 +- ndb/test/include/NDBT_Test.hpp | 13 ++++++++++--- ndb/test/run-test/main.cpp | 2 ++ ndb/test/src/HugoOperations.cpp | 2 ++ ndb/test/src/HugoTransactions.cpp | 2 +- ndb/test/src/NDBT_Test.cpp | 12 +++++++++--- ndb/test/src/NdbRestarter.cpp | 2 ++ ndb/test/tools/cpcc.cpp | 1 + 16 files changed, 46 insertions(+), 11 deletions(-) diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp index d49e316ad38..4f4cf645b39 100644 --- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp +++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp @@ -258,5 +258,7 @@ SignalDataPrintFunctions[] = { const unsigned short NO_OF_PRINT_FUNCTIONS = sizeof(SignalDataPrintFunctions)/sizeof(NameFunctionPair); - +template class Bitmask<1>; +template class Bitmask<2>; +template class Bitmask<4>; diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 7c9585ae022..609f17f1a8d 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ 
b/ndb/src/common/util/SocketServer.cpp @@ -305,3 +305,6 @@ sessionThread_C(void* _sc){ NdbThread_Exit(0); return 0; } + +template class MutexVector; +template class MutexVector; diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp index 36ce1857144..d8cbb36df62 100644 --- a/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -213,3 +213,6 @@ Backup::~Backup() BLOCK_FUNCTIONS(Backup); +template class ArrayPool; +template class ArrayPool; +template class ArrayPool; diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp index 92410e1a784..ecaead3ba5a 100644 --- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp +++ b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp @@ -2581,3 +2581,5 @@ DbUtil::execUTIL_DESTORY_LOCK_REQ(Signal* signal){ sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_REF, signal, UtilDestroyLockRef::SignalLength, JBB); } + +template class ArrayPool; diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index fe737fc584b..e38ae566430 100644 --- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -1010,3 +1010,6 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal) BLOCK_FUNCTIONS(Ndbfs); +template class Vector; +template class Vector; +template class MemoryChannel; diff --git a/ndb/src/kernel/vm/ArrayPool.hpp b/ndb/src/kernel/vm/ArrayPool.hpp index 4fc6bb97f73..c06f48f2e8e 100644 --- a/ndb/src/kernel/vm/ArrayPool.hpp +++ b/ndb/src/kernel/vm/ArrayPool.hpp @@ -153,6 +153,7 @@ public: * (Run operator NdbOut<< on every element) */ void print(NdbOut & out){ +#ifdef VM_TRACE out << "FirstFree = " << firstFree << endl; for(Uint32 i = 0; i; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 5e06665dc0a..010d1c83b55 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -2818,5 +2818,4 @@ template 
class Vector; template class Vector >; template class Vector; template class Vector; -template class Bitmask<4>; diff --git a/ndb/test/include/NDBT_Table.hpp b/ndb/test/include/NDBT_Table.hpp index eee76773106..c0b6443d95b 100644 --- a/ndb/test/include/NDBT_Table.hpp +++ b/ndb/test/include/NDBT_Table.hpp @@ -26,7 +26,7 @@ class NDBT_Attribute : public NdbDictionary::Column { friend class NdbOut& operator <<(class NdbOut&, const NDBT_Attribute &); public: NDBT_Attribute(const char* _name, - Column::Type _type, + NdbDictionary::Column::Type _type, int _length = 1, bool _pk = false, bool _nullable = false): diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp index 7a5d14689bc..8330c162e14 100644 --- a/ndb/test/include/NDBT_Test.hpp +++ b/ndb/test/include/NDBT_Test.hpp @@ -110,6 +110,7 @@ public: NDBT_Step(NDBT_TestCase* ptest, const char* pname, NDBT_TESTFUNC* pfunc); + virtual ~NDBT_Step() {} int execute(NDBT_Context*); virtual int setUp() = 0; virtual void tearDown() = 0; @@ -132,8 +133,9 @@ public: NDBT_NdbApiStep(NDBT_TestCase* ptest, const char* pname, NDBT_TESTFUNC* pfunc); - int setUp(); - void tearDown(); + virtual ~NDBT_NdbApiStep() {} + virtual int setUp(); + virtual void tearDown(); Ndb* getNdb(); protected: @@ -145,6 +147,7 @@ public: NDBT_ParallelStep(NDBT_TestCase* ptest, const char* pname, NDBT_TESTFUNC* pfunc); + virtual ~NDBT_ParallelStep() {} }; class NDBT_Verifier : public NDBT_NdbApiStep { @@ -152,6 +155,7 @@ public: NDBT_Verifier(NDBT_TestCase* ptest, const char* name, NDBT_TESTFUNC* func); + virtual ~NDBT_Verifier() {} }; class NDBT_Initializer : public NDBT_NdbApiStep { @@ -159,6 +163,7 @@ public: NDBT_Initializer(NDBT_TestCase* ptest, const char* name, NDBT_TESTFUNC* func); + virtual ~NDBT_Initializer() {} }; class NDBT_Finalizer : public NDBT_NdbApiStep { @@ -166,6 +171,7 @@ public: NDBT_Finalizer(NDBT_TestCase* ptest, const char* name, NDBT_TESTFUNC* func); + virtual ~NDBT_Finalizer() {} }; @@ -174,7 +180,8 @@ public: 
NDBT_TestCase(NDBT_TestSuite* psuite, const char* name, const char* comment); - virtual ~NDBT_TestCase(){}; + virtual ~NDBT_TestCase(){} + // This is the default executor of a test case // When a test case is executed it will need to be suplied with a number of // different parameters and settings, these are passed to the test in the diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp index 9e318b0219e..b98bc2a7a74 100644 --- a/ndb/test/run-test/main.cpp +++ b/ndb/test/run-test/main.cpp @@ -970,3 +970,5 @@ setup_hosts(atrt_config& config){ } return true; } + +template class Vector*>; diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp index 91263aa29b4..f841de917c0 100644 --- a/ndb/test/src/HugoOperations.cpp +++ b/ndb/test/src/HugoOperations.cpp @@ -791,3 +791,5 @@ HugoOperations::indexUpdateRecord(Ndb*, } return NDBT_OK; } + +template class Vector; diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 7f12484ddc8..9ac99e9de85 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -2415,4 +2415,4 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, return NDBT_OK; } - +template class Vector; diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index 1bb00138d3b..3c247dcd7cf 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -1117,6 +1117,12 @@ void NDBT_Step::print(){ } - - - +template class Vector; +template class Vector; +template class Vector; +template class Vector; +template class Vector; +template class Vector; +template class Vector; +template class Vector; +template class Vector; diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp index b731cccb259..c4d668adcd4 100644 --- a/ndb/test/src/NdbRestarter.cpp +++ b/ndb/test/src/NdbRestarter.cpp @@ -672,3 +672,5 @@ NdbRestarter::getConfig(){ m_config = ndb_mgm_get_configuration(handle, 0); return m_config; } + +template class Vector; diff 
--git a/ndb/test/tools/cpcc.cpp b/ndb/test/tools/cpcc.cpp index e768d707bbc..1ff11a05658 100644 --- a/ndb/test/tools/cpcc.cpp +++ b/ndb/test/tools/cpcc.cpp @@ -347,3 +347,4 @@ Operate::evaluate(SimpleCpcClient* c, const SimpleCpcClient::Process & pp){ return true; } +template class Vector*>; From ae44634d9eca5efc8727ac718795c6323be810bd Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Sun, 1 Aug 2004 16:03:04 +0200 Subject: [PATCH 43/93] DbtuxScan.cpp: dont crash before wl_1942 done --- ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index c0c470150bc..c4c33ff931f 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -742,7 +742,9 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr) if (scan.m_state == ScanOp::Locked) { jam(); // version of a tuple locked by us cannot disappear (assert only) +#ifdef dbtux_wl_1942_is_done ndbassert(false); +#endif AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); lockReq->returnCode = RNIL; lockReq->requestInfo = AccLockReq::Unlock; From e8d23cfa264e70bd8d6b4d6a4ef5368ca808b43e Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Sun, 1 Aug 2004 17:21:55 +0200 Subject: [PATCH 44/93] followup to SET PASSWORD fix --- sql/sql_acl.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index f316bca4876..44fd5e9e94f 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1149,7 +1149,7 @@ bool check_change_password(THD *thd, const char *host, const char *user, return(1); } uint len=strlen(new_password); - if (len != SCRAMBLED_PASSWORD_CHAR_LENGTH && + if (len && len != SCRAMBLED_PASSWORD_CHAR_LENGTH && len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { net_printf(thd, 0, From c95ef3321022b59c53a6dd47eba91acebca3fc60 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 2 Aug 
2004 09:00:24 +0200 Subject: [PATCH 45/93] Bug fix for testBasic -n MassiveRollback, a bug in LCP in LQH --- ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 3 +- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 128 ++++++++++++---------- ndb/src/ndbapi/Ndbif.cpp | 96 ++++++++-------- ndb/test/ndbapi/testBasic.cpp | 8 +- ndb/test/src/HugoTransactions.cpp | 2 +- 5 files changed, 131 insertions(+), 106 deletions(-) diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 824f74c59af..e0994955818 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -410,7 +410,6 @@ */ class Dblqh: public SimulatedBlock { public: - enum LcpCloseState { LCP_IDLE = 0, LCP_RUNNING = 1, // LCP is running @@ -1990,7 +1989,6 @@ public: UintR nextTcLogQueue; UintR nextTc; UintR nextTcConnectrec; - Uint16 nodeAfterNext[2]; UintR prevHashRec; UintR prevLogTcrec; UintR prevTc; @@ -2027,6 +2025,7 @@ public: Uint16 nextReplica; Uint16 primKeyLen; Uint16 save1; + Uint16 nodeAfterNext[3]; Uint8 activeCreat; Uint8 apiVersionNo; diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 8bef953f522..f3a6ce8f994 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -3574,7 +3574,6 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal) key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo); key.fragPtrI = fragptr.i; c_scanTakeOverHash.find(scanptr, key); - ndbassert(scanptr.i != RNIL); } if (scanptr.i == RNIL) { jam(); @@ -5995,10 +5994,15 @@ void Dblqh::abortStateHandlerLab(Signal* signal) break; case TcConnectionrec::STOPPED: jam(); -/* ------------------------------------------------------------------------- */ -/*WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY A LOCAL CHECKPOINT. 
*/ -/* ------------------------------------------------------------------------- */ + /* --------------------------------------------------------------------- + * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY A LCP + * Since nothing has been done, just release operation + * i.e. no prepare log record has been written + * so no abort log records needs to be written + */ releaseWaitQueue(signal); + continueAfterLogAbortWriteLab(signal); + return; break; case TcConnectionrec::WAIT_AI_AFTER_ABORT: jam(); @@ -9953,9 +9957,11 @@ void Dblqh::execLCP_HOLDOPCONF(Signal* signal) return; } else { jam(); + /* NO MORE HOLDOPS NEEDED */ lcpLocptr.p->lcpLocstate = LcpLocRecord::HOLDOP_READY; checkLcpHoldop(signal); + if (lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH) { if (fragptr.p->activeList == RNIL) { jam(); @@ -9973,6 +9979,7 @@ void Dblqh::execLCP_HOLDOPCONF(Signal* signal) }//if }//if }//if + /* ----------------------- */ /* ELSE */ /* ------------------------------------------------------------------------ @@ -10045,7 +10052,6 @@ void Dblqh::execTUP_LCPSTARTED(Signal* signal) void Dblqh::lcpStartedLab(Signal* signal) { checkLcpStarted(signal); - if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) { jam(); /* ---------------------------------------------------------------------- @@ -10064,7 +10070,7 @@ void Dblqh::lcpStartedLab(Signal* signal) sendAccContOp(signal); /* START OPERATIONS IN ACC */ moveAccActiveFrag(signal); /* MOVE FROM ACC BLOCKED LIST TO ACTIVE LIST ON FRAGMENT */ - }//if + } /*---------------*/ /* ELSE */ /*-------------------------------------------------------------------------*/ @@ -10125,32 +10131,27 @@ void Dblqh::execLQH_RESTART_OP(Signal* signal) lcpPtr.i = signal->theData[1]; ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord); - if (fragptr.p->fragStatus == Fragrecord::BLOCKED) { - if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) { - jam(); - /***********************************************************************/ - /* THIS SIGNAL 
CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND - * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE - * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED. - ***********************************************************************/ - restartOperationsLab(signal); - return; - } else { - jam(); - if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) { - jam(); - /*******************************************************************> - * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP - * ALL OPERATIONS AGAIN. - * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT - * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS. - *******************************************************************> */ - restartOperationsLab(signal); - return; - }//if - }//if - }//if - ndbrequire(false); + ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED); + if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) { + jam(); + /***********************************************************************/ + /* THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND + * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE + * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED. + ***********************************************************************/ + restartOperationsLab(signal); + } else if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) { + jam(); + /*******************************************************************> + * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP + * ALL OPERATIONS AGAIN. + * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT + * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS. 
+ *******************************************************************> */ + restartOperationsLab(signal); + } else { + ndbrequire(false); + } }//Dblqh::execLQH_RESTART_OP() void Dblqh::restartOperationsLab(Signal* signal) @@ -10203,13 +10204,13 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) * WHEN ARRIVING HERE THE OPERATION IS ALREADY SET IN THE ACTIVE LIST. * THUS WE CAN IMMEDIATELY CALL THE METHODS THAT EXECUTE FROM WHERE * THE OPERATION WAS STOPPED. - *------------------------------------------------------------------------- */ + *------------------------------------------------------------------------ */ switch (tcConnectptr.p->transactionState) { case TcConnectionrec::STOPPED: jam(); /*----------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND ACCKEYREQ - *----------------------------------------------------------------------- */ + *---------------------------------------------------------------------- */ prepareContinueAfterBlockedLab(signal); return; break; @@ -10217,7 +10218,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND ACC_COMMITREQ - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ releaseActiveFrag(signal); commitContinueAfterBlockedLab(signal); return; @@ -10226,7 +10227,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND ACC_ABORTREQ - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ abortContinueAfterBlockedLab(signal, true); return; break; @@ -10234,7 +10235,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* 
---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ continueCopyAfterBlockedLab(signal); return; break; @@ -10242,7 +10243,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ continueFirstCopyAfterBlockedLab(signal); return; break; @@ -10250,7 +10251,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; continueFirstScanAfterBlockedLab(signal); return; @@ -10259,7 +10260,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; continueAfterCheckLcpStopBlocked(signal); return; @@ -10268,7 +10269,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN - * 
---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; continueScanAfterBlockedLab(signal); return; @@ -10278,7 +10279,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING RELEASE * LOCKS IN SCAN - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; continueScanReleaseAfterBlockedLab(signal); return; @@ -10287,7 +10288,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF SCAN - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ continueCloseScanAfterBlockedLab(signal); return; break; @@ -10295,7 +10296,7 @@ void Dblqh::restartOperationsAfterStopLab(Signal* signal) jam(); /* ---------------------------------------------------------------------- * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF COPY - * ---------------------------------------------------------------------- */ + * --------------------------------------------------------------------- */ continueCloseCopyAfterBlockedLab(signal); return; break; @@ -10421,7 +10422,12 @@ void Dblqh::contChkpNextFragLab(Signal* signal) * ----------------------------------------------------------------------- */ if (fragptr.p->fragStatus == Fragrecord::BLOCKED) { jam(); + /** + * LCP of fragment complete + * but restarting of operations isn't + */ lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP; + 
//restartOperationsLab(signal); return; }//if @@ -10698,25 +10704,25 @@ void Dblqh::checkLcpStarted(Signal* signal) terrorCode = ZOK; clsLcpLocptr.i = lcpPtr.p->firstLcpLocAcc; + int i = 0; do { ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord); - if (clsLcpLocptr.p->lcpLocstate != LcpLocRecord::ACC_STARTED) { - ndbrequire((clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED) || - (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED)); + if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){ return; }//if clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc; + i++; } while (clsLcpLocptr.i != RNIL); + i = 0; clsLcpLocptr.i = lcpPtr.p->firstLcpLocTup; do { ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord); - if (clsLcpLocptr.p->lcpLocstate != LcpLocRecord::TUP_STARTED) { - ndbrequire((clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_COMPLETED) || - (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED)); + if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){ return; }//if clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc; + i++; } while (clsLcpLocptr.i != RNIL); lcpPtr.p->lcpState = LcpRecord::LCP_STARTED; }//Dblqh::checkLcpStarted() @@ -10874,18 +10880,28 @@ void Dblqh::sendAccContOp(Signal* signal) { LcpLocRecordPtr sacLcpLocptr; + int count = 0; sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc; do { ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord); sacLcpLocptr.p->accContCounter = 0; -/* ------------------------------------------------------------------------- */ -/*SEND START OPERATIONS TO ACC AGAIN */ -/* ------------------------------------------------------------------------- */ - signal->theData[0] = lcpPtr.p->lcpAccptr; - signal->theData[1] = sacLcpLocptr.p->locFragid; - sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA); + if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED){ + /* ------------------------------------------------------------------- */ + /*SEND 
START OPERATIONS TO ACC AGAIN */ + /* ------------------------------------------------------------------- */ + signal->theData[0] = lcpPtr.p->lcpAccptr; + signal->theData[1] = sacLcpLocptr.p->locFragid; + sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA); + count++; + } else if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED){ + signal->theData[0] = sacLcpLocptr.i; + sendSignal(reference(), GSN_ACC_CONTOPCONF, signal, 1, JBB); + } else { + ndbrequire(false); + } sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc; } while (sacLcpLocptr.i != RNIL); + }//Dblqh::sendAccContOp() /* ------------------------------------------------------------------------- */ diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index ee59e661cfb..7ad37401b9a 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -350,47 +350,46 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) return; } - case GSN_TRANSID_AI: - { - tFirstDataPtr = int2void(tFirstData); - assert(tFirstDataPtr); - if (tFirstDataPtr == 0) goto InvalidSignal; - NdbReceiver* tRec = void2rec(tFirstDataPtr); - assert(tRec->checkMagicNumber()); - assert(tRec->getTransaction()); - assert(tRec->getTransaction()->checkState_TransId(((const TransIdAI*)tDataPtr)->transId)); - if(tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) && - tCon->checkState_TransId(((const TransIdAI*)tDataPtr)->transId)){ - Uint32 com; - if(aSignal->m_noOfSections > 0){ - com = tRec->execTRANSID_AI(ptr[0].p, ptr[0].sz); - } else { - com = tRec->execTRANSID_AI(tDataPtr + TransIdAI::HeaderLength, - tLen - TransIdAI::HeaderLength); - } - - if(com == 1){ - switch(tRec->getType()){ - case NdbReceiver::NDB_OPERATION: - case NdbReceiver::NDB_INDEX_OPERATION: - if(tCon->OpCompleteSuccess() != -1){ - completedTransaction(tCon); - return; - } - break; - case NdbReceiver::NDB_SCANRECEIVER: - tCon->theScanningOp->receiver_delivered(tRec); - theWaiter.m_state = (tWaitState == 
WAIT_SCAN? NO_WAIT: tWaitState); - break; - default: - goto InvalidSignal; - } - } - break; + case GSN_TRANSID_AI:{ + tFirstDataPtr = int2void(tFirstData); + NdbReceiver* tRec; + if (tFirstDataPtr && (tRec = void2rec(tFirstDataPtr)) && + tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) && + tCon->checkState_TransId(((const TransIdAI*)tDataPtr)->transId)){ + Uint32 com; + if(aSignal->m_noOfSections > 0){ + com = tRec->execTRANSID_AI(ptr[0].p, ptr[0].sz); } else { - goto InvalidSignal; + com = tRec->execTRANSID_AI(tDataPtr + TransIdAI::HeaderLength, + tLen - TransIdAI::HeaderLength); } + + if(com == 1){ + switch(tRec->getType()){ + case NdbReceiver::NDB_OPERATION: + case NdbReceiver::NDB_INDEX_OPERATION: + if(tCon->OpCompleteSuccess() != -1){ + completedTransaction(tCon); + return; + } + break; + case NdbReceiver::NDB_SCANRECEIVER: + tCon->theScanningOp->receiver_delivered(tRec); + theWaiter.m_state = (tWaitState == WAIT_SCAN ? NO_WAIT : tWaitState); + break; + default: + goto InvalidSignal; + } + } + break; + } else { + /** + * This is ok as transaction can have been aborted before TRANSID_AI + * arrives (if TUP on other node than TC) + */ + return; } + } case GSN_TCKEY_FAILCONF: { tFirstDataPtr = int2void(tFirstData); @@ -695,7 +694,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) (tCon = void2con(tFirstDataPtr)) && (tCon->checkMagicNumber() == 0)){ if(aSignal->m_noOfSections > 0){ - tReturnCode = tCon->receiveSCAN_TABCONF(aSignal, ptr[0].p, ptr[0].sz); + tReturnCode = tCon->receiveSCAN_TABCONF(aSignal, + ptr[0].p, ptr[0].sz); } else { tReturnCode = tCon->receiveSCAN_TABCONF(aSignal, @@ -730,12 +730,11 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) } case GSN_KEYINFO20: { tFirstDataPtr = int2void(tFirstData); - if (tFirstDataPtr == 0) goto InvalidSignal; - NdbReceiver* tRec = void2rec(tFirstDataPtr); - - if(tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) && - 
tCon->checkState_TransId(&((const KeyInfo20*)tDataPtr)->transId1)){ - + NdbReceiver* tRec; + if (tFirstDataPtr && (tRec = void2rec(tFirstDataPtr)) && + tRec->checkMagicNumber() && (tCon = tRec->getTransaction()) && + tCon->checkState_TransId(&((const KeyInfo20*)tDataPtr)->transId1)){ + Uint32 len = ((const KeyInfo20*)tDataPtr)->keyLen; Uint32 info = ((const KeyInfo20*)tDataPtr)->scanInfo_Node; int com = -1; @@ -756,8 +755,13 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) goto InvalidSignal; } break; + } else { + /** + * This is ok as transaction can have been aborted before KEYINFO20 + * arrives (if TUP on other node than TC) + */ + return; } - goto InvalidSignal; } case GSN_TCINDXCONF:{ tFirstDataPtr = int2void(tFirstData); diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp index af25a36dde2..871179200c8 100644 --- a/ndb/test/ndbapi/testBasic.cpp +++ b/ndb/test/ndbapi/testBasic.cpp @@ -962,6 +962,7 @@ int runMassiveRollback(NDBT_Context* ctx, NDBT_Step* step){ const Uint32 OPS_TOTAL = 4096; for(int row = 0; row < records; row++){ + int res; CHECK(hugoOps.startTransaction(pNdb) == 0); for(int i = 0; igetNdbError(res); + CHECK(err.classification == NdbError::TimeoutExpired); + break; + } } if(result != NDBT_OK){ break; diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index dc94955d90b..882b9185ea8 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -1364,7 +1364,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, allocRows(batch); - g_info << "|- Updating records..." << endl; + g_info << "|- Updating records (batch=" << batch << ")..." 
<< endl; while (r < records){ if (retryAttempt >= retryMax){ From 24ee9709d2d536f6ced73be26bc5ef2a663a4b53 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 2 Aug 2004 10:57:09 +0200 Subject: [PATCH 46/93] Fix testOIBasic 1) Close transaction whenever Con goes out of scope so that it don't leave open transactions in TC 2) Close transaction when starting a transaction wo/ closing first 3) Allow 499 as deadlock 4) Don't use buddy as: 1) no need 2) harder to read signal log --- ndb/test/ndbapi/testOIBasic.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index 59640262f55..be0baaafe61 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -624,6 +624,11 @@ struct Con { Con() : m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_scanop(0), m_indexscanop(0), m_resultset(0), m_scanmode(ScanNo), m_errtype(ErrNone) {} + + ~Con(){ + if(m_tx) closeTransaction(); + } + int connect(); void disconnect(); int startTransaction(); @@ -674,7 +679,8 @@ Con::disconnect() int Con::startTransaction() { - assert(m_ndb != 0 && m_tx == 0); + assert(m_ndb != 0); + if(m_tx) closeTransaction(); CHKCON((m_tx = m_ndb->startTransaction()) != 0, *this); return 0; } @@ -824,7 +830,7 @@ Con::printerror(NdbOut& out) if (m_tx) { if ((code = m_tx->getNdbError().code) != 0) { LL0(++any << " con: error " << m_tx->getNdbError()); - if (code == 266 || code == 274 || code == 296 || code == 297) + if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499) m_errtype = ErrDeadlock; } if (m_op && m_op->getNdbError().code != 0) { @@ -2295,7 +2301,7 @@ scanupdatetable(Par par) // updating trans Con con2; con2.m_ndb = con.m_ndb; - CHK(con2.startBuddyTransaction(con) == 0); + CHK(con2.startTransaction(con) == 0); while (1) { int ret; CHK((ret = con.nextScanResult()) == 0 || ret == 1); @@ -2341,7 +2347,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& 
bset) // updating trans Con con2; con2.m_ndb = con.m_ndb; - CHK(con2.startBuddyTransaction(con) == 0); + CHK(con2.startTransaction(con) == 0); while (1) { int ret; CHK((ret = con.nextScanResult()) == 0 || ret == 1); From dec992d8fbeffeddf7032f8bd5b486a4b6033fd8 Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Mon, 2 Aug 2004 11:12:11 +0200 Subject: [PATCH 47/93] hang in dummy natural join (no common columns) Bug #4807 --- mysql-test/r/join.result | 6 +++++ mysql-test/t/join.test | 10 +++++++++ sql/sql_base.cc | 47 +++++++++++++++++++++------------------- 3 files changed, 41 insertions(+), 22 deletions(-) diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result index db9b051a58f..dc763472b0e 100644 --- a/mysql-test/r/join.result +++ b/mysql-test/r/join.result @@ -283,6 +283,12 @@ ID Value1 Value2 SELECT * FROM t1 NATURAL JOIN t2 WHERE (Value1 = 'A' AND Value2 <> 'B') AND 1; ID Value1 Value2 drop table t1,t2; +CREATE TABLE t1 (a int); +CREATE TABLE t2 (b int); +CREATE TABLE t3 (c int); +SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN t3; +a b c +DROP TABLE t1, t2, t3; create table t1 (i int); create table t2 (i int); create table t3 (i int); diff --git a/mysql-test/t/join.test b/mysql-test/t/join.test index bba5cdeee58..1d18e020543 100644 --- a/mysql-test/t/join.test +++ b/mysql-test/t/join.test @@ -284,6 +284,16 @@ SELECT * FROM t1 NATURAL JOIN t2 WHERE 1 AND Value1 = 'A' AND Value2 <> 'B'; SELECT * FROM t1 NATURAL JOIN t2 WHERE (Value1 = 'A' AND Value2 <> 'B') AND 1; drop table t1,t2; +# +# dummy natural join (no common columns) Bug #4807 +# + +CREATE TABLE t1 (a int); +CREATE TABLE t2 (b int); +CREATE TABLE t3 (c int); +SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN t3; +DROP TABLE t1, t2, t3; + # # Test combination of join methods # diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 1a923b2410a..dd8283e057a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2522,29 +2522,32 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) // to 
prevent natural join processing during PS re-execution table->natural_join= 0; - if (!table->outer_join) // Not left join + if (cond_and->list.elements) { - *conds= and_conds(*conds, cond_and); - // fix_fields() should be made with temporary memory pool - if (stmt) - thd->restore_backup_item_arena(stmt, &backup); - if (*conds && !(*conds)->fixed) - { - if ((*conds)->fix_fields(thd, tables, conds)) - DBUG_RETURN(1); - } - } - else - { - table->on_expr= and_conds(table->on_expr, cond_and); - // fix_fields() should be made with temporary memory pool - if (stmt) - thd->restore_backup_item_arena(stmt, &backup); - if (table->on_expr && !table->on_expr->fixed) - { - if (table->on_expr->fix_fields(thd, tables, &table->on_expr)) - DBUG_RETURN(1); - } + if (!table->outer_join) // Not left join + { + *conds= and_conds(*conds, cond_and); + // fix_fields() should be made with temporary memory pool + if (stmt) + thd->restore_backup_item_arena(stmt, &backup); + if (*conds && !(*conds)->fixed) + { + if ((*conds)->fix_fields(thd, tables, conds)) + DBUG_RETURN(1); + } + } + else + { + table->on_expr= and_conds(table->on_expr, cond_and); + // fix_fields() should be made with temporary memory pool + if (stmt) + thd->restore_backup_item_arena(stmt, &backup); + if (table->on_expr && !table->on_expr->fixed) + { + if (table->on_expr->fix_fields(thd, tables, &table->on_expr)) + DBUG_RETURN(1); + } + } } } } From a2f866e909de564a891367fef43711554135c063 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 2 Aug 2004 13:44:22 +0200 Subject: [PATCH 48/93] Fix ndb detection of gcc --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index afcc60942ce..6fe3a29ca99 100644 --- a/configure.in +++ b/configure.in @@ -396,7 +396,7 @@ then # we will gets some problems when linking static programs. # The following code is used to fix this problem. 
- if test "$CXX" = "gcc" -o "$CXX" = "ccache gcc" + if echo $CXX | grep gcc > /dev/null 2>&1 then if $CXX -v 2>&1 | grep 'version 3' > /dev/null 2>&1 then From de8e5c069b595ebaaf4dfd30a400332d78f185e0 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 2 Aug 2004 13:56:14 +0200 Subject: [PATCH 49/93] testOIBasic --- ndb/test/ndbapi/testOIBasic.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index be0baaafe61..fddcd7ef346 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -2301,7 +2301,7 @@ scanupdatetable(Par par) // updating trans Con con2; con2.m_ndb = con.m_ndb; - CHK(con2.startTransaction(con) == 0); + CHK(con2.startTransaction() == 0); while (1) { int ret; CHK((ret = con.nextScanResult()) == 0 || ret == 1); @@ -2347,7 +2347,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) // updating trans Con con2; con2.m_ndb = con.m_ndb; - CHK(con2.startTransaction(con) == 0); + CHK(con2.startTransaction() == 0); while (1) { int ret; CHK((ret = con.nextScanResult()) == 0 || ret == 1); From 9d987304fa11c4ca8b91229ed9ff18428aed1b79 Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Tue, 3 Aug 2004 02:36:21 +0200 Subject: [PATCH 50/93] - added option "--bundled-zlib" to compile using the included compression library --- Build-tools/Do-compile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile index e6e71582c74..c17995f5779 100755 --- a/Build-tools/Do-compile +++ b/Build-tools/Do-compile @@ -10,12 +10,13 @@ use Sys::Hostname; $opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env=""; $opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix=""; $opt_tmp=$opt_version_suffix=""; 
-$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0; +$opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_cluster=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0; $opt_innodb=$opt_bdb=$opt_raid=$opt_libwrap=$opt_clearlogs=0; GetOptions( "bdb", "build-thread=i", + "bundled-zlib", "config-env=s" => \@config_env, "config-extra-env=s" => \@config_extra_env, "config-options=s" => \@config_options, @@ -255,6 +256,7 @@ if ($opt_stage <= 1) log_system("$make clean") if ($opt_use_old_distribution); $opt_config_options.= " --disable-shared" if (!$opt_enable_shared); # Default for binary versions $opt_config_options.= " --with-berkeley-db" if ($opt_bdb); + $opt_config_options.= " --with-zlib-dir=bundled" if ($opt_bundled_zlib); $opt_config_options.= " --with-client-ldflags=-all-static" if ($opt_static_client); $opt_config_options.= " --with-debug" if ($opt_with_debug); $opt_config_options.= " --with-libwrap" if ($opt_libwrap); From 4078c1ce0308f7fa6c8df19004c960316a588d68 Mon Sep 17 00:00:00 2001 From: "lenz@mysql.com" <> Date: Tue, 3 Aug 2004 11:28:37 +0200 Subject: [PATCH 51/93] - removed several C++-style comments (//) - these confuse the IBM compiler --- sql-common/my_time.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sql-common/my_time.c 
b/sql-common/my_time.c index df852ad8880..855e92d6648 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -680,12 +680,12 @@ my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap) /* Get difference in days */ int days= t->day - l_time->tm_mday; if (days < -1) - days= 1; // Month has wrapped + days= 1; /* Month has wrapped */ else if (days > 1) days= -1; diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour)) + (long) (60*((int) t->minute - (int) l_time->tm_min))); - current_timezone+= diff+3600; // Compensate for -3600 above + current_timezone+= diff+3600; /* Compensate for -3600 above */ tmp+= (time_t) diff; localtime_r(&tmp,&tm_tmp); l_time=&tm_tmp; @@ -698,15 +698,15 @@ my_system_gmt_sec(const MYSQL_TIME *t, long *my_timezone, bool *in_dst_time_gap) { int days= t->day - l_time->tm_mday; if (days < -1) - days=1; // Month has wrapped + days=1; /* Month has wrapped */ else if (days > 1) days= -1; diff=(3600L*(long) (days*24+((int) t->hour - (int) l_time->tm_hour))+ (long) (60*((int) t->minute - (int) l_time->tm_min))); if (diff == 3600) - tmp+=3600 - t->minute*60 - t->second; // Move to next hour + tmp+=3600 - t->minute*60 - t->second; /* Move to next hour */ else if (diff == -3600) - tmp-=t->minute*60 + t->second; // Move to previous hour + tmp-=t->minute*60 + t->second; /* Move to previous hour */ *in_dst_time_gap= 1; } From d861fa8577462ed85aa50713a0ab7207cfc24a40 Mon Sep 17 00:00:00 2001 From: "mysqldev@o2k.irixworld.net" <> Date: Tue, 3 Aug 2004 13:54:55 +0200 Subject: [PATCH 52/93] Fix duplicate declaration in NDB cluster handler --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5b36d6d2b55..8f23a0f3919 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -403,7 +403,7 @@ int ha_ndbcluster::build_index_list() DBUG_ENTER("build_index_list"); // Save information about all known indexes - for (uint i= 0; i < 
table->keys; i++) + for (i= 0; i < table->keys; i++) { NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); m_indextype[i]= idx_type; From 954542b0c9d999bad9fbf618f98f8f5e749f7228 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Tue, 3 Aug 2004 14:58:44 +0200 Subject: [PATCH 53/93] more 4.1 to irix merge --- ndb/src/kernel/main.cpp | 12 ++++++++---- ndb/src/mgmsrv/MgmtSrvr.cpp | 6 ++++-- ndb/src/ndbapi/TransporterFacade.cpp | 12 ++++++++---- ndb/test/src/HugoOperations.cpp | 2 -- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index f2896cfdd8e..f8e852b9d35 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -175,11 +175,15 @@ NDB_MAIN(ndb_kernel){ globalTransporterRegistry.startSending(); globalTransporterRegistry.startReceiving(); - if (!globalTransporterRegistry.start_service(socket_server)) - NDB_ASSERT(0, "globalTransporterRegistry.start_service() failed"); + if (!globalTransporterRegistry.start_service(socket_server)){ + ndbout_c("globalTransporterRegistry.start_service() failed"); + exit(-1); + } - if (!globalTransporterRegistry.start_clients()) - NDB_ASSERT(0, "globalTransporterRegistry.start_clients() failed"); + if (!globalTransporterRegistry.start_clients()){ + ndbout_c("globalTransporterRegistry.start_clients() failed"); + exit(-1); + } globalEmulatorData.theWatchDog->doStart(); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 511572b31f1..ca77ae9fb63 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -587,8 +587,10 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _ownNodeId= 0; // did not get nodeid requested } m_allocated_resources.reserve_node(_ownNodeId); - } else - NDB_ASSERT(0, "Unable to retrieve own node id"); + } else { + ndbout_c("Unable to retrieve own node id"); + exit(-1); + } } diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index a52547954a0..6a25db560c9 
100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -463,11 +463,15 @@ void TransporterFacade::threadMainSend(void) SocketServer socket_server; theTransporterRegistry->startSending(); - if (!theTransporterRegistry->start_service(socket_server)) - NDB_ASSERT(0, "Unable to start theTransporterRegistry->start_service"); + if (!theTransporterRegistry->start_service(socket_server)){ + ndbout_c("Unable to start theTransporterRegistry->start_service"); + exit(0); + } - if (!theTransporterRegistry->start_clients()) - NDB_ASSERT(0, "Unable to start theTransporterRegistry->start_clients"); + if (!theTransporterRegistry->start_clients()){ + ndbout_c("Unable to start theTransporterRegistry->start_clients"); + exit(0); + } socket_server.startServer(); diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp index 821cd8ad1e0..d5dbf1388d1 100644 --- a/ndb/test/src/HugoOperations.cpp +++ b/ndb/test/src/HugoOperations.cpp @@ -703,5 +703,3 @@ HugoOperations::indexUpdateRecord(Ndb*, } return NDBT_OK; } - -template class Vector; From 7786fce2fd11bca0c4ba403e6117b5e18931b4b5 Mon Sep 17 00:00:00 2001 From: "mysqldev@o2k.irixworld.net" <> Date: Tue, 3 Aug 2004 17:16:57 +0200 Subject: [PATCH 54/93] Compile fixes for irix --- ndb/include/util/Bitmask.hpp | 38 +++---- .../debugger/signaldata/SignalDataPrint.cpp | 4 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 1 + ndb/test/ndbapi/flexAsynch.cpp | 14 +-- ndb/test/ndbapi/flexBench.cpp | 20 ++-- ndb/test/ndbapi/flexTT.cpp | 6 +- ndb/test/ndbapi/testBasic.cpp | 22 ++-- ndb/test/ndbapi/testBlobs.cpp | 14 +-- ndb/test/src/HugoAsynchTransactions.cpp | 32 +++--- ndb/test/src/HugoOperations.cpp | 45 ++++---- ndb/test/src/HugoTransactions.cpp | 100 +++++++++--------- ndb/test/src/NdbRestarts.cpp | 12 +-- ndb/test/src/UtilTransactions.cpp | 10 +- ndb/test/tools/cpcc.cpp | 6 +- ndb/test/tools/hugoPkReadRecord.cpp | 9 +- 15 files changed, 171 insertions(+), 162 deletions(-) diff --git 
a/ndb/include/util/Bitmask.hpp b/ndb/include/util/Bitmask.hpp index 24e51c6224f..bb217adab5f 100644 --- a/ndb/include/util/Bitmask.hpp +++ b/ndb/include/util/Bitmask.hpp @@ -488,14 +488,14 @@ template inline void BitmaskPOD::assign(const typename BitmaskPOD::Data & src) { - assign(rep.data, src.data); + BitmaskPOD::assign(rep.data, src.data); } template inline void BitmaskPOD::assign(const BitmaskPOD & src) { - assign(rep.data, src.rep.data); + BitmaskPOD::assign(rep.data, src.rep.data); } template @@ -523,7 +523,7 @@ template inline bool BitmaskPOD::get(unsigned n) const { - return get(rep.data, n); + return BitmaskPOD::get(rep.data, n); } template @@ -537,7 +537,7 @@ template inline void BitmaskPOD::set(unsigned n, bool value) { - set(rep.data, n, value); + BitmaskPOD::set(rep.data, n, value); } template @@ -551,7 +551,7 @@ template inline void BitmaskPOD::set(unsigned n) { - set(rep.data, n); + BitmaskPOD::set(rep.data, n); } template @@ -565,7 +565,7 @@ template inline void BitmaskPOD::set() { - set(rep.data); + BitmaskPOD::set(rep.data); } template @@ -579,7 +579,7 @@ template inline void BitmaskPOD::clear(unsigned n) { - clear(rep.data, n); + BitmaskPOD::clear(rep.data, n); } template @@ -593,7 +593,7 @@ template inline void BitmaskPOD::clear() { - clear(rep.data); + BitmaskPOD::clear(rep.data); } template @@ -607,7 +607,7 @@ template inline bool BitmaskPOD::isclear() const { - return isclear(rep.data); + return BitmaskPOD::isclear(rep.data); } template @@ -621,7 +621,7 @@ template inline unsigned BitmaskPOD::count() const { - return count(rep.data); + return BitmaskPOD::count(rep.data); } template @@ -635,7 +635,7 @@ template inline unsigned BitmaskPOD::find(unsigned n) const { - return find(rep.data, n); + return BitmaskPOD::find(rep.data, n); } template @@ -649,7 +649,7 @@ template inline bool BitmaskPOD::equal(const BitmaskPOD& mask2) const { - return equal(rep.data, mask2.rep.data); + return BitmaskPOD::equal(rep.data, mask2.rep.data); } template @@ 
-663,7 +663,7 @@ template inline BitmaskPOD& BitmaskPOD::bitOR(const BitmaskPOD& mask2) { - bitOR(rep.data, mask2.rep.data); + BitmaskPOD::bitOR(rep.data, mask2.rep.data); return *this; } @@ -678,7 +678,7 @@ template inline BitmaskPOD& BitmaskPOD::bitAND(const BitmaskPOD& mask2) { - bitAND(rep.data, mask2.rep.data); + BitmaskPOD::bitAND(rep.data, mask2.rep.data); return *this; } @@ -693,7 +693,7 @@ template inline BitmaskPOD& BitmaskPOD::bitANDC(const BitmaskPOD& mask2) { - bitANDC(rep.data, mask2.rep.data); + BitmaskPOD::bitANDC(rep.data, mask2.rep.data); return *this; } @@ -708,7 +708,7 @@ template inline BitmaskPOD& BitmaskPOD::bitXOR(const BitmaskPOD& mask2) { - bitXOR(rep.data, mask2.rep.data); + BitmaskPOD::bitXOR(rep.data, mask2.rep.data); return *this; } @@ -723,7 +723,7 @@ template inline char * BitmaskPOD::getText(char* buf) const { - return getText(rep.data, buf); + return BitmaskPOD::getText(rep.data, buf); } template @@ -737,7 +737,7 @@ template inline bool BitmaskPOD::contains(BitmaskPOD that) { - return contains(this->rep.data, that.rep.data); + return BitmaskPOD::contains(this->rep.data, that.rep.data); } template @@ -751,7 +751,7 @@ template inline bool BitmaskPOD::overlaps(BitmaskPOD that) { - return overlaps(this->rep.data, that.rep.data); + return BitmaskPOD::overlaps(this->rep.data, that.rep.data); } template diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp index 4f4cf645b39..6227a9994d1 100644 --- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp +++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp @@ -261,4 +261,6 @@ const unsigned short NO_OF_PRINT_FUNCTIONS = sizeof(SignalDataPrintFunctions)/si template class Bitmask<1>; template class Bitmask<2>; template class Bitmask<4>; - +template struct BitmaskPOD<1>; +template struct BitmaskPOD<2>; +template struct BitmaskPOD<4>; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp 
b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 7291c78c8f1..6e95f5c5622 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -2822,6 +2822,7 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal, } } +template class Vector; template class Vector; template class Vector >; template class Vector; diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp index 396ac06c87a..9192ec21b93 100644 --- a/ndb/test/ndbapi/flexAsynch.cpp +++ b/ndb/test/ndbapi/flexAsynch.cpp @@ -146,7 +146,7 @@ tellThreads(StartType what) NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) { ThreadNdb* pThreadData; - int tLoops=0; + int tLoops=0, i; int returnValue = NDBT_OK; flexAsynchErrorData = new ErrorData; @@ -256,7 +256,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) PRINT_TIMER("insert", noOfTransacts, tNoOfOpsPerTrans); if (0 < failed) { - int i = retry_opt ; + i = retry_opt ; int ci = 1 ; while (0 < failed && 0 < i){ ndbout << failed << " of the transactions returned errors!" @@ -293,7 +293,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) PRINT_TIMER("read", noOfTransacts, tNoOfOpsPerTrans); if (0 < failed) { - int i = retry_opt ; + i = retry_opt ; int cr = 1; while (0 < failed && 0 < i){ ndbout << failed << " of the transactions returned errors!"<resetErrorCounters(); @@ -250,7 +250,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535) * Create NDB objects. 
* ****************************************************************/ resetThreads(); - for (int i = 0; i < tNoOfThreads ; i++) { + for (i = 0; i < tNoOfThreads ; i++) { pThreadData[i].threadNo = i; threadLife[i] = NdbThread_Create(threadLoop, (void**)&pThreadData[i], @@ -301,7 +301,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535) execute(stStop); void * tmp; - for(int i = 0; igetNdbOperation(tab->getName()); if (pOp == NULL){ ERR(pTrans->getNdbError()); @@ -280,7 +280,7 @@ readOneNoCommit(Ndb* pNdb, NdbConnection* pTrans, } // Define primary keys - for(int a = 0; agetNoOfColumns(); a++){ + for(a = 0; agetNoOfColumns(); a++){ if (tab->getColumn(a)->getPrimaryKey() == true){ if(tmp.equalForAttr(pOp, a, 0) != 0){ ERR(pTrans->getNdbError()); @@ -290,7 +290,7 @@ readOneNoCommit(Ndb* pNdb, NdbConnection* pTrans, } // Define attributes to read - for(int a = 0; agetNoOfColumns(); a++){ + for(a = 0; agetNoOfColumns(); a++){ if((row->attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -639,35 +639,35 @@ int runNoCommitRollback630(NDBT_Context* ctx, NDBT_Step* step){ int runNoCommitAndClose(NDBT_Context* ctx, NDBT_Step* step){ - int result = NDBT_OK; + int i, result = NDBT_OK; HugoOperations hugoOps(*ctx->getTab()); Ndb* pNdb = GETNDB(step); do{ // Read CHECK(hugoOps.startTransaction(pNdb) == 0); - for (int i = 0; i < 10; i++) + for (i = 0; i < 10; i++) CHECK(hugoOps.pkReadRecord(pNdb, i, true) == 0); CHECK(hugoOps.execute_NoCommit(pNdb) == 0); CHECK(hugoOps.closeTransaction(pNdb) == 0); // Update CHECK(hugoOps.startTransaction(pNdb) == 0); - for (int i = 0; i < 10; i++) + for (i = 0; i < 10; i++) CHECK(hugoOps.pkUpdateRecord(pNdb, i) == 0); CHECK(hugoOps.execute_NoCommit(pNdb) == 0); CHECK(hugoOps.closeTransaction(pNdb) == 0); // Delete CHECK(hugoOps.startTransaction(pNdb) == 0); - for (int i = 0; i < 10; i++) + for (i = 0; i < 10; i++) CHECK(hugoOps.pkDeleteRecord(pNdb, i) == 0); 
CHECK(hugoOps.execute_NoCommit(pNdb) == 0); CHECK(hugoOps.closeTransaction(pNdb) == 0); // Try to insert, record should already exist CHECK(hugoOps.startTransaction(pNdb) == 0); - for (int i = 0; i < 10; i++) + for (i = 0; i < 10; i++) CHECK(hugoOps.pkInsertRecord(pNdb, i) == 0); CHECK(hugoOps.execute_Commit(pNdb) == 630); CHECK(hugoOps.closeTransaction(pNdb) == 0); @@ -781,14 +781,14 @@ int runCheckRollbackDeleteMultiple(NDBT_Context* ctx, NDBT_Step* step){ CHECK(hugoOps.closeTransaction(pNdb) == 0); Uint32 updatesValue = 0; - + Uint32 j; for(Uint32 i = 0; i<1; i++){ // Read record 5 - 10 CHECK(hugoOps.startTransaction(pNdb) == 0); CHECK(hugoOps.pkReadRecord(pNdb, 5, true, 10) == 0); CHECK(hugoOps.execute_NoCommit(pNdb) == 0); - for(Uint32 j = 0; j<10; j++){ + for(j = 0; j<10; j++){ // Update record 5 - 10 updatesValue++; CHECK(hugoOps.pkUpdateRecord(pNdb, 5, 10, updatesValue) == 0); @@ -799,7 +799,7 @@ int runCheckRollbackDeleteMultiple(NDBT_Context* ctx, NDBT_Step* step){ CHECK(hugoOps.verifyUpdatesValue(updatesValue) == 0); } - for(Uint32 j = 0; j<10; j++){ + for(j = 0; j<10; j++){ // Delete record 5 - 10 times CHECK(hugoOps.pkDeleteRecord(pNdb, 5, 10) == 0); CHECK(hugoOps.execute_NoCommit(pNdb) == 0); diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp index 6ffac3028b1..64881ca39ab 100644 --- a/ndb/test/ndbapi/testBlobs.cpp +++ b/ndb/test/ndbapi/testBlobs.cpp @@ -1170,6 +1170,7 @@ deleteScan(bool idx) static int testmain() { + int style; g_ndb = new Ndb("TEST_DB"); CHK(g_ndb->init() == 0); CHK(g_ndb->waitUntilReady() == 0); @@ -1197,7 +1198,7 @@ testmain() if (g_opt.m_seed == 0) srandom(g_loop); // pk - for (int style = 0; style <= 2; style++) { + for (style = 0; style <= 2; style++) { if (skipcase('k') || skipstyle(style)) continue; DBG("--- pk ops " << stylename[style] << " ---"); @@ -1215,7 +1216,7 @@ testmain() CHK(verifyBlob() == 0); } // hash index - for (int style = 0; style <= 2; style++) { + for (style = 0; style <= 2; style++) 
{ if (skipcase('i') || skipstyle(style)) continue; DBG("--- idx ops " << stylename[style] << " ---"); @@ -1233,7 +1234,7 @@ testmain() CHK(verifyBlob() == 0); } // scan table - for (int style = 0; style <= 2; style++) { + for (style = 0; style <= 2; style++) { if (skipcase('s') || skipstyle(style)) continue; DBG("--- table scan " << stylename[style] << " ---"); @@ -1249,7 +1250,7 @@ testmain() CHK(verifyBlob() == 0); } // scan index - for (int style = 0; style <= 2; style++) { + for (style = 0; style <= 2; style++) { if (skipcase('r') || skipstyle(style)) continue; DBG("--- index scan " << stylename[style] << " ---"); @@ -1274,6 +1275,7 @@ testmain() static int bugtest_4088() { + unsigned i; DBG("bug test 4088 - ndb api hang with mixed ops on index table"); // insert rows calcTups(false); @@ -1285,7 +1287,7 @@ bugtest_4088() // read table pk via index as a table const unsigned pkcnt = 2; Tup pktup[pkcnt]; - for (unsigned i = 0; i < pkcnt; i++) { + for (i = 0; i < pkcnt; i++) { char name[20]; // XXX guess table id sprintf(name, "%d/%s", 4, g_opt.m_x1name); @@ -1304,7 +1306,7 @@ bugtest_4088() // BUG 4088: gets 1 tckeyconf, 1 tcindxconf, then hangs CHK(g_con->execute(Commit) == 0); // verify - for (unsigned i = 0; i < pkcnt; i++) { + for (i = 0; i < pkcnt; i++) { CHK(pktup[i].m_pk1 == tup.m_pk1); CHK(memcmp(pktup[i].m_pk2, tup.m_pk2, g_opt.m_pk2len) == 0); } diff --git a/ndb/test/src/HugoAsynchTransactions.cpp b/ndb/test/src/HugoAsynchTransactions.cpp index 5bedf26aa62..f75293f5a14 100644 --- a/ndb/test/src/HugoAsynchTransactions.cpp +++ b/ndb/test/src/HugoAsynchTransactions.cpp @@ -165,12 +165,13 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, allocRows(trans*operations); allocTransactions(trans); + int a, t, r; for (int i = 0; i < batch; i++) { // For each batch while (cRecords < records*batch) { cTrans = 0; cReadIndex = 0; - for (int t = 0; t < trans; t++) { // For each transaction + for (t = 0; t < trans; t++) { // For each transaction transactions[t] 
= pNdb->startTransaction(); if (transactions[t] == NULL) { ERR(pNdb->getNdbError()); @@ -187,7 +188,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, // Read // Define primary keys check = pOp->readTupleExclusive(); - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (tab.getColumn(a)->getPrimaryKey() == true) { if (equalForAttr(pOp, a, cReadRecords) != 0){ ERR(transactions[t]->getNdbError()); @@ -197,7 +198,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, } } // Define attributes to read - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if ((rows[cReadIndex]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(transactions[t]->getNdbError()); @@ -225,7 +226,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, pNdb->sendPollNdb(3000, 0, 0); // Verify the data! - for (int r = 0; r < trans*operations; r++) { + for (r = 0; r < trans*operations; r++) { if (calc.verifyRowValues(rows[r]) != 0) { g_info << "|- Verify failed..." 
<< endl; // Close all transactions @@ -239,7 +240,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, // Update cTrans = 0; cIndex = 0; - for (int t = 0; t < trans; t++) { // For each transaction + for (t = 0; t < trans; t++) { // For each transaction for (int k = 0; k < operations; k++) { // For each operation NdbOperation* pOp = transactions[t]->getNdbOperation(tab.getName()); if (pOp == NULL) { @@ -258,7 +259,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, } // Set search condition for the record - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (tab.getColumn(a)->getPrimaryKey() == true) { if (equalForAttr(pOp, a, cRecords) != 0) { ERR(transactions[t]->getNdbError()); @@ -269,7 +270,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, } // Update the record - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (tab.getColumn(a)->getPrimaryKey() == false) { if (setValueForAttr(pOp, a, cRecords, updates) != 0) { ERR(transactions[t]->getNdbError()); @@ -298,7 +299,7 @@ HugoAsynchTransactions::pkUpdateRecordsAsynch(Ndb* pNdb, pNdb->sendPollNdb(3000, 0, 0); // Close all transactions - for (int t = 0; t < cTrans; t++) { + for (t = 0; t < cTrans; t++) { pNdb->closeTransaction(transactions[t]); } @@ -346,6 +347,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, int cTrans = 0; int cRecords = 0; int cIndex = 0; + int a,t,r; transactionsCompleted = 0; allocTransactions(trans); @@ -354,7 +356,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, while (cRecords < records*batch) { cTrans = 0; cIndex = 0; - for (int t = 0; t < trans; t++) { // For each transaction + for (t = 0; t < trans; t++) { // For each transaction transactions[t] = pNdb->startTransaction(); if (transactions[t] == NULL) { ERR(pNdb->getNdbError()); @@ -379,7 +381,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, } // Set a calculated value 
for each attribute in this table - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (setValueForAttr(pOp, a, cRecords, 0 ) != 0) { ERR(transactions[t]->getNdbError()); pNdb->closeTransaction(transactions[t]); @@ -394,7 +396,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, case NO_READ: // Define primary keys check = pOp->readTuple(); - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (tab.getColumn(a)->getPrimaryKey() == true) { if (equalForAttr(pOp, a, cRecords) != 0){ ERR(transactions[t]->getNdbError()); @@ -404,7 +406,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, } } // Define attributes to read - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if ((rows[cIndex]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(transactions[t]->getNdbError()); @@ -423,7 +425,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, } // Define primary keys - for (int a = 0; a < tab.getNoOfColumns(); a++) { + for (a = 0; a < tab.getNoOfColumns(); a++) { if (tab.getColumn(a)->getPrimaryKey() == true){ if (equalForAttr(pOp, a, cRecords) != 0) { ERR(transactions[t]->getNdbError()); @@ -462,7 +464,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, switch (theOperation) { case NO_READ: // Verify the data! - for (int r = 0; r < trans*operations; r++) { + for (r = 0; r < trans*operations; r++) { if (calc.verifyRowValues(rows[r]) != 0) { g_info << "|- Verify failed..." 
<< endl; // Close all transactions @@ -480,7 +482,7 @@ HugoAsynchTransactions::executeAsynchOperation(Ndb* pNdb, } // Close all transactions - for (int t = 0; t < cTrans; t++) { + for (t = 0; t < cTrans; t++) { pNdb->closeTransaction(transactions[t]); } diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp index d5dbf1388d1..ef37bd815da 100644 --- a/ndb/test/src/HugoOperations.cpp +++ b/ndb/test/src/HugoOperations.cpp @@ -51,7 +51,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb, int recordNo, bool exclusive, int numRecords){ - + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -71,7 +71,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -81,7 +81,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -95,7 +95,7 @@ int HugoOperations::pkReadRecord(Ndb* pNdb, int HugoOperations::pkDirtyReadRecord(Ndb* pNdb, int recordNo, int numRecords){ - + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -113,7 +113,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -123,7 +123,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -137,7 +137,7 @@ int HugoOperations::pkDirtyReadRecord(Ndb* pNdb, int HugoOperations::pkSimpleReadRecord(Ndb* pNdb, int recordNo, int numRecords){ - + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -155,7 +155,7 @@ int HugoOperations::pkSimpleReadRecord(Ndb* pNdb, } 
// Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -165,7 +165,7 @@ int HugoOperations::pkSimpleReadRecord(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -180,7 +180,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb, int recordNo, int numRecords, int updatesValue){ - + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -197,7 +197,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -207,7 +207,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb, } // Define attributes to update - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){ ERR(pTrans->getNdbError()); @@ -224,7 +224,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb, int numRecords, int updatesValue){ - int check; + int a, check; for(int r=0; r < numRecords; r++){ NdbOperation* pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { @@ -239,7 +239,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -249,7 +249,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb, } // Define attributes to update - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){ ERR(pTrans->getNdbError()); @@ -265,7 +265,7 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb, int recordNo, int numRecords){ - int check; + int a, check; for(int r=0; r < numRecords; r++){ NdbOperation* pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { @@ -280,7 +280,7 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb, } // Define primary 
keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -619,6 +619,7 @@ int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo, bool exclusive, int numRecords){ + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -638,7 +639,7 @@ int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -648,7 +649,7 @@ int HugoOperations::indexReadRecords(Ndb*, const char * idxName, int recordNo, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -665,7 +666,7 @@ HugoOperations::indexUpdateRecord(Ndb*, int recordNo, int numRecords, int updatesValue){ - + int a; allocRows(numRecords); int check; for(int r=0; r < numRecords; r++){ @@ -682,7 +683,7 @@ HugoOperations::indexUpdateRecord(Ndb*, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+recordNo) != 0){ ERR(pTrans->getNdbError()); @@ -692,7 +693,7 @@ HugoOperations::indexUpdateRecord(Ndb*, } // Define attributes to update - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){ ERR(pTrans->getNdbError()); diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 744ba08d62b..bd90908a01a 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -46,7 +46,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbScanOperation *pOp; @@ -96,7 +96,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int a = 0; agetValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); 
@@ -220,7 +220,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, #else int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbOperation *pOp; @@ -269,7 +269,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, } // Read all attributes from this table - for(int a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); @@ -387,7 +387,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, #else int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbOperation *pOp; @@ -436,7 +436,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, } // Read all attributes from this table - for(int a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); @@ -553,7 +553,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, int parallelism){ int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbScanOperation *pOp; @@ -592,7 +592,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, } // Read all attributes from this table - for(int a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); @@ -634,7 +634,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, } const int updates = calc.getUpdatesValue(&row) + 1; const int r = calc.getIdValue(&row); - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pUp, a, r, updates ) != 0){ ERR(pTrans->getNdbError()); @@ -695,7 +695,7 @@ HugoTransactions::loadTable(Ndb* pNdb, bool allowConstraintViolation, int doSleep, bool oneTrans){ - int check; + int check, a; int retryAttempt = 0; int retryMax = 5; NdbConnection *pTrans; @@ -763,7 +763,7 @@ HugoTransactions::loadTable(Ndb* pNdb, } // Set a calculated value for each attribute in this table - for (int a = 0; agetNdbError()); pNdb->closeTransaction(pTrans); @@ -838,7 +838,7 @@ 
HugoTransactions::loadTable(Ndb* pNdb, int HugoTransactions::fillTable(Ndb* pNdb, int batch){ - int check; + int check, a, b; int retryAttempt = 0; int retryMax = 5; NdbConnection *pTrans; @@ -869,7 +869,7 @@ HugoTransactions::fillTable(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; b < batch; b++){ + for(b = 0; b < batch; b++){ pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { @@ -886,7 +886,7 @@ HugoTransactions::fillTable(Ndb* pNdb, } // Set a calculated value for each attribute in this table - for (int a = 0; agetNdbError()); pNdb->closeTransaction(pTrans); @@ -1025,7 +1025,7 @@ int HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, int records) { int myXXXXX = XXXXX++; - + Uint32 i; const char function[] = "HugoTransactions::eventOperation: "; struct receivedEvent* recInsertEvent; NdbAutoObjArrayPtr @@ -1042,7 +1042,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, stats.n_duplicates = 0; stats.n_inconsistent_gcis = 0; - for (int i = 0; i < records; i++) { + for (i = 0; i < records; i++) { recInsertEvent[i].pk = 0xFFFFFFFF; recInsertEvent[i].count = 0; recInsertEvent[i].event = 0xFFFFFFFF; @@ -1150,7 +1150,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, } g_info << "overrun " << overrun << " pk " << pk; - for (int i = 1; i < noEventColumnName; i++) { + for (i = 1; i < noEventColumnName; i++) { if (recAttr[i]->isNULL() >= 0) { // we have a value g_info << " post[" << i << "]="; if (recAttr[i]->isNULL() == 0) // we have a non-null value @@ -1193,7 +1193,7 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, if (stats.n_updates > 0) { stats.n_consecutive++; } - for (Uint32 i = 0; i < (Uint32)records/3; i++) { + for (i = 0; i < (Uint32)records/3; i++) { if (recInsertEvent[i].pk != i) { stats.n_consecutive ++; ndbout << "missing insert pk " << i << endl; @@ -1232,7 +1232,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; 
NdbConnection *pTrans; NdbOperation *pOp; @@ -1284,7 +1284,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -1295,7 +1295,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -1358,7 +1358,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a, b; NdbConnection *pTrans; NdbOperation *pOp; @@ -1390,7 +1390,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; bgetNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); @@ -1406,7 +1406,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -1417,7 +1417,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -1443,7 +1443,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; bcloseTransaction(pTrans); return NDBT_FAILED; @@ -1466,7 +1466,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -1476,7 +1476,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, } } - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){ ERR(pTrans->getNdbError()); @@ -1526,7 +1526,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; while 
(r < records){ @@ -1566,7 +1566,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r) != 0){ ERR(pTrans->getNdbError()); @@ -1577,7 +1577,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // Read update value - for(int a = 0; agetValue(tab.getColumn(a)->getName())) == 0) { @@ -1622,7 +1622,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // PKs - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r) != 0){ ERR(pTrans->getNdbError()); @@ -1633,7 +1633,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // Update col - for(int a = 0; agetPrimaryKey() == false) && (calc.isUpdateCol(a) == true)){ @@ -1650,7 +1650,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // Remaining attributes - for(int a = 0; agetPrimaryKey() == false) && (calc.isUpdateCol(a) == false)){ if(setValueForAttr(pUpdOp, a, r, updates ) != 0){ @@ -1705,7 +1705,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbOperation *pOp; @@ -1750,7 +1750,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r) != 0){ ERR(pTrans->getNdbError()); @@ -1820,7 +1820,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a, b; NdbConnection *pTrans; NdbOperation *pOp; @@ -1857,7 +1857,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; (bgetNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); @@ -1873,7 +1873,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -1884,7 +1884,7 @@ 
HugoTransactions::lockRecords(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -1967,7 +1967,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans; NdbOperation *pOp; NdbIndexScanOperation *sOp; @@ -2039,7 +2039,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -2050,7 +2050,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -2118,7 +2118,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, int r = 0; int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a, b; NdbConnection *pTrans; NdbOperation *pOp; NdbScanOperation * sOp; @@ -2155,7 +2155,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; bgetNdbIndexOperation(idxName, tab.getName()); if (pOp == NULL) { @@ -2183,7 +2183,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } // Define primary keys - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); @@ -2194,7 +2194,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } // Define attributes to read - for(int a = 0; aattributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); @@ -2225,7 +2225,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - for(int b = 0; bcloseTransaction(pTrans); return NDBT_FAILED; @@ -2254,7 +2254,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } if(!ordered){ - for(int a = 0; agetPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r+b) != 0){ 
ERR(pTrans->getNdbError()); @@ -2265,7 +2265,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } } - for(int a = 0; agetPrimaryKey() == false){ if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){ ERR(pTrans->getNdbError()); diff --git a/ndb/test/src/NdbRestarts.cpp b/ndb/test/src/NdbRestarts.cpp index f6a85d69fc2..b649a60d98b 100644 --- a/ndb/test/src/NdbRestarts.cpp +++ b/ndb/test/src/NdbRestarts.cpp @@ -625,9 +625,9 @@ int restartNFDuringNR(NdbRestarter& _restarter, const NdbRestarts::NdbRestart* _restart){ myRandom48Init(NdbTick_CurrentMillisecond()); - + int i; const int sz = sizeof(NFDuringNR_codes)/sizeof(NFDuringNR_codes[0]); - for(int i = 0; igetType(); int retryAttempt = 0; const int retryMax = 100; - int check; + int check, a; NdbConnection *pTrans1=NULL; NdbResultSet *cursor= NULL; NdbOperation *pOp; @@ -1100,7 +1100,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("PK: "); #endif - for(int a = 0; agetPrimaryKey() == true){ if (pOp->equal(attr->getName(), row.attributeStore(a)->aRef()) != 0){ @@ -1119,7 +1119,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("Reading %u attributes: ", tab.getNoOfColumns()); #endif - for(int a = 0; agetValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans1->getNdbError()); @@ -1170,7 +1170,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("SI: "); #endif - for(int a = 0; a<(int)pIndex->getNoOfColumns(); a++){ + for(a = 0; a<(int)pIndex->getNoOfColumns(); a++){ const NdbDictionary::Column * col = pIndex->getColumn(a); int r; @@ -1200,7 +1200,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("Reading %u attributes: ", tab.getNoOfColumns()); #endif - for(int a = 0; a split; tmp.split(split, ":"); diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp index ac17ffffee8..85f20bd2060 100644 --- a/ndb/test/tools/hugoPkReadRecord.cpp +++ b/ndb/test/tools/hugoPkReadRecord.cpp @@ -43,7 
+43,7 @@ int main(int argc, const char** argv) }; int num_args = sizeof(args) / sizeof(args[0]); - int optind = 0; + int optind = 0, i; if(getarg(args, num_args, argc, argv, &optind) || argv[optind] == NULL) { arg_printusage(args, num_args, argv[0], "table name\n"); @@ -80,7 +80,7 @@ int main(int argc, const char** argv) } op->readTuple(); NdbRecAttr** data = new NdbRecAttr*[table->getNoOfColumns()]; - for (int i = 0; i < table->getNoOfColumns(); i++) + for (i = 0; i < table->getNoOfColumns(); i++) { const NdbDictionary::Column* c = table->getColumn(i); if (c->getPrimaryKey()) @@ -93,11 +93,10 @@ int main(int argc, const char** argv) data[i] = op->getValue(c->getName(), NULL); } } - if (conn->execute(Commit) == 0) { // Print column names - for (int i = 0; i < table->getNoOfColumns(); i++) + for (i = 0; i < table->getNoOfColumns(); i++) { const NdbDictionary::Column* c = table->getColumn(i); @@ -111,7 +110,7 @@ int main(int argc, const char** argv) { g_info << hex; } - for (int i = 0; i < table->getNoOfColumns(); i++) + for (i = 0; i < table->getNoOfColumns(); i++) { NdbRecAttr* a = data[i]; switch(a->getType()) From 883247ea467743f907de4e62308c44351286958b Mon Sep 17 00:00:00 2001 From: "pekka@mysql.com" <> Date: Tue, 3 Aug 2004 18:09:28 +0200 Subject: [PATCH 55/93] testOIBasic.cpp: make it fail in more useful ways --- ndb/test/ndbapi/testOIBasic.cpp | 561 ++++++++++++++++++++++---------- 1 file changed, 388 insertions(+), 173 deletions(-) diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index fddcd7ef346..c58dd8538e9 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -33,13 +33,16 @@ struct Opt { // common options + unsigned m_batch; const char* m_case; bool m_core; bool m_dups; NdbDictionary::Object::FragmentType m_fragtype; + unsigned m_idxloop; const char* m_index; unsigned m_loop; bool m_nologging; + bool m_msglock; unsigned m_rows; unsigned m_samples; unsigned m_scanrd; @@ -50,18 +53,21 @@ 
struct Opt { unsigned m_threads; unsigned m_v; Opt() : + m_batch(32), m_case(0), m_core(false), m_dups(false), m_fragtype(NdbDictionary::Object::FragUndefined), + m_idxloop(4), m_index(0), m_loop(1), m_nologging(false), + m_msglock(true), m_rows(1000), m_samples(0), m_scanrd(240), m_scanex(240), - m_seed(1), + m_seed(0), m_subloop(4), m_table(0), m_threads(4), @@ -80,6 +86,7 @@ printhelp() Opt d; ndbout << "usage: testOIbasic [options]" << endl + << " -batch N pk operations in batch [" << d.m_batch << "]" << endl << " -case abc only given test cases (letters a-z)" << endl << " -core core dump on error [" << d.m_core << "]" << endl << " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl @@ -91,7 +98,7 @@ printhelp() << " -samples N samples for some timings (0=all) [" << d.m_samples << "]" << endl << " -scanrd N scan read parallelism [" << d.m_scanrd << "]" << endl << " -scanex N scan exclusive parallelism [" << d.m_scanex << "]" << endl - << " -seed N srandom seed [" << d.m_seed << "]" << endl + << " -seed N srandom seed 0=loop number[" << d.m_seed << "]" << endl << " -subloop N subtest loop count [" << d.m_subloop << "]" << endl << " -table xyz only given table numbers (digits 1-9)" << endl << " -threads N number of threads [" << d.m_threads << "]" << endl @@ -133,9 +140,9 @@ getthrstr() #define LLN(n, s) \ do { \ if ((n) > g_opt.m_v) break; \ - NdbMutex_Lock(&ndbout_mutex); \ + if (g_opt.m_msglock) NdbMutex_Lock(&ndbout_mutex); \ ndbout << getthrstr() << s << endl; \ - NdbMutex_Unlock(&ndbout_mutex); \ + if (g_opt.m_msglock) NdbMutex_Unlock(&ndbout_mutex); \ } while(0) #define LL0(s) LLN(0, s) @@ -148,11 +155,10 @@ getthrstr() // following check a condition and return -1 on failure #undef CHK // simple check -#undef CHKTRY // execute action (try-catch) on failure -#undef CHKMSG // print extra message on failure +#undef CHKTRY // check with action on fail #undef CHKCON // print NDB API errors on failure -#define CHK(x) CHKTRY(x, ;) +#define 
CHK(x) CHKTRY(x, ;) #define CHKTRY(x, act) \ do { \ @@ -163,14 +169,6 @@ getthrstr() return -1; \ } while (0) -#define CHKMSG(x, msg) \ - do { \ - if (x) break; \ - LL0("line " << __LINE__ << ": " << #x << " failed: " << msg); \ - if (g_opt.m_core) abort(); \ - return -1; \ - } while (0) - #define CHKCON(x, con) \ do { \ if (x) break; \ @@ -199,13 +197,14 @@ struct Par : public Opt { Tmr* m_tmr; Tmr& tmr() const { assert(m_tmr != 0); return *m_tmr; } unsigned m_totrows; - unsigned m_batch; // value calculation unsigned m_pctnull; unsigned m_range; unsigned m_pctrange; // do verify after read bool m_verify; + // deadlock possible + bool m_deadlock; // timer location Par(const Opt& opt) : Opt(opt), @@ -215,11 +214,11 @@ struct Par : public Opt { m_set(0), m_tmr(0), m_totrows(m_threads * m_rows), - m_batch(32), m_pctnull(10), m_range(m_rows), m_pctrange(0), - m_verify(false) { + m_verify(false), + m_deadlock(false) { } }; @@ -313,13 +312,51 @@ const char* Tmr::over(const Tmr& t1) { if (0 < t1.m_ms) { - sprintf(m_text, "%d pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms); + if (t1.m_ms <= m_ms) + sprintf(m_text, "%u pct", (100 * (m_ms - t1.m_ms)) / t1.m_ms); + else + sprintf(m_text, "-%u pct", (100 * (t1.m_ms - m_ms)) / t1.m_ms); } else { sprintf(m_text, "[cannot measure]"); } return m_text; } +// list of ints + +struct Lst { + Lst(); + unsigned m_arr[1000]; + unsigned m_cnt; + void push(unsigned i); + unsigned cnt() const; + void reset(); +}; + +Lst::Lst() : + m_cnt(0) +{ +} + +void +Lst::push(unsigned i) +{ + assert(m_cnt < sizeof(m_arr)/sizeof(m_arr[0])); + m_arr[m_cnt++] = i; +} + +unsigned +Lst::cnt() const +{ + return m_cnt; +} + +void +Lst::reset() +{ + m_cnt = 0; +} + // tables and indexes // Col - table column @@ -624,15 +661,14 @@ struct Con { Con() : m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_scanop(0), m_indexscanop(0), m_resultset(0), m_scanmode(ScanNo), m_errtype(ErrNone) {} - - ~Con(){ - if(m_tx) closeTransaction(); + ~Con() { + if (m_tx != 0) + 
closeTransaction(); } - int connect(); + void connect(const Con& con); void disconnect(); int startTransaction(); - int startBuddyTransaction(const Con& con); int getNdbOperation(const Tab& tab); int getNdbScanOperation(const Tab& tab); int getNdbScanOperation(const ITab& itab, const Tab& tab); @@ -641,20 +677,16 @@ struct Con { int setValue(int num, const char* addr); int setBound(int num, int type, const void* value); int execute(ExecType t); + int execute(ExecType t, bool& deadlock); int openScanRead(unsigned parallelism); int openScanExclusive(unsigned parallelism); int executeScan(); - int nextScanResult(); - int takeOverForUpdate(Con& scan); - int takeOverForDelete(Con& scan); + int nextScanResult(bool fetchAllowed); + int nextScanResult(bool fetchAllowed, bool& deadlock); + int updateScanTuple(Con& con2); + int deleteScanTuple(Con& con2); void closeTransaction(); void printerror(NdbOut& out); - // flush dict cache - int bugger() { - //disconnect(); - //CHK(connect() == 0); - return 0; - } }; int @@ -664,11 +696,17 @@ Con::connect() m_ndb = new Ndb("TEST_DB"); CHKCON(m_ndb->init() == 0, *this); CHKCON(m_ndb->waitUntilReady(30) == 0, *this); - m_dic = m_ndb->getDictionary(); m_tx = 0, m_op = 0; return 0; } +void +Con::connect(const Con& con) +{ + assert(m_ndb == 0); + m_ndb = con.m_ndb; +} + void Con::disconnect() { @@ -680,19 +718,12 @@ int Con::startTransaction() { assert(m_ndb != 0); - if(m_tx) closeTransaction(); + if (m_tx != 0) + closeTransaction(); CHKCON((m_tx = m_ndb->startTransaction()) != 0, *this); return 0; } -int -Con::startBuddyTransaction(const Con& con) -{ - assert(m_ndb != 0 && m_tx == 0 && con.m_ndb == m_ndb && con.m_tx != 0); - CHKCON((m_tx = m_ndb->hupp(con.m_tx)) != 0, *this); - return 0; -} - int Con::getNdbOperation(const Tab& tab) { @@ -757,6 +788,22 @@ Con::execute(ExecType t) return 0; } +int +Con::execute(ExecType t, bool& deadlock) +{ + int ret = execute(t); + if (ret != 0) { + if (deadlock && m_errtype == ErrDeadlock) { + 
LL3("caught deadlock"); + ret = 0; + } + } else { + deadlock = false; + } + CHK(ret == 0); + return 0; +} + int Con::openScanRead(unsigned parallelism) { @@ -781,28 +828,44 @@ Con::executeScan() } int -Con::nextScanResult() +Con::nextScanResult(bool fetchAllowed) { int ret; assert(m_resultset != 0); - CHKCON((ret = m_resultset->nextResult()) != -1, *this); - assert(ret == 0 || ret == 1); + CHKCON((ret = m_resultset->nextResult(fetchAllowed)) != -1, *this); + assert(ret == 0 || ret == 1 || (! fetchAllowed && ret == 2)); return ret; } int -Con::takeOverForUpdate(Con& scan) +Con::nextScanResult(bool fetchAllowed, bool& deadlock) { - assert(m_tx != 0 && scan.m_op != 0); - CHKCON((m_op = scan.m_resultset->updateTuple(m_tx)) != 0, scan); + int ret = nextScanResult(fetchAllowed); + if (ret == -1) { + if (deadlock && m_errtype == ErrDeadlock) { + LL3("caught deadlock"); + ret = 0; + } + } else { + deadlock = false; + } + CHK(ret == 0 || ret == 1 || (! fetchAllowed && ret == 2)); + return ret; +} + +int +Con::updateScanTuple(Con& con2) +{ + assert(con2.m_tx != 0); + CHKCON((con2.m_op = m_resultset->updateTuple(con2.m_tx)) != 0, *this); return 0; } int -Con::takeOverForDelete(Con& scan) +Con::deleteScanTuple(Con& con2) { - assert(m_tx != 0 && scan.m_op != 0); - CHKCON(scan.m_resultset->deleteTuple(m_tx) == 0, scan); + assert(con2.m_tx != 0); + CHKCON(m_resultset->deleteTuple(con2.m_tx) == 0, *this); return 0; } @@ -850,7 +913,7 @@ invalidateindex(Par par, const ITab& itab) { Con& con = par.con(); const Tab& tab = par.tab(); - con.m_dic->invalidateIndex(itab.m_name, tab.m_name); + con.m_ndb->getDictionary()->invalidateIndex(itab.m_name, tab.m_name); return 0; } @@ -874,7 +937,7 @@ invalidatetable(Par par) Con& con = par.con(); const Tab& tab = par.tab(); invalidateindex(par); - con.m_dic->invalidateTable(tab.m_name); + con.m_ndb->getDictionary()->invalidateTable(tab.m_name); return 0; } @@ -883,6 +946,7 @@ droptable(Par par) { Con& con = par.con(); const Tab& tab = par.tab(); 
+ con.m_dic = con.m_ndb->getDictionary(); if (con.m_dic->getTable(tab.m_name) == 0) { // how to check for error LL4("no table " << tab.m_name); @@ -890,6 +954,7 @@ droptable(Par par) LL3("drop table " << tab.m_name); CHKCON(con.m_dic->dropTable(tab.m_name) == 0, con); } + con.m_dic = 0; return 0; } @@ -897,7 +962,6 @@ static int createtable(Par par) { Con& con = par.con(); - CHK(con.bugger() == 0); const Tab& tab = par.tab(); LL3("create table " << tab.m_name); LL4(tab); @@ -917,7 +981,9 @@ createtable(Par par) c.setNullable(col.m_nullable); t.addColumn(c); } + con.m_dic = con.m_ndb->getDictionary(); CHKCON(con.m_dic->createTable(t) == 0, con); + con.m_dic = 0; return 0; } @@ -926,6 +992,7 @@ dropindex(Par par, const ITab& itab) { Con& con = par.con(); const Tab& tab = par.tab(); + con.m_dic = con.m_ndb->getDictionary(); if (con.m_dic->getIndex(itab.m_name, tab.m_name) == 0) { // how to check for error LL4("no index " << itab.m_name); @@ -933,6 +1000,7 @@ dropindex(Par par, const ITab& itab) LL3("drop index " << itab.m_name); CHKCON(con.m_dic->dropIndex(itab.m_name, tab.m_name) == 0, con); } + con.m_dic = 0; return 0; } @@ -953,7 +1021,6 @@ static int createindex(Par par, const ITab& itab) { Con& con = par.con(); - CHK(con.bugger() == 0); const Tab& tab = par.tab(); LL3("create index " << itab.m_name); LL4(itab); @@ -965,7 +1032,9 @@ createindex(Par par, const ITab& itab) const Col& col = itab.m_icol[k].m_col; x.addColumnName(col.m_name); } + con.m_dic = con.m_ndb->getDictionary(); CHKCON(con.m_dic->createIndex(x) == 0, con); + con.m_dic = 0; return 0; } @@ -1240,6 +1309,8 @@ struct Row { const Tab& m_tab; Val** m_val; bool m_exist; + enum Op { NoOp = 0, ReadOp, InsOp, UpdOp, DelOp }; + Op m_pending; Row(const Tab& tab); ~Row(); void copy(const Row& row2); @@ -1264,6 +1335,7 @@ Row::Row(const Tab& tab) : m_val[k] = new Val(col); } m_exist = false; + m_pending = NoOp; } Row::~Row() @@ -1301,7 +1373,7 @@ int Row::verify(const Row& row2) const { const Tab& tab = 
m_tab; - assert(&tab == &row2.m_tab); + assert(&tab == &row2.m_tab && m_exist && row2.m_exist); for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Val& val2 = *row2.m_val[k]; @@ -1322,7 +1394,7 @@ Row::insrow(Par par) const Val& val = *m_val[k]; CHK(val.setval(par) == 0); } - m_exist = true; + m_pending = InsOp; return 0; } @@ -1338,6 +1410,7 @@ Row::updrow(Par par) const Val& val = *m_val[k]; CHK(val.setval(par) == 0); } + m_pending = UpdOp; return 0; } @@ -1355,7 +1428,7 @@ Row::delrow(Par par) if (col.m_pk) CHK(val.setval(par) == 0); } - m_exist = false; + m_pending = DelOp; return 0; } @@ -1372,7 +1445,6 @@ Row::selrow(Par par) if (col.m_pk) CHK(val.setval(par) == 0); } - m_exist = false; return 0; } @@ -1387,6 +1459,7 @@ Row::setrow(Par par) if (! col.m_pk) CHK(val.setval(par) == 0); } + m_pending = UpdOp; return 0; } @@ -1414,6 +1487,10 @@ operator<<(NdbOut& out, const Row& row) out << " "; out << *row.m_val[i]; } + out << " [exist=" << row.m_exist; + if (row.m_pending) + out << " pending=" << row.m_pending; + out << "]"; return out; } @@ -1432,6 +1509,9 @@ struct Set { unsigned count() const; // row methods bool exist(unsigned i) const; + Row::Op pending(unsigned i) const; + void notpending(unsigned i); + void notpending(const Lst& lst); void calc(Par par, unsigned i); int insrow(Par par, unsigned i); int updrow(Par par, unsigned i); @@ -1446,7 +1526,7 @@ struct Set { void savepoint(); void commit(); void rollback(); - // locking (not perfect since ops may complete in different order) + // protect structure NdbMutex* m_mutex; void lock() { NdbMutex_Lock(m_mutex); @@ -1464,6 +1544,7 @@ Set::Set(const Tab& tab, unsigned rows) : m_rows = rows; m_row = new Row* [m_rows]; for (unsigned i = 0; i < m_rows; i++) { + // allocate on need to save space m_row[i] = 0; } m_saverow = 0; @@ -1519,7 +1600,18 @@ bool Set::exist(unsigned i) const { assert(i < m_rows); - return m_row[i] != 0 && m_row[i]->m_exist; + if (m_row[i] == 0) // not allocated 
=> not exist + return false; + return m_row[i]->m_exist; +} + +Row::Op +Set::pending(unsigned i) const +{ + assert(i < m_rows); + if (m_row[i] == 0) // not allocated => not pending + return Row::NoOp; + return m_row[i]->m_pending; } void @@ -1598,7 +1690,7 @@ Set::getkey(Par par, unsigned* i) assert(m_rec[0] != 0); const char* aRef0 = m_rec[0]->aRef(); Uint32 key = *(const Uint32*)aRef0; - CHKMSG(key < m_rows, "key=" << key << " rows=" << m_rows); + CHK(key < m_rows); *i = key; return 0; } @@ -1628,12 +1720,32 @@ Set::putval(unsigned i, bool force) return 0; } +void +Set::notpending(unsigned i) +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + if (row.m_pending == Row::InsOp) + row.m_exist = true; + if (row.m_pending == Row::DelOp) + row.m_exist = false; + row.m_pending = Row::NoOp; +} + +void +Set::notpending(const Lst& lst) +{ + for (unsigned j = 0; j < lst.m_cnt; j++) { + unsigned i = lst.m_arr[j]; + notpending(i); + } +} + int Set::verify(const Set& set2) const { const Tab& tab = m_tab; assert(&tab == &set2.m_tab && m_rows == set2.m_rows); - CHKMSG(count() == set2.count(), "set=" << count() << " set2=" << set2.count()); for (unsigned i = 0; i < m_rows; i++) { CHK(exist(i) == set2.exist(i)); if (! 
exist(i)) @@ -1924,28 +2036,46 @@ pkinsert(Par par) Set& set = par.set(); LL3("pkinsert"); CHK(con.startTransaction() == 0); - unsigned n = 0; + Lst lst; for (unsigned j = 0; j < par.m_rows; j++) { unsigned i = thrrow(par, j); set.lock(); - if (set.exist(i)) { + if (set.exist(i) || set.pending(i)) { set.unlock(); continue; } set.calc(par, i); - LL4("pkinsert " << i << ": " << *set.m_row[i]); - CHKTRY(set.insrow(par, i) == 0, set.unlock()); + CHK(set.insrow(par, i) == 0); set.unlock(); - if (++n == par.m_batch) { - CHK(con.execute(Commit) == 0); + LL4("pkinsert " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + bool deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); con.closeTransaction(); + if (deadlock) { + LL1("pkinsert: stop on deadlock"); + return 0; + } + set.lock(); + set.notpending(lst); + set.unlock(); + lst.reset(); CHK(con.startTransaction() == 0); - n = 0; } } - if (n != 0) { - CHK(con.execute(Commit) == 0); - n = 0; + if (lst.cnt() != 0) { + bool deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + con.closeTransaction(); + if (deadlock) { + LL1("pkinsert: stop on deadlock"); + return 0; + } + set.lock(); + set.notpending(lst); + set.unlock(); + return 0; } con.closeTransaction(); return 0; @@ -1958,28 +2088,45 @@ pkupdate(Par par) Set& set = par.set(); LL3("pkupdate"); CHK(con.startTransaction() == 0); - unsigned n = 0; + Lst lst; + bool deadlock = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned i = thrrow(par, j); set.lock(); - if (! set.exist(i)) { + if (! 
set.exist(i) || set.pending(i)) { set.unlock(); continue; } set.calc(par, i); - LL4("pkupdate " << i << ": " << *set.m_row[i]); - CHKTRY(set.updrow(par, i) == 0, set.unlock()); + CHK(set.updrow(par, i) == 0); set.unlock(); - if (++n == par.m_batch) { - CHK(con.execute(Commit) == 0); + LL4("pkupdate " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("pkupdate: stop on deadlock"); + break; + } con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + lst.reset(); CHK(con.startTransaction() == 0); - n = 0; } } - if (n != 0) { - CHK(con.execute(Commit) == 0); - n = 0; + if (! deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("pkupdate: stop on deadlock"); + } else { + set.lock(); + set.notpending(lst); + set.unlock(); + } } con.closeTransaction(); return 0; @@ -1992,27 +2139,44 @@ pkdelete(Par par) Set& set = par.set(); LL3("pkdelete"); CHK(con.startTransaction() == 0); - unsigned n = 0; + Lst lst; + bool deadlock = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned i = thrrow(par, j); set.lock(); - if (! set.exist(i)) { + if (! set.exist(i) || set.pending(i)) { set.unlock(); continue; } - LL4("pkdelete " << i << ": " << *set.m_row[i]); - CHKTRY(set.delrow(par, i) == 0, set.unlock()); + CHK(set.delrow(par, i) == 0); set.unlock(); - if (++n == par.m_batch) { - CHK(con.execute(Commit) == 0); + LL4("pkdelete " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("pkdelete: stop on deadlock"); + break; + } con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + lst.reset(); CHK(con.startTransaction() == 0); - n = 0; } } - if (n != 0) { - CHK(con.execute(Commit) == 0); - n = 0; + if (! 
deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("pkdelete: stop on deadlock"); + } else { + set.lock(); + set.notpending(lst); + set.unlock(); + } } con.closeTransaction(); return 0; @@ -2023,14 +2187,18 @@ pkread(Par par) { Con& con = par.con(); const Tab& tab = par.tab(); - const Set& set = par.set(); + Set& set = par.set(); LL3((par.m_verify ? "pkverify " : "pkread ") << tab.m_name); // expected const Set& set1 = set; Set set2(tab, set.m_rows); for (unsigned i = 0; i < set.m_rows; i++) { - if (! set.exist(i)) + set.lock(); + if (! set.exist(i) || set.pending(i)) { + set.unlock(); continue; + } + set.unlock(); CHK(con.startTransaction() == 0); CHK(set2.selrow(par, i) == 0); CHK(con.execute(Commit) == 0); @@ -2053,6 +2221,7 @@ pkreadfast(Par par, unsigned count) const Set& set = par.set(); LL3("pkfast " << tab.m_name); Row keyrow(tab); + // not batched on purpose for (unsigned j = 0; j < count; j++) { unsigned i = urandom(set.m_rows); assert(set.exist(i)); @@ -2089,7 +2258,7 @@ scanreadtable(Par par) CHK(con.executeScan() == 0); while (1) { int ret; - CHK((ret = con.nextScanResult()) == 0 || ret == 1); + CHK((ret = con.nextScanResult(true)) == 0 || ret == 1); if (ret == 1) break; unsigned i = (unsigned)-1; @@ -2120,7 +2289,7 @@ scanreadtablefast(Par par, unsigned countcheck) unsigned count = 0; while (1) { int ret; - CHK((ret = con.nextScanResult()) == 0 || ret == 1); + CHK((ret = con.nextScanResult(true)) == 0 || ret == 1); if (ret == 1) break; count++; @@ -2150,7 +2319,7 @@ scanreadindex(Par par, const ITab& itab, const BSet& bset) CHK(con.executeScan() == 0); while (1) { int ret; - CHK((ret = con.nextScanResult()) == 0 || ret == 1); + CHK((ret = con.nextScanResult(true)) == 0 || ret == 1); if (ret == 1) break; unsigned i = (unsigned)-1; @@ -2184,7 +2353,7 @@ scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countche unsigned count = 0; while (1) { int ret; - 
CHK((ret = con.nextScanResult()) == 0 || ret == 1); + CHK((ret = con.nextScanResult(true)) == 0 || ret == 1); if (ret == 1) break; count++; @@ -2198,7 +2367,7 @@ static int scanreadindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); - for (unsigned i = 0; i < par.m_subloop; i++) { + for (unsigned i = 0; i < par.m_idxloop; i++) { BSet bset(tab, itab, par.m_rows); bset.calc(par); CHK(scanreadindex(par, itab, bset) == 0); @@ -2300,29 +2469,63 @@ scanupdatetable(Par par) unsigned count = 0; // updating trans Con con2; - con2.m_ndb = con.m_ndb; + con2.connect(con); CHK(con2.startTransaction() == 0); + Lst lst; + bool deadlock = false; while (1) { int ret; - CHK((ret = con.nextScanResult()) == 0 || ret == 1); + deadlock = par.m_deadlock; + CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1); + if (ret == 1) + break; + if (deadlock) { + LL1("scanupdatetable: stop on deadlock"); + break; + } + do { + unsigned i = (unsigned)-1; + CHK(set2.getkey(par, &i) == 0); + const Row& row = *set.m_row[i]; + set.lock(); + if (! 
set.exist(i) || set.pending(i)) { + LL4("scan update " << tab.m_name << ": skip: " << row); + } else { + CHKTRY(set2.putval(i, false) == 0, set.unlock()); + CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); + Par par2 = par; + par2.m_con = &con2; + set.calc(par, i); + CHKTRY(set.setrow(par2, i) == 0, set.unlock()); + LL4("scan update " << tab.m_name << ": " << row); + lst.push(i); + } + set.unlock(); + if (lst.cnt() == par.m_batch) { + CHK(con2.execute(Commit) == 0); + con2.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + count += lst.cnt(); + lst.reset(); + CHK(con2.startTransaction() == 0); + } + CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); + if (ret == 2 && lst.cnt() != 0) { + CHK(con2.execute(Commit) == 0); + con2.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + count += lst.cnt(); + lst.reset(); + CHK(con2.startTransaction() == 0); + } + } while (ret == 0); if (ret == 1) break; - unsigned i = (unsigned)-1; - CHK(set2.getkey(par, &i) == 0); - LL4("key " << i); - CHK(set2.putval(i, false) == 0); - CHK(con2.takeOverForUpdate(con) == 0); - Par par2 = par; - par2.m_con = &con2; - set.lock(); - set.calc(par, i); - LL4("scan update " << tab.m_name << ": " << *set.m_row[i]); - CHKTRY(set.setrow(par2, i) == 0, set.unlock()); - set.unlock(); - CHK(con2.execute(NoCommit) == 0); - count++; } - CHK(con2.execute(Commit) == 0); con2.closeTransaction(); LL3("scan update " << tab.m_name << " rows updated=" << count); con.closeTransaction(); @@ -2346,32 +2549,61 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) unsigned count = 0; // updating trans Con con2; - con2.m_ndb = con.m_ndb; + con2.connect(con); CHK(con2.startTransaction() == 0); + Lst lst; + bool deadlock = false; while (1) { int ret; - CHK((ret = con.nextScanResult()) == 0 || ret == 1); + deadlock = par.m_deadlock; + CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1); if (ret == 1) break; - unsigned i = 
(unsigned)-1; - CHK(set2.getkey(par, &i) == 0); - LL4("key " << i); - CHK(set2.putval(i, par.m_dups) == 0); - // avoid deadlock for now - //if (! isthrrow(par, i)) - //continue; - CHK(con2.takeOverForUpdate(con) == 0); - Par par2 = par; - par2.m_con = &con2; - set.lock(); - set.calc(par, i); - LL4("scan update " << itab.m_name << ": " << *set.m_row[i]); - CHKTRY(set.setrow(par2, i) == 0, set.unlock()); - set.unlock(); - CHK(con2.execute(NoCommit) == 0); - count++; + if (deadlock) { + LL1("scanupdateindex: stop on deadlock"); + break; + } + do { + unsigned i = (unsigned)-1; + CHK(set2.getkey(par, &i) == 0); + const Row& row = *set.m_row[i]; + set.lock(); + if (! set.exist(i) || set.pending(i)) { + LL4("scan update " << itab.m_name << ": skip: " << row); + } else { + CHKTRY(set2.putval(i, par.m_dups) == 0, set.unlock()); + CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); + Par par2 = par; + par2.m_con = &con2; + set.calc(par, i); + CHKTRY(set.setrow(par2, i) == 0, set.unlock()); + LL4("scan update " << itab.m_name << ": " << row); + lst.push(i); + } + set.unlock(); + if (lst.cnt() == par.m_batch) { + CHK(con2.execute(Commit) == 0); + con2.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + count += lst.cnt(); + lst.reset(); + CHK(con2.startTransaction() == 0); + } + CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); + if (ret == 2 && lst.cnt() != 0) { + CHK(con2.execute(Commit) == 0); + con2.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + count += lst.cnt(); + lst.reset(); + CHK(con2.startTransaction() == 0); + } + } while (ret == 0); } - CHK(con2.execute(Commit) == 0); con2.closeTransaction(); LL3("scan update " << itab.m_name << " rows updated=" << count); con.closeTransaction(); @@ -2382,7 +2614,7 @@ static int scanupdateindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); - for (unsigned i = 0; i < par.m_subloop; i++) { + for (unsigned i = 0; i < par.m_idxloop; i++) { BSet 
bset(tab, itab, par.m_rows); bset.calc(par); CHK(scanupdateindex(par, itab, bset) == 0); @@ -2413,41 +2645,15 @@ scanupdateall(Par par) // medium level routines -static bool -ignoreverifyerror(Par par) -{ - Con& con = par.con(); - bool b = par.m_threads > 1; - if (b) { - LL1("ignore verify error"); - if (con.m_tx != 0) - con.closeTransaction(); - return true; - } - return b; -} - static int readverify(Par par) { par.m_verify = true; - CHK(pkread(par) == 0 || ignoreverifyerror(par)); - CHK(scanreadall(par) == 0 || ignoreverifyerror(par)); + CHK(pkread(par) == 0); + CHK(scanreadall(par) == 0); return 0; } -static bool -ignoredeadlock(Par par) -{ - Con& con = par.con(); - if (con.m_errtype == Con::ErrDeadlock) { - LL1("ignore deadlock"); - con.closeTransaction(); - return true; - } - return false; -} - static int pkupdatescanread(Par par) { @@ -2469,15 +2675,16 @@ static int mixedoperations(Par par) { par.m_dups = true; + par.m_deadlock = true; unsigned sel = urandom(10); if (sel < 2) { - CHK(pkdelete(par) == 0 || ignoredeadlock(par)); + CHK(pkdelete(par) == 0); } else if (sel < 4) { - CHK(pkupdate(par) == 0 || ignoredeadlock(par)); + CHK(pkupdate(par) == 0); } else if (sel < 6) { - CHK(scanupdatetable(par) == 0 || ignoredeadlock(par)); + CHK(scanupdatetable(par) == 0); } else { - CHK(scanupdateindex(par) == 0 || ignoredeadlock(par)); + CHK(scanupdateindex(par) == 0); } return 0; } @@ -2611,7 +2818,6 @@ Thr::run() break; } LL4("start"); - CHK(con.bugger() == 0); assert(m_state == Start); m_ret = (*m_func)(m_par); m_state = Stopped; @@ -2936,7 +3142,8 @@ static int runtest(Par par) { LL1("start"); - srandom(par.m_seed); + if (par.m_seed != 0) + srandom(par.m_seed); Con con; CHK(con.connect() == 0); par.m_con = &con; @@ -2951,6 +3158,8 @@ runtest(Par par) } for (unsigned l = 0; par.m_loop == 0 || l < par.m_loop; l++) { LL1("loop " << l); + if (par.m_seed == 0) + srandom(l); for (unsigned i = 0; i < tcasecount; i++) { const TCase& tcase = tcaselist[i]; if (par.m_case != 
0 && strchr(par.m_case, tcase.m_name[0]) == 0) @@ -2992,6 +3201,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) ndbout << "testOIBasic: unknown argument " << arg; goto usage; } + if (strcmp(arg, "-batch") == 0) { + if (++argv, --argc > 0) { + g_opt.m_batch = atoi(argv[0]); + continue; + } + } if (strcmp(arg, "-case") == 0) { if (++argv, --argc > 0) { g_opt.m_case = strdup(argv[0]); From 8d39a7b10405adbdf4fd57c2b82b2904361c9ea2 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Tue, 3 Aug 2004 22:08:17 +0200 Subject: [PATCH 56/93] Ndb mgmsrv Memleak(s) --- ndb/src/mgmapi/Makefile.am | 5 +++++ ndb/src/mgmsrv/MgmtSrvr.cpp | 12 +++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/ndb/src/mgmapi/Makefile.am b/ndb/src/mgmapi/Makefile.am index e4fa1d449c6..bf209ddccb5 100644 --- a/ndb/src/mgmapi/Makefile.am +++ b/ndb/src/mgmapi/Makefile.am @@ -9,5 +9,10 @@ DEFS_LOC = -DNO_DEBUG_MESSAGES include $(top_srcdir)/ndb/config/common.mk.am include $(top_srcdir)/ndb/config/type_util.mk.am +#ndbtest_PROGRAMS = ndb_test_mgmapi +ndb_test_mgmapi_SOURCES = test_mgmapi.cpp +ndb_test_mgmapi_LDFLAGS = @ndb_bin_am_ldflags@ \ + $(top_builddir)/ndb/src/libndbclient.la + # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 33ac4ddcf99..2fe4624ab59 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -172,7 +172,7 @@ MgmtSrvr::signalRecvThreadRun() siglist.push_back(SigMatch(GSN_MGM_UNLOCK_CONFIG_REQ, &MgmtSrvr::handle_MGM_UNLOCK_CONFIG_REQ)); - while(1) { + while(!_isStopThread) { SigMatch *handler = NULL; NdbApiSignal *signal = NULL; if(m_signalRecvQueue.waitFor(siglist, handler, signal)) { @@ -415,14 +415,18 @@ MgmtSrvr::getPort() const { ndbout << "Local node id " << getOwnNodeId() << " is not defined as management server" << endl << "Have you set correct NodeId for this node?" 
<< endl; + ndb_mgm_destroy_iterator(iter); return 0; } Uint32 port = 0; if(ndb_mgm_get_int_parameter(iter, CFG_MGM_PORT, &port) != 0){ ndbout << "Could not find PortNumber in the configuration file." << endl; + ndb_mgm_destroy_iterator(iter); return 0; } + + ndb_mgm_destroy_iterator(iter); /***************** * Set Stat Port * @@ -517,6 +521,7 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _isStopThread = false; _logLevelThread = NULL; _logLevelThreadSleep = 500; + m_signalRecvThread = NULL; _startedNodeId = 0; theFacade = 0; @@ -696,6 +701,11 @@ MgmtSrvr::~MgmtSrvr() NdbThread_WaitFor(_logLevelThread, &res); NdbThread_Destroy(&_logLevelThread); } + + if (m_signalRecvThread != NULL) { + NdbThread_WaitFor(m_signalRecvThread, &res); + NdbThread_Destroy(&m_signalRecvThread); + } } //**************************************************************************** From 1c65da2cbcd23c6dcd41a95997c1383e9b038dc0 Mon Sep 17 00:00:00 2001 From: "magnus@neptunus.(none)" <> Date: Wed, 4 Aug 2004 10:54:42 +0200 Subject: [PATCH 57/93] BUG#4818 DELETE FROM tab LIMIT Check if there are any operations pending that needs to be taken over to the updating/deleting transaction before closing the scan --- mysql-test/r/ndb_limit.result | 31 ++++++++++++++++++++++++ mysql-test/t/ndb_limit.test | 44 +++++++++++++++++++++++++++++++++++ sql/ha_ndbcluster.cc | 14 +++++++++++ 3 files changed, 89 insertions(+) create mode 100644 mysql-test/r/ndb_limit.result create mode 100644 mysql-test/t/ndb_limit.test diff --git a/mysql-test/r/ndb_limit.result b/mysql-test/r/ndb_limit.result new file mode 100644 index 00000000000..6574aa0bb1a --- /dev/null +++ b/mysql-test/r/ndb_limit.result @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 ( +a bigint unsigned NOT NULL PRIMARY KEY, +b int unsigned not null, +c int unsigned +) engine=ndbcluster; +select count(*) from t2; +count(*) +10000 +delete from t2 limit 1; +select count(*) from t2; +count(*) +9999 +delete from t2 limit 100; +select count(*) from t2; 
+count(*) +9899 +delete from t2 limit 1000; +select count(*) from t2; +count(*) +8899 +update t2 set c=12345678 limit 100; +select count(*) from t2 where c=12345678; +count(*) +100 +select count(*) from t2 where c=12345678 limit 1000; +count(*) +100 +select * from t2 limit 0; +a b c +drop table t2; diff --git a/mysql-test/t/ndb_limit.test b/mysql-test/t/ndb_limit.test new file mode 100644 index 00000000000..b0b6f3c4f17 --- /dev/null +++ b/mysql-test/t/ndb_limit.test @@ -0,0 +1,44 @@ +-- source include/have_ndb.inc + +--disable_warnings +DROP TABLE IF EXISTS t2; +--enable_warnings + + +CREATE TABLE t2 ( + a bigint unsigned NOT NULL PRIMARY KEY, + b int unsigned not null, + c int unsigned +) engine=ndbcluster; + + +# +# insert records into table +# +let $1=1000; +disable_query_log; +while ($1) +{ + eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), ($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1); + dec $1; +} +enable_query_log; + +select count(*) from t2; + +delete from t2 limit 1; +select count(*) from t2; + +delete from t2 limit 100; +select count(*) from t2; + +delete from t2 limit 1000; +select count(*) from t2; + +update t2 set c=12345678 limit 100; +select count(*) from t2 where c=12345678; +select count(*) from t2 where c=12345678 limit 1000; + +select * from t2 limit 0; + +drop table t2; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 2c966aab73a..f9dca1b36bb 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1937,11 +1937,25 @@ int ha_ndbcluster::rnd_init(bool scan) int ha_ndbcluster::close_scan() { NdbResultSet *cursor= m_active_cursor; + NdbConnection *trans= m_active_trans; DBUG_ENTER("close_scan"); if (!cursor) DBUG_RETURN(1); + + if (ops_pending) + { + /* + Take over any pending transactions to the + deleteing/updating transaction before closing the scan + */ + 
DBUG_PRINT("info", ("ops_pending: %d", ops_pending)); + if (trans->execute(NoCommit) != 0) + DBUG_RETURN(ndb_err(trans)); + ops_pending= 0; + } + cursor->close(); m_active_cursor= NULL; DBUG_RETURN(0); From d526f3277e223be3b3741f04e925b2c7ee75a76e Mon Sep 17 00:00:00 2001 From: "magnus@neptunus.(none)" <> Date: Wed, 4 Aug 2004 11:28:36 +0200 Subject: [PATCH 58/93] BUG#4892 TRUNCATE TABLE returns error 156 Added NDBCLUSTER to table types which does not support generate. Added test case for truncate. --- mysql-test/r/ndb_truncate.result | 14 ++++++++++++++ mysql-test/t/ndb_truncate.test | 33 ++++++++++++++++++++++++++++++++ sql/handler.h | 3 ++- 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/ndb_truncate.result create mode 100644 mysql-test/t/ndb_truncate.test diff --git a/mysql-test/r/ndb_truncate.result b/mysql-test/r/ndb_truncate.result new file mode 100644 index 00000000000..38f3a78029c --- /dev/null +++ b/mysql-test/r/ndb_truncate.result @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 ( +a bigint unsigned NOT NULL PRIMARY KEY, +b int unsigned not null, +c int unsigned +) engine=ndbcluster; +select count(*) from t2; +count(*) +5000 +truncate table t2; +select count(*) from t2; +count(*) +0 +drop table t2; diff --git a/mysql-test/t/ndb_truncate.test b/mysql-test/t/ndb_truncate.test new file mode 100644 index 00000000000..63bb8cbefb6 --- /dev/null +++ b/mysql-test/t/ndb_truncate.test @@ -0,0 +1,33 @@ +-- source include/have_ndb.inc + +--disable_warnings +DROP TABLE IF EXISTS t2; +--enable_warnings + + +CREATE TABLE t2 ( + a bigint unsigned NOT NULL PRIMARY KEY, + b int unsigned not null, + c int unsigned +) engine=ndbcluster; + + +# +# insert records into table +# +let $1=500; +disable_query_log; +while ($1) +{ + eval insert into t2 values($1*10, $1+9, 5*$1), ($1*10+1, $1+10, 7),($1*10+2, $1+10, 7*$1), ($1*10+3, $1+10, 10+$1), ($1*10+4, $1+10, 70*$1), ($1*10+5, $1+10, 7), ($1*10+6, $1+10, 9), ($1*10+7, $1+299, 899), 
($1*10+8, $1+10, 12), ($1*10+9, $1+10, 14*$1); + dec $1; +} +enable_query_log; + +select count(*) from t2; + +truncate table t2; + +select count(*) from t2; + +drop table t2; diff --git a/sql/handler.h b/sql/handler.h index 28b0b8df6e2..3dd89a0c5d0 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -516,7 +516,8 @@ extern TYPELIB tx_isolation_typelib; #define ha_rollback(thd) (ha_rollback_trans((thd), &((thd)->transaction.all))) #define ha_supports_generate(T) (T != DB_TYPE_INNODB && \ - T != DB_TYPE_BERKELEY_DB) + T != DB_TYPE_BERKELEY_DB && \ + T != DB_TYPE_NDBCLUSTER) bool ha_caching_allowed(THD* thd, char* table_key, uint key_length, uint8 cache_type); From e38680886d50dbe1455f4865fc5e84e06a85f7e6 Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Wed, 4 Aug 2004 15:47:50 +0200 Subject: [PATCH 59/93] Fix for allowing large transactions with less memory impact. Currently one needs 636*1.6*2*noOfReplicas + 184 bytes per record which amounts to about 4200 bytes per record. The 2 is a bug which is fixed here as well, noOfReplicas is removed, it was there for concurrent transactions but it is better to focus on supporting one large transaction in the cluster. Also decreasing the safety factor from 1.6 to 1.1. Also removing unused parameters. --- ndb/src/kernel/vm/Configuration.cpp | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index c438c48f450..11bad203619 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -510,7 +510,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ // The remainder are allowed for use by the scan processes. 
/*-----------------------------------------------------------------------*/ cfg.put(CFG_ACC_OP_RECS, - noOfReplicas*((16 * noOfOperations) / 10 + 50) + + ((11 * noOfOperations) / 10 + 50) + (noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) + NODE_RECOVERY_SCAN_OP_RECORDS); @@ -535,18 +535,9 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ */ cfg.put(CFG_DICT_ATTRIBUTE, noOfAttributes); - - cfg.put(CFG_DICT_CONNECT, - noOfOperations + 32); - - cfg.put(CFG_DICT_FRAG_CONNECT, - NO_OF_FRAG_PER_NODE * noOfDBNodes * noOfReplicas); cfg.put(CFG_DICT_TABLE, noOfTables); - - cfg.put(CFG_DICT_TC_CONNECT, - 2* noOfOperations); } { @@ -587,18 +578,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ cfg.put(CFG_LQH_FRAG, NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas); - cfg.put(CFG_LQH_CONNECT, - noOfReplicas*((11 * noOfOperations) / 10 + 50)); - cfg.put(CFG_LQH_TABLE, noOfTables); cfg.put(CFG_LQH_TC_CONNECT, - noOfReplicas*((16 * noOfOperations) / 10 + 50)); + (11 * noOfOperations) / 10 + 50); - cfg.put(CFG_LQH_REPLICAS, - noOfReplicas); - cfg.put(CFG_LQH_SCAN, noOfLocalScanRecords); } @@ -611,7 +596,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ 3 * noOfTransactions); cfg.put(CFG_TC_TC_CONNECT, - noOfOperations + 16 + noOfTransactions); + (2 * noOfOperations) + 16 + noOfTransactions); cfg.put(CFG_TC_TABLE, noOfTables); @@ -631,7 +616,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ 2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas); cfg.put(CFG_TUP_OP_RECS, - noOfReplicas*((16 * noOfOperations) / 10 + 50)); + (11 * noOfOperations) / 10 + 50); cfg.put(CFG_TUP_PAGE, noOfDataPages); From 4a6e9d8052006046efdb990889435171fb3575c6 Mon Sep 17 00:00:00 2001 From: "ndbdev@eel.hemma.oreland.se" <> Date: Wed, 4 Aug 2004 20:48:48 +0200 Subject: [PATCH 60/93] wl1292 - workaround for mgmsrv node id problems --- ndb/src/cw/cpcd/Makefile.am | 2 +- ndb/src/cw/cpcd/Process.cpp | 78 ++++++++++++++++++------------------- ndb/test/run-test/main.cpp | 26 
++++++------- 3 files changed, 51 insertions(+), 55 deletions(-) diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am index 6345bae9bbe..1f7b0d88448 100644 --- a/ndb/src/cw/cpcd/Makefile.am +++ b/ndb/src/cw/cpcd/Makefile.am @@ -1,5 +1,5 @@ -ndbtools_PROGRAMS = ndb_cpcd +ndbbin_PROGRAMS = ndb_cpcd ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp diff --git a/ndb/src/cw/cpcd/Process.cpp b/ndb/src/cw/cpcd/Process.cpp index 74426306a88..d99dda2ba0b 100644 --- a/ndb/src/cw/cpcd/Process.cpp +++ b/ndb/src/cw/cpcd/Process.cpp @@ -209,49 +209,45 @@ int set_ulimit(const BaseString & pair){ #ifdef HAVE_GETRLIMIT errno = 0; - do { - Vector list; - pair.split(list, ":"); - if(list.size() != 2){ - break; - } - - int res; - rlim_t value = RLIM_INFINITY; - if(!(list[1].trim() == "unlimited")){ - value = atoi(list[1].c_str()); - } - - struct rlimit rlp; + Vector list; + pair.split(list, ":"); + if(list.size() != 2){ + logger.error("Unable to process ulimit: split >%s< list.size()=%d", + pair.c_str(), list.size()); + return -1; + } + + int res; + rlim_t value = RLIM_INFINITY; + if(!(list[1].trim() == "unlimited")){ + value = atoi(list[1].c_str()); + } + + struct rlimit rlp; #define _RLIMIT_FIX(x) { res = getrlimit(x,&rlp); if(!res){ rlp.rlim_cur = value; res = setrlimit(x, &rlp); }} - - if(list[0].trim() == "c"){ - _RLIMIT_FIX(RLIMIT_CORE); - } else if(list[0] == "d"){ - _RLIMIT_FIX(RLIMIT_DATA); - } else if(list[0] == "f"){ - _RLIMIT_FIX(RLIMIT_FSIZE); - } else if(list[0] == "n"){ - _RLIMIT_FIX(RLIMIT_NOFILE); - } else if(list[0] == "s"){ - _RLIMIT_FIX(RLIMIT_STACK); - } else if(list[0] == "t"){ - _RLIMIT_FIX(RLIMIT_CPU); - } else { - errno = EINVAL; - break; - } - if(!res) - break; - - return 0; - } while(false); - logger.error("Unable to process ulimit: %s(%s)", - pair.c_str(), strerror(errno)); - return -1; -#else - return 0; // Maybe it's ok anyway... 
+ + if(list[0].trim() == "c"){ + _RLIMIT_FIX(RLIMIT_CORE); + } else if(list[0] == "d"){ + _RLIMIT_FIX(RLIMIT_DATA); + } else if(list[0] == "f"){ + _RLIMIT_FIX(RLIMIT_FSIZE); + } else if(list[0] == "n"){ + _RLIMIT_FIX(RLIMIT_NOFILE); + } else if(list[0] == "s"){ + _RLIMIT_FIX(RLIMIT_STACK); + } else if(list[0] == "t"){ + _RLIMIT_FIX(RLIMIT_CPU); + } else { + errno = EINVAL; + } + if(res){ + logger.error("Unable to process ulimit: %s res=%d error=%d(%s)", + pair.c_str(), res, errno, strerror(errno)); + return -1; + } #endif + return 0; } void diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp index 865fe8b49a0..c23133245a7 100644 --- a/ndb/test/run-test/main.cpp +++ b/ndb/test/run-test/main.cpp @@ -106,13 +106,6 @@ main(int argc, const char ** argv){ if(!setup_hosts(g_config)) goto end; - if(!start_processes(g_config, atrt_process::NDB_MGM)) - goto end; - - if(!connect_ndb_mgm(g_config)){ - goto end; - } - /** * Main loop */ @@ -122,25 +115,32 @@ main(int argc, const char ** argv){ */ if(restart){ g_logger.info("(Re)starting ndb processes"); + if(!stop_processes(g_config, atrt_process::NDB_MGM)) + goto end; + if(!stop_processes(g_config, atrt_process::NDB_DB)) goto end; - if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NO_CONTACT)) + if(!start_processes(g_config, atrt_process::NDB_MGM)) goto end; + if(!connect_ndb_mgm(g_config)){ + goto end; + } + if(!start_processes(g_config, atrt_process::NDB_DB)) goto end; - + if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED)) goto end; - + for(Uint32 i = 0; i<3; i++) if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED)) goto started; - + goto end; - -started: + + started: g_logger.info("Ndb start completed"); } From a622fdc951308d0b80ad1f0748c30bd0d4e3243e Mon Sep 17 00:00:00 2001 From: "ndbdev@eel.hemma.oreland.se" <> Date: Thu, 5 Aug 2004 08:15:58 +0200 Subject: [PATCH 61/93] Remove timeout test, as default timeout now is a year --- ndb/test/run-test/daily-basic-tests.txt | 54 ++++++++++++------------- 1 file 
changed, 27 insertions(+), 27 deletions(-) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 0b64d9cf9c2..b63fbbc450c 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -152,33 +152,33 @@ cmd: testBasic args: -n MassiveRollback2 T1 T6 T13 #-m 500 1: testBasic -n ReadConsistency T6 -max-time: 500 -cmd: testTimeout -args: -n DontTimeoutTransaction T1 - -max-time: 500 -cmd: testTimeout -args: -n DontTimeoutTransaction5 T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutTransaction T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutTransaction5 T1 - -max-time: 500 -cmd: testTimeout -args: -n BuddyTransNoTimeout T1 - -max-time: 500 -cmd: testTimeout -args: -n BuddyTransNoTimeout5 T1 - -max-time: 500 -cmd: testTimeout -args: -n TimeoutRandTransaction T1 +#max-time: 500 +#cmd: testTimeout +#args: -n DontTimeoutTransaction T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n DontTimeoutTransaction5 T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n TimeoutTransaction T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n TimeoutTransaction5 T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n BuddyTransNoTimeout T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n BuddyTransNoTimeout5 T1 +# +#max-time: 500 +#cmd: testTimeout +#args: -n TimeoutRandTransaction T1 # # SCAN TESTS # From 7c6a4ce7bc7eb9e9b57bcd1907f3403dfa68966d Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Thu, 5 Aug 2004 01:56:41 -0700 Subject: [PATCH 62/93] -O4 may be slower if you don't have profiling info (as HP pal told me on OSCON) --- BUILD/compile-hpux11-parisc2-aCC | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD/compile-hpux11-parisc2-aCC b/BUILD/compile-hpux11-parisc2-aCC index 09bb5821b6d..1bdef94e080 100755 --- a/BUILD/compile-hpux11-parisc2-aCC +++ b/BUILD/compile-hpux11-parisc2-aCC @@ -13,7 +13,7 @@ fi # Also sends +Oprocelim and +Ofastaccess to the 
linker # (see ld(1)). -release_flags="-fast +O4" +release_flags="-fast +O3" # -z Do not bind anything to address zero. This option # allows runtime detection of null pointers. See the From f8c0850521709173112cb2be1e9668d32e0d9b55 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Thu, 5 Aug 2004 02:43:18 -0700 Subject: [PATCH 63/93] Cleanup in libmysql. --- libmysql/libmysql.c | 475 +++++++++++++++++++++++++------------------- tests/client_test.c | 6 + 2 files changed, 272 insertions(+), 209 deletions(-) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index fc7728c98e0..b9c8201ed56 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -3018,6 +3018,7 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, static void set_zero_time(MYSQL_TIME *tm) { bzero((void *)tm, sizeof(*tm)); + tm->time_type= MYSQL_TIMESTAMP_NONE; } @@ -3041,86 +3042,203 @@ static void set_zero_time(MYSQL_TIME *tm) static uint read_binary_time(MYSQL_TIME *tm, uchar **pos) { - uchar *to; uint length; /* net_field_length will set pos to the first byte of data */ if (!(length= net_field_length(pos))) - { set_zero_time(tm); - return 0; + else + { + uchar *to= *pos; + tm->neg= (bool) to[0]; + + tm->day= (ulong) sint4korr(to+1); + tm->hour= (uint) to[5]; + tm->minute= (uint) to[6]; + tm->second= (uint) to[7]; + tm->second_part= (length > 8) ? (ulong) sint4korr(to+8) : 0; + + tm->year= tm->month= 0; + tm->time_type= MYSQL_TIMESTAMP_TIME; } - - to= *pos; - tm->neg= (bool) to[0]; - - tm->day= (ulong) sint4korr(to+1); - tm->hour= (uint) to[5]; - tm->minute= (uint) to[6]; - tm->second= (uint) to[7]; - tm->second_part= (length > 8) ? 
(ulong) sint4korr(to+8) : 0; - - tm->year= tm->month= 0; return length; } static uint read_binary_datetime(MYSQL_TIME *tm, uchar **pos) { - uchar *to; uint length; if (!(length= net_field_length(pos))) - { set_zero_time(tm); - return 0; - } - - to= *pos; - - tm->neg= 0; - tm->year= (uint) sint2korr(to); - tm->month= (uint) to[2]; - tm->day= (uint) to[3]; - - if (length > 4) - { - tm->hour= (uint) to[4]; - tm->minute= (uint) to[5]; - tm->second= (uint) to[6]; - } else - tm->hour= tm->minute= tm->second= 0; - tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0; + { + uchar *to= *pos; + + tm->neg= 0; + tm->year= (uint) sint2korr(to); + tm->month= (uint) to[2]; + tm->day= (uint) to[3]; + + if (length > 4) + { + tm->hour= (uint) to[4]; + tm->minute= (uint) to[5]; + tm->second= (uint) to[6]; + } + else + tm->hour= tm->minute= tm->second= 0; + tm->second_part= (length > 7) ? (ulong) sint4korr(to+7) : 0; + tm->time_type= MYSQL_TIMESTAMP_DATETIME; + } return length; } static uint read_binary_date(MYSQL_TIME *tm, uchar **pos) { - uchar *to; uint length; if (!(length= net_field_length(pos))) - { set_zero_time(tm); - return 0; + else + { + uchar *to= *pos; + tm->year = (uint) sint2korr(to); + tm->month= (uint) to[2]; + tm->day= (uint) to[3]; + + tm->hour= tm->minute= tm->second= 0; + tm->second_part= 0; + tm->neg= 0; + tm->time_type= MYSQL_TIMESTAMP_DATE; } - - to= *pos; - tm->year = (uint) sint2korr(to); - tm->month= (uint) to[2]; - tm->day= (uint) to[3]; - - tm->hour= tm->minute= tm->second= 0; - tm->second_part= 0; - tm->neg= 0; return length; } -/* Convert integer value to client buffer type. */ +/* + Convert string to supplied buffer of any type. 
-static void send_data_long(MYSQL_BIND *param, MYSQL_FIELD *field, - longlong value) + SYNOPSIS + fetch_string_with_conversion() + param output buffer descriptor + value column data + length data length +*/ + +static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, + uint length) +{ + char *buffer= (char *)param->buffer; + int err= 0; + + /* + This function should support all target buffer types: the rest + of conversion functions can delegate conversion to it. + */ + switch(param->buffer_type) { + case MYSQL_TYPE_NULL: /* do nothing */ + break; + case MYSQL_TYPE_TINY: + { + uchar data= (uchar) my_strntol(&my_charset_latin1, value, length, 10, + NULL, &err); + *buffer= data; + break; + } + case MYSQL_TYPE_SHORT: + { + short data= (short) my_strntol(&my_charset_latin1, value, length, 10, + NULL, &err); + shortstore(buffer, data); + break; + } + case MYSQL_TYPE_LONG: + { + int32 data= (int32)my_strntol(&my_charset_latin1, value, length, 10, + NULL, &err); + longstore(buffer, data); + break; + } + case MYSQL_TYPE_LONGLONG: + { + longlong data= my_strntoll(&my_charset_latin1, value, length, 10, + NULL, &err); + longlongstore(buffer, data); + break; + } + case MYSQL_TYPE_FLOAT: + { + float data = (float) my_strntod(&my_charset_latin1, value, length, + NULL, &err); + floatstore(buffer, data); + break; + } + case MYSQL_TYPE_DOUBLE: + { + double data= my_strntod(&my_charset_latin1, value, length, NULL, &err); + doublestore(buffer, data); + break; + } + case MYSQL_TYPE_TIME: + { + MYSQL_TIME *tm= (MYSQL_TIME *)buffer; + str_to_time(value, length, tm, &err); + break; + } + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_DATETIME: + { + MYSQL_TIME *tm= (MYSQL_TIME *)buffer; + str_to_datetime(value, length, tm, 0, &err); + break; + } + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + default: + { + /* + Copy column data to the buffer taking into account offset, + data length and buffer length. 
+ */ + char *start= value + param->offset; + char *end= value + length; + ulong copy_length; + if (start < end) + { + copy_length= end - start; + /* We've got some data beyond offset: copy up to buffer_length bytes */ + if (param->buffer_length) + memcpy(buffer, start, min(copy_length, param->buffer_length)); + } + else + copy_length= 0; + if (copy_length < param->buffer_length) + buffer[copy_length]= '\0'; + /* + param->length will always contain length of entire column; + number of copied bytes may be way different: + */ + *param->length= length; + break; + } + } +} + + +/* + Convert integer value to client buffer of any type. + + SYNOPSIS + fetch_long_with_conversion() + param output buffer descriptor + field column metadata + value column data +*/ + +static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, + longlong value) { char *buffer= (char *)param->buffer; uint field_is_unsigned= (field->flags & UNSIGNED_FLAG); @@ -3142,45 +3260,47 @@ static void send_data_long(MYSQL_BIND *param, MYSQL_FIELD *field, break; case MYSQL_TYPE_FLOAT: { - float data= (field_is_unsigned ? (float) ulonglong2double(value) : - (float) value); + float data= field_is_unsigned ? (float) ulonglong2double(value) : + (float) value; floatstore(buffer, data); break; } case MYSQL_TYPE_DOUBLE: { - double data= (field_is_unsigned ? ulonglong2double(value) : - (double) value); + double data= field_is_unsigned ? ulonglong2double(value) : + (double) value; doublestore(buffer, data); break; } default: { - char tmp[22]; /* Enough for longlong */ - uint length= (uint)(longlong10_to_str(value,(char *)tmp, - field_is_unsigned ? 
10: -10) - - tmp); - ulong copy_length= min((ulong)length-param->offset, param->buffer_length); - if ((long) copy_length < 0) - copy_length=0; - else - memcpy(buffer, (char *)tmp+param->offset, copy_length); - *param->length= length; - - if (copy_length != param->buffer_length) - *(buffer+copy_length)= '\0'; + char buff[22]; /* Enough for longlong */ + char *end= longlong10_to_str(value, buff, field_is_unsigned ? 10: -10); + /* Resort to string conversion which supports all typecodes */ + return fetch_string_with_conversion(param, buff, end - buff); } } } -/* Convert Double to buffer types */ +/* + Convert double/float column to supplied buffer of any type. -static void send_data_double(MYSQL_BIND *param, double value) + SYNOPSIS + fetch_float_with_conversion() + param output buffer descriptor + field column metadata + value column data + width default number of significant digits used when converting + float/double to string +*/ + +static void fetch_float_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, + double value, int width) { char *buffer= (char *)param->buffer; - switch(param->buffer_type) { + switch (param->buffer_type) { case MYSQL_TYPE_NULL: /* do nothing */ break; case MYSQL_TYPE_TINY: @@ -3211,167 +3331,108 @@ static void send_data_double(MYSQL_BIND *param, double value) } default: { - char tmp[128]; - uint length= my_sprintf(tmp,(tmp,"%g",value)); - ulong copy_length= min((ulong)length-param->offset, param->buffer_length); - if ((long) copy_length < 0) - copy_length=0; + /* + Resort to fetch_string_with_conversion: this should handle + floating point -> string conversion nicely, honor all typecodes + and param->offset possibly set in mysql_stmt_fetch_column + */ + char buff[331]; + char *end; + /* TODO: move this to a header shared between client and server. 
*/ +#define NOT_FIXED_DEC 31 + if (field->decimals >= 31) +#undef NOT_FIXED_DEC + { + sprintf(buff, "%-*.*g", (int) param->buffer_length, width, value); + end= strcend(buff, ' '); + *end= 0; + } else - memcpy(buffer, (char *)tmp+param->offset, copy_length); - *param->length= length; - - if (copy_length != param->buffer_length) - *(buffer+copy_length)= '\0'; + { + sprintf(buff, "%.*f", field->decimals, value); + end= strend(buff); + } + return fetch_string_with_conversion(param, buff, end - buff); } } } -/* Convert string to buffer types */ +/* + Fetch time/date/datetime to supplied buffer of any type -static void send_data_str(MYSQL_BIND *param, char *value, uint length) -{ - char *buffer= (char *)param->buffer; - int err=0; + SYNOPSIS + param output buffer descriptor + time column data +*/ - switch(param->buffer_type) { - case MYSQL_TYPE_NULL: /* do nothing */ - break; - case MYSQL_TYPE_TINY: - { - uchar data= (uchar)my_strntol(&my_charset_latin1,value,length,10,NULL, - &err); - *buffer= data; - break; - } - case MYSQL_TYPE_SHORT: - { - short data= (short)my_strntol(&my_charset_latin1,value,length,10,NULL, - &err); - shortstore(buffer, data); - break; - } - case MYSQL_TYPE_LONG: - { - int32 data= (int32)my_strntol(&my_charset_latin1,value,length,10,NULL, - &err); - longstore(buffer, data); - break; - } - case MYSQL_TYPE_LONGLONG: - { - longlong data= my_strntoll(&my_charset_latin1,value,length,10,NULL,&err); - longlongstore(buffer, data); - break; - } - case MYSQL_TYPE_FLOAT: - { - float data = (float)my_strntod(&my_charset_latin1,value,length,NULL,&err); - floatstore(buffer, data); - break; - } - case MYSQL_TYPE_DOUBLE: - { - double data= my_strntod(&my_charset_latin1,value,length,NULL,&err); - doublestore(buffer, data); - break; - } - case MYSQL_TYPE_TIME: - { - int dummy; - MYSQL_TIME *tm= (MYSQL_TIME *)buffer; - str_to_time(value, length, tm, &dummy); - break; - } - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_DATETIME: - { - int dummy; - MYSQL_TIME *tm= 
(MYSQL_TIME *)buffer; - str_to_datetime(value, length, tm, 0, &dummy); - break; - } - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_BLOB: - *param->length= length; - length= min(length-param->offset, param->buffer_length); - if ((long) length > 0) - memcpy(buffer, value+param->offset, length); - break; - default: - *param->length= length; - length= min(length-param->offset, param->buffer_length); - if ((long) length < 0) - length= 0; - else - memcpy(buffer, value+param->offset, length); - if (length != param->buffer_length) - buffer[length]= '\0'; - } -} - - -static void send_data_time(MYSQL_BIND *param, MYSQL_TIME ltime, - uint length) +static void fetch_datetime_with_conversion(MYSQL_BIND *param, + MYSQL_TIME *time) { switch (param->buffer_type) { case MYSQL_TYPE_NULL: /* do nothing */ break; - case MYSQL_TYPE_DATE: case MYSQL_TYPE_TIME: case MYSQL_TYPE_DATETIME: case MYSQL_TYPE_TIMESTAMP: - { - MYSQL_TIME *tm= (MYSQL_TIME *)param->buffer; - - tm->year= ltime.year; - tm->month= ltime.month; - tm->day= ltime.day; - - tm->hour= ltime.hour; - tm->minute= ltime.minute; - tm->second= ltime.second; - - tm->second_part= ltime.second_part; - tm->neg= ltime.neg; + /* XXX: should we copy only relevant members here? 
*/ + *(MYSQL_TIME *)(param->buffer)= *time; break; - } default: { + /* + Convert time value to string and delegate the rest to + fetch_string_with_conversion: + */ char buff[25]; + uint length; - if (!length) - ltime.time_type= MYSQL_TIMESTAMP_NONE; - switch (ltime.time_type) { + switch (time->time_type) { case MYSQL_TIMESTAMP_DATE: - length= my_sprintf(buff,(buff, "%04d-%02d-%02d", ltime.year, - ltime.month,ltime.day)); + length= my_sprintf(buff,(buff, "%04d-%02d-%02d", + time->year, time->month, time->day)); break; case MYSQL_TIMESTAMP_DATETIME: length= my_sprintf(buff,(buff, "%04d-%02d-%02d %02d:%02d:%02d", - ltime.year,ltime.month,ltime.day, - ltime.hour,ltime.minute,ltime.second)); + time->year, time->month, time->day, + time->hour, time->minute, time->second)); break; case MYSQL_TIMESTAMP_TIME: length= my_sprintf(buff, (buff, "%02d:%02d:%02d", - ltime.hour,ltime.minute,ltime.second)); + time->hour, time->minute, time->second)); break; default: length= 0; buff[0]='\0'; + break; } - send_data_str(param, (char *)buff, length); + /* Resort to string conversion */ + fetch_string_with_conversion(param, (char *)buff, length); + break; } } } -/* Fetch data to client buffers with conversion. */ +/* + Fetch and convert result set column to output buffer. -static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) + SYNOPSIS + fetch_result_with_conversion() + param output buffer descriptor + field column metadata + row points to a column of result set tuple in binary format + + DESCRIPTION + This is a fallback implementation of column fetch used + if column and output buffer types do not match. + Increases tuple pointer to point at the next column within the + tuple. 
+*/ + +static void fetch_result_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, + uchar **row) { ulong length; enum enum_field_types field_type= field->type; @@ -3381,9 +3442,9 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) { char value= (char) **row; uint field_is_unsigned= (field->flags & UNSIGNED_FLAG); - longlong data= ((field_is_unsigned) ? (longlong) (unsigned char) value: - (longlong) value); - send_data_long(param, field, data); + longlong data= (field_is_unsigned) ? (longlong) (unsigned char) value: + (longlong) value; + fetch_long_with_conversion(param, field, data); length= 1; break; } @@ -3394,7 +3455,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) uint field_is_unsigned= (field->flags & UNSIGNED_FLAG); longlong data= ((field_is_unsigned) ? (longlong) (unsigned short) value: (longlong) value); - send_data_long(param, field, data); + fetch_long_with_conversion(param, field, data); length= 2; break; } @@ -3404,14 +3465,14 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) uint field_is_unsigned= (field->flags & UNSIGNED_FLAG); longlong data= ((field_is_unsigned) ? 
(longlong) (unsigned long) value: (longlong) value); - send_data_long(param, field, data); + fetch_long_with_conversion(param, field, data); length= 4; break; } case MYSQL_TYPE_LONGLONG: { longlong value= (longlong)sint8korr(*row); - send_data_long(param, field, value); + fetch_long_with_conversion(param, field, value); length= 8; break; } @@ -3419,7 +3480,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) { float value; float4get(value,*row); - send_data_double(param,value); + fetch_float_with_conversion(param, field, value, FLT_DIG); length= 4; break; } @@ -3427,7 +3488,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) { double value; float8get(value,*row); - send_data_double(param,value); + fetch_float_with_conversion(param, field, value, DBL_DIG); length= 8; break; } @@ -3436,8 +3497,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) MYSQL_TIME tm; length= read_binary_date(&tm, row); - tm.time_type= MYSQL_TIMESTAMP_DATE; - send_data_time(param, tm, length); + fetch_datetime_with_conversion(param, &tm); break; } case MYSQL_TYPE_TIME: @@ -3445,8 +3505,7 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) MYSQL_TIME tm; length= read_binary_time(&tm, row); - tm.time_type= MYSQL_TIMESTAMP_TIME; - send_data_time(param, tm, length); + fetch_datetime_with_conversion(param, &tm); break; } case MYSQL_TYPE_DATETIME: @@ -3455,13 +3514,12 @@ static void fetch_results(MYSQL_BIND *param, MYSQL_FIELD *field, uchar **row) MYSQL_TIME tm; length= read_binary_datetime(&tm, row); - tm.time_type= MYSQL_TIMESTAMP_DATETIME; - send_data_time(param, tm, length); + fetch_datetime_with_conversion(param, &tm); break; } default: length= net_field_length(row); - send_data_str(param,(char*) *row,length); + fetch_string_with_conversion(param, (char*) *row, length); break; } *row+= length; @@ -3606,7 +3664,6 @@ static void skip_result_string(MYSQL_BIND *param 
__attribute__((unused)), } - /* Setup the bind buffers for resultset processing */ @@ -3825,7 +3882,7 @@ static int stmt_fetch_row(MYSQL_STMT *stmt, uchar *row) if (field->type == bind->buffer_type) (*bind->fetch_result)(bind, &row); else - fetch_results(bind, field, &row); + fetch_result_with_conversion(bind, field, &row); } if (!((bit<<=1) & 255)) { @@ -3917,7 +3974,7 @@ int STDCALL mysql_stmt_fetch_column(MYSQL_STMT *stmt, MYSQL_BIND *bind, *bind->length= *param->length; else bind->length= ¶m->internal_length; /* Needed for fetch_result() */ - fetch_results(bind, field, &row); + fetch_result_with_conversion(bind, field, &row); } else { diff --git a/tests/client_test.c b/tests/client_test.c index 3652c0f7c8e..de77d4517dd 100644 --- a/tests/client_test.c +++ b/tests/client_test.c @@ -9862,11 +9862,17 @@ static void test_bug4026() time_in.minute= 59; time_in.second= 59; time_in.second_part= 123456; + /* + This is not necessary, just to make assert below work: this field + is filled in when time is received from server + */ + time_in.time_type= MYSQL_TIMESTAMP_TIME; datetime_in= time_in; datetime_in.year= 2003; datetime_in.month= 12; datetime_in.day= 31; + datetime_in.time_type= MYSQL_TIMESTAMP_DATETIME; mysql_stmt_bind_param(stmt, bind); From b1fd4d883956c0b1dbad1539e4002fc65ba51f5e Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Thu, 5 Aug 2004 12:54:59 +0200 Subject: [PATCH 64/93] wl2010 Added NDB_DEBUG(--with-debug) and NDB_DEBUG_FULL(--with-debug=full) --- configure.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.in b/configure.in index afcc60942ce..b78881a253c 100644 --- a/configure.in +++ b/configure.in @@ -2961,10 +2961,10 @@ then if test "$with_debug" = "yes" then # Medium debug. 
- NDB_DEFS="-DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" + NDB_DEFS="-DNDB_DEBUG -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" elif test "$with_debug" = "full" then - NDB_DEFS="-DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" + NDB_DEFS="-DNDB_DEBUG_FULL -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" else NDB_DEFS="-DNDEBUG" fi From b13e7483f5c4c7536ca92e52ffa49cef23db30e0 Mon Sep 17 00:00:00 2001 From: "ndbdev@eel.hemma.oreland.se" <> Date: Thu, 5 Aug 2004 13:00:32 +0200 Subject: [PATCH 65/93] wl1292 testInterpreter is no longer built --- ndb/test/run-test/daily-basic-tests.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index b63fbbc450c..d34c37021bf 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -474,10 +474,10 @@ max-time: 500 cmd: testNdbApi args: -n UpdateWithoutValues T6 -max-time: 500 -cmd: testInterpreter -args: T1 - +#max-time: 500 +#cmd: testInterpreter +#args: T1 +# max-time: 1500 cmd: testOperations args: -n ReadRead From 2082e8b9e4e2287e83077aef10066140c39064d1 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Thu, 5 Aug 2004 15:38:08 +0200 Subject: [PATCH 66/93] bug#4909 + testSystemRestart -n SR_FULLDB 1) Fix so that scan takeover is possible after SR 2) Reserve two pages for SR "zero pages" --- ndb/include/ndbapi/NdbOperation.hpp | 2 - ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 46 +++++++++++-------- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 3 +- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 5 ++ ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 7 +++ ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp | 8 ++-- .../blocks/dbtup/DbtupSystemRestart.cpp | 26 +++++++++-- 7 files changed, 67 insertions(+), 30 deletions(-) diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp index 625fc8b233a..c48dccd4864 100644 --- a/ndb/include/ndbapi/NdbOperation.hpp +++ 
b/ndb/include/ndbapi/NdbOperation.hpp @@ -746,8 +746,6 @@ protected: int prepareSendInterpreted(); // Help routine to prepare* - void TCOPCONF(Uint32 anNdbColumnImplLen); // Handle TC[KEY/INDX]CONF signal - int receiveTCKEYREF(NdbApiSignal*); diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index f3a6ce8f994..0d801493ac4 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -107,6 +107,7 @@ operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){ #endif //#define MARKER_TRACE 1 +//#define TRACE_SCAN_TAKEOVER 1 const Uint32 NR_ScanNo = 0; @@ -1001,7 +1002,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) } else { fragptr.p->tableFragptr = fragptr.i; } - + if (tempTable) { //-------------------------------------------- // reqinfo bit 3-4 = 2 means temporary table @@ -3574,6 +3575,10 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal) key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo); key.fragPtrI = fragptr.i; c_scanTakeOverHash.find(scanptr, key); +#ifdef TRACE_SCAN_TAKEOVER + if(scanptr.i == RNIL) + ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI); +#endif } if (scanptr.i == RNIL) { jam(); @@ -8272,7 +8277,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) scanptr.p->scanLocalref[1] = 0; scanptr.p->scanLocalFragid = 0; scanptr.p->scanTcWaiting = ZTRUE; - scanptr.p->scanNumber = ZNIL; + scanptr.p->scanNumber = ~0; for (Uint32 i = 0; i < scanConcurrentOperations; i++) { jam(); @@ -8327,6 +8332,11 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) #ifdef VM_TRACE ScanRecordPtr tmp; ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p)); +#endif +#ifdef TRACE_SCAN_TAKEOVER + ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d", + scanptr.p->scanNumber, scanptr.p->fragPtrI, + tabptr.i, scanFragReq->fragmentNo, fragptr.i, fragptr.p->tableFragptr); #endif c_scanTakeOverHash.add(scanptr); } @@ 
-8418,6 +8428,9 @@ void Dblqh::finishScanrec(Signal* signal) if(scanptr.p->scanKeyinfoFlag){ jam(); ScanRecordPtr tmp; +#ifdef TRACE_SCAN_TAKEOVER + ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI); +#endif c_scanTakeOverHash.remove(tmp, * scanptr.p); ndbrequire(tmp.p == scanptr.p); } @@ -8461,6 +8474,9 @@ void Dblqh::finishScanrec(Signal* signal) ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p)); #endif c_scanTakeOverHash.add(restart); +#ifdef TRACE_SCAN_TAKEOVER + ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI); +#endif } scanptr = restart; @@ -12034,18 +12050,18 @@ void Dblqh::writeLogfileLab(Signal* signal) /* WRITE. */ /*---------------------------------------------------------------------------*/ switch (logFilePtr.p->fileChangeState) { -#if 0 - case LogFileRecord::BOTH_WRITES_ONGOING: - jam(); - ndbout_c("not crashing!!"); - // Fall-through -#endif case LogFileRecord::NOT_ONGOING: jam(); checkGcpCompleted(signal, ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1), lfoPtr.p->lfoWordWritten); break; +#if 0 + case LogFileRecord::BOTH_WRITES_ONGOING: + jam(); + ndbout_c("not crashing!!"); + // Fall-through +#endif case LogFileRecord::WRITE_PAGE_ZERO_ONGOING: case LogFileRecord::LAST_WRITE_ONGOING: jam(); @@ -13133,20 +13149,11 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal) ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); if (!getFragmentrec(signal, fragId)) { - jam(); - /* ---------------------------------------------------------------------- - * FRAGMENT WAS NOT DEFINED YET. PUT IT IN. IF NO LOCAL CHECKPOINT EXISTED - * THEN THE FRAGMENT HAS ALREADY BEEN ADDED. 
- * ---------------------------------------------------------------------- */ - if (!insertFragrec(signal, fragId)) { - jam(); - startFragRefLab(signal); - return; - }//if + startFragRefLab(signal); + return; }//if tabptr.p->tableStatus = Tablerec::TABLE_DEFINED; - initFragrec(signal, tabptr.i, fragId, ZPRIMARY_NODE); initFragrecSr(signal); if (startFragReq->lcpNo == ZNIL) { jam(); @@ -16414,6 +16421,7 @@ void Dblqh::initFragrec(Signal* signal, fragptr.p->execSrNoReplicas = 0; fragptr.p->fragDistributionKey = 0; fragptr.p->activeTcCounter = 0; + fragptr.p->tableFragptr = RNIL; }//Dblqh::initFragrec() /* ========================================================================== diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 066fb24f09c..e112bed948d 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -5280,8 +5280,9 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal) signal->theData[1] = apiConnectptr.p->transid[0]; signal->theData[2] = apiConnectptr.p->transid[1]; signal->theData[3] = ZROLLBACKNOTALLOWED; + signal->theData[4] = apiConnectptr.p->apiConnectstate; sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF, - signal, 4, JBB); + signal, 5, JBB); break; /* SEND A REFUSAL SIGNAL*/ case CS_ABORTING: diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 71af563599c..b792edf9333 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -2322,10 +2322,15 @@ private: // Counters for num UNDO log records executed Uint32 cSrUndoRecords[9]; + STATIC_CONST(MAX_PARALLELL_TUP_SRREQ = 2); + Uint32 c_sr_free_page_0; + Uint32 c_errorInsert4000TableId; void initGlobalTemporaryVars(); void reportMemoryUsage(Signal* signal, int incDec); + + #ifdef VM_TRACE struct Th { Uint32 data[1]; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp index 
c38fde23404..549bb3a608f 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp @@ -201,6 +201,10 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal) ndbrequire(chunk.pageCount <= alloc); if(chunk.pageCount != 0){ chunks.push_back(chunk); + if(chunk.pageCount != alloc) { + ndbout_c(" Tried to allocate %d - only allocated %d - free: %d", + alloc, chunk.pageCount, free); + } } else { ndbout_c(" Failed to alloc %d pages with %d pages free", alloc, free); @@ -212,6 +216,9 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal) ptrCheckGuard(pagePtr, cnoOfPage, page); pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON; } + + if(alloc == 1 && free > 0) + ndbrequire(chunk.pageCount == alloc); } break; } diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp index 410cafee161..cccbcfbe966 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp @@ -139,19 +139,21 @@ void Dbtup::initializePage() ptrAss(pagePtr, page); pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON; - returnCommonArea(1, cnoOfPage - 1); - cnoOfAllocatedPages = 1; + cnoOfAllocatedPages = 1 + MAX_PARALLELL_TUP_SRREQ; + returnCommonArea(cnoOfAllocatedPages, cnoOfPage - cnoOfAllocatedPages); + c_sr_free_page_0 = ~0; }//Dbtup::initializePage() void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, Uint32& noOfPagesAllocated, Uint32& allocPageRef) { - if (noOfPagesToAllocate == 0) { + if (noOfPagesToAllocate == 0){ ljam(); noOfPagesAllocated = 0; return; }//if + Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1); for (Uint32 i = firstListToCheck; i < 16; i++) { ljam(); diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp index 580d764c96f..9aec12abbe4 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp @@ -92,12 +92,25 @@ void 
Dbtup::rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr) seizeDiskBufferSegmentRecord(dbsiPtr); riPtr.p->sriDataBufferSegmentP = dbsiPtr.i; - Uint32 retPageRef; + Uint32 retPageRef = RNIL; Uint32 noAllocPages = 1; Uint32 noOfPagesAllocated; - allocConsPages(noAllocPages, noOfPagesAllocated, retPageRef); - ndbrequire(noOfPagesAllocated == 1); - + { + /** + * Use low pages for 0-pages during SR + * bitmask of free pages is kept in c_sr_free_page_0 + */ + Uint32 tmp = c_sr_free_page_0; + for(Uint32 i = 1; i<(1+MAX_PARALLELL_TUP_SRREQ); i++){ + if(tmp & (1 << i)){ + retPageRef = i; + c_sr_free_page_0 = tmp & (~(1 << i)); + break; + } + } + ndbrequire(retPageRef != RNIL); + } + dbsiPtr.p->pdxDataPage[0] = retPageRef; dbsiPtr.p->pdxNumDataPages = 1; dbsiPtr.p->pdxFilePage = 0; @@ -150,7 +163,10 @@ Dbtup::rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr) /* LETS REMOVE IT AND REUSE THE SEGMENT FOR REAL DATA PAGES */ /* REMOVE ONE PAGE ONLY, PAGEP IS ALREADY SET TO THE RESTART INFO PAGE */ /************************************************************************/ - returnCommonArea(pagePtr.i, 1); + { + ndbrequire(pagePtr.i > 0 && pagePtr.i <= MAX_PARALLELL_TUP_SRREQ); + c_sr_free_page_0 |= (1 << pagePtr.i); + } Uint32 undoFileVersion = TzeroDataPage[ZSRI_UNDO_FILE_VER]; lliPtr.i = (undoFileVersion << 2) + (regTabPtr.i & 0x3); From 86d2906be536e7bcc19cd6fbc01a948ad133a204 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Thu, 5 Aug 2004 16:21:33 +0200 Subject: [PATCH 67/93] Initial support for updating configuration "on the fly" Only updates values mgmsrv's on main memory to be used in test prg's --- ndb/include/mgmapi/mgmapi_debug.h | 25 ++++++ ndb/include/util/ConfigValues.hpp | 6 +- ndb/src/mgmapi/mgmapi.cpp | 136 +++++++++++++++++++++++++++++- ndb/src/mgmsrv/MgmtSrvr.cpp | 101 ++++++++++++++++++++++ ndb/src/mgmsrv/MgmtSrvr.hpp | 2 + ndb/src/mgmsrv/Services.cpp | 31 ++++++- ndb/src/mgmsrv/Services.hpp | 2 + 7 files 
changed, 299 insertions(+), 4 deletions(-) diff --git a/ndb/include/mgmapi/mgmapi_debug.h b/ndb/include/mgmapi/mgmapi_debug.h index 2723263e7a7..1c562cd164f 100644 --- a/ndb/include/mgmapi/mgmapi_debug.h +++ b/ndb/include/mgmapi/mgmapi_debug.h @@ -106,6 +106,31 @@ extern "C" { struct ndb_mgm_reply* reply); + /** + * + * @param handle the NDB management handle. + * @param nodeId the node id. 0 = all db nodes + * @param errrorCode the errorCode. + * @param reply the reply message. + * @return 0 if successful or an error code. + */ + int ndb_mgm_set_int_parameter(NdbMgmHandle handle, + int node, + int param, + unsigned value, + struct ndb_mgm_reply* reply); + + int ndb_mgm_set_int64_parameter(NdbMgmHandle handle, + int node, + int param, + unsigned long long value, + struct ndb_mgm_reply* reply); + + int ndb_mgm_set_string_parameter(NdbMgmHandle handle, + int node, + int param, + const char * value, + struct ndb_mgm_reply* reply); #ifdef __cplusplus } #endif diff --git a/ndb/include/util/ConfigValues.hpp b/ndb/include/util/ConfigValues.hpp index 3fbeedb25a0..457488e3c42 100644 --- a/ndb/include/util/ConfigValues.hpp +++ b/ndb/include/util/ConfigValues.hpp @@ -32,9 +32,8 @@ public: class ConstIterator { friend class ConfigValuesFactory; const ConfigValues & m_cfg; - protected: - Uint32 m_currentSection; public: + Uint32 m_currentSection; ConstIterator(const ConfigValues&c) : m_cfg(c) { m_currentSection = 0;} bool openSection(Uint32 key, Uint32 no); @@ -57,6 +56,9 @@ public: ConfigValues & m_cfg; public: Iterator(ConfigValues&c) : ConstIterator(c), m_cfg(c) {} + Iterator(ConfigValues&c, const ConstIterator& i):ConstIterator(c),m_cfg(c){ + m_currentSection = i.m_currentSection; + } bool set(Uint32 key, Uint32 value); bool set(Uint32 key, Uint64 value); diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 8f0c9e3ccf7..c75315f2d89 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -283,6 +283,7 @@ ndb_mgm_call(NdbMgmHandle handle, 
const ParserRow *command_reply, while((name = iter.next()) != NULL) { PropertiesType t; Uint32 val_i; + Uint64 val_64; BaseString val_s; cmd_args->getTypeOf(name, &t); @@ -291,11 +292,15 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow *command_reply, cmd_args->get(name, &val_i); out.println("%s: %d", name, val_i); break; + case PropertiesType_Uint64: + cmd_args->get(name, &val_64); + out.println("%s: %Ld", name, val_64); + break; case PropertiesType_char: cmd_args->get(name, val_s); out.println("%s: %s", name, val_s.c_str()); break; - default: + case PropertiesType_Properties: /* Ignore */ break; } @@ -1591,3 +1596,132 @@ ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request, delete reply; return 0; } + +extern "C" +int +ndb_mgm_set_int_parameter(NdbMgmHandle handle, + int node, + int param, + unsigned value, + struct ndb_mgm_reply*){ + CHECK_HANDLE(handle, 0); + CHECK_CONNECTED(handle, 0); + + Properties args; + args.put("node: ", node); + args.put("param: ", param); + args.put("value: ", value); + + const ParserRow reply[]= { + MGM_CMD("set parameter reply", NULL, ""), + MGM_ARG("result", String, Mandatory, "Error message"), + MGM_END() + }; + + const Properties *prop; + prop= ndb_mgm_call(handle, reply, "set parameter", &args); + + if(prop == NULL) { + SET_ERROR(handle, EIO, "Unable set parameter"); + return -1; + } + + int res= -1; + do { + const char * buf; + if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){ + ndbout_c("ERROR Message: %s\n", buf); + break; + } + res= 0; + } while(0); + + delete prop; + return res; +} + +extern "C" +int +ndb_mgm_set_int64_parameter(NdbMgmHandle handle, + int node, + int param, + unsigned long long value, + struct ndb_mgm_reply*){ + CHECK_HANDLE(handle, 0); + CHECK_CONNECTED(handle, 0); + + Properties args; + args.put("node: ", node); + args.put("param: ", param); + args.put("value: ", value); + + const ParserRow reply[]= { + MGM_CMD("set parameter reply", NULL, ""), + MGM_ARG("result", String, Mandatory, 
"Error message"), + MGM_END() + }; + + const Properties *prop; + prop= ndb_mgm_call(handle, reply, "set parameter", &args); + + if(prop == NULL) { + SET_ERROR(handle, EIO, "Unable set parameter"); + return -1; + } + + int res= -1; + do { + const char * buf; + if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){ + ndbout_c("ERROR Message: %s\n", buf); + break; + } + res= 0; + } while(0); + + delete prop; + return res; +} + +extern "C" +int +ndb_mgm_set_string_parameter(NdbMgmHandle handle, + int node, + int param, + const char * value, + struct ndb_mgm_reply*){ + CHECK_HANDLE(handle, 0); + CHECK_CONNECTED(handle, 0); + + Properties args; + args.put("node: ", node); + args.put("parameter: ", param); + args.put("value: ", value); + + const ParserRow reply[]= { + MGM_CMD("set parameter reply", NULL, ""), + MGM_ARG("result", String, Mandatory, "Error message"), + MGM_END() + }; + + const Properties *prop; + prop= ndb_mgm_call(handle, reply, "set parameter", &args); + + if(prop == NULL) { + SET_ERROR(handle, EIO, "Unable set parameter"); + return -1; + } + + int res= -1; + do { + const char * buf; + if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){ + ndbout_c("ERROR Message: %s\n", buf); + break; + } + res= 0; + } while(0); + + delete prop; + return res; +} diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 2fe4624ab59..a8b095439e4 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2777,3 +2777,104 @@ MgmtSrvr::Allocated_resources::reserve_node(NodeId id) m_mgmsrv.m_reserved_nodes.set(id); } +int +MgmtSrvr::setDbParameter(int node, int param, const char * value, + BaseString& msg){ + /** + * Check parameter + */ + ndb_mgm_configuration_iterator iter(* _config->m_configValues, + CFG_SECTION_NODE); + if(iter.first() != 0){ + msg.assign("Unable to find node section (iter.first())"); + return -1; + } + + Uint32 type = NODE_TYPE_DB + 1; + if(node != 0){ + if(iter.find(CFG_NODE_ID, node) != 0){ + 
msg.assign("Unable to find node (iter.find())"); + return -1; + } + if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){ + msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))"); + return -1; + } + } else { + do { + if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){ + msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))"); + return -1; + } + if(type == NODE_TYPE_DB) + break; + } while(iter.next() == 0); + } + + if(type != NODE_TYPE_DB){ + msg.assfmt("Invalid node type or no such node (%d %d)", + type, NODE_TYPE_DB); + return -1; + } + + int p_type; + unsigned val_32; + unsigned long long val_64; + const char * val_char; + do { + p_type = 0; + if(iter.get(param, &val_32) == 0){ + val_32 = atoi(value); + break; + } + + p_type++; + if(iter.get(param, &val_64) == 0){ + val_64 = atoll(value); + break; + } + p_type++; + if(iter.get(param, &val_char) == 0){ + val_char = value; + break; + } + msg.assign("Could not get parameter"); + return -1; + } while(0); + + bool res = false; + do { + int ret = iter.get(CFG_TYPE_OF_SECTION, &type); + assert(ret == 0); + + if(type != NODE_TYPE_DB) + continue; + + Uint32 node; + ret = iter.get(CFG_NODE_ID, &node); + assert(ret == 0); + + ConfigValues::Iterator i2(_config->m_configValues->m_config, + iter.m_config); + switch(p_type){ + case 0: + res = i2.set(param, val_32); + ndbout_c("Updateing node %d param: %d to %d", node, param, val_32); + break; + case 1: + res = i2.set(param, val_64); + ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32); + break; + case 2: + res = i2.set(param, val_char); + ndbout_c("Updateing node %d param: %d to %s", node, param, val_char); + break; + default: + abort(); + } + assert(res); + } while(node == 0 && iter.next() == 0); + + msg.assign("Success"); + return 0; +} diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 1145f4a5a6b..f677cdbb2d0 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -517,6 +517,8 @@ public: */ int 
getPort() const; + int setDbParameter(int node, int parameter, const char * value, BaseString&); + //************************************************************************** private: //************************************************************************** diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index c94e1455554..c77ddd3f277 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -225,6 +225,16 @@ ParserRow commands[] = { MGM_ARG("parameter", String, Mandatory, "Parameter"), MGM_ARG("value", String, Mandatory, "Value"), + MGM_CMD("config lock", &MgmApiSession::configLock, ""), + + MGM_CMD("config unlock", &MgmApiSession::configUnlock, ""), + MGM_ARG("commit", Int, Mandatory, "Commit changes"), + + MGM_CMD("set parameter", &MgmApiSession::setParameter, ""), + MGM_ARG("node", String, Mandatory, "Node"), + MGM_ARG("parameter", String, Mandatory, "Parameter"), + MGM_ARG("value", String, Mandatory, "Value"), + MGM_END() }; @@ -1248,5 +1258,24 @@ MgmStatService::stopSessions(){ NDB_CLOSE_SOCKET(m_sockets[i]); m_sockets.erase(i); } - +} + +void +MgmApiSession::setParameter(Parser_t::Context &, + Properties const &args) { + BaseString node, param, value; + args.get("node", node); + args.get("parameter", param); + args.get("value", value); + + BaseString result; + int ret = m_mgmsrv.setDbParameter(atoi(node.c_str()), + atoi(param.c_str()), + value.c_str(), + result); + + m_output->println("set parameter reply"); + m_output->println("message: %s", result.c_str()); + m_output->println("result: %d", ret); + m_output->println(""); } diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp index f5d10031d7a..9cf8b59be8f 100644 --- a/ndb/src/mgmsrv/Services.hpp +++ b/ndb/src/mgmsrv/Services.hpp @@ -82,6 +82,8 @@ public: void configUnlock(Parser_t::Context &ctx, const class Properties &args); void configChange(Parser_t::Context &ctx, const class Properties &args); + void setParameter(Parser_t::Context &ctx, 
const class Properties &args); + void repCommand(Parser_t::Context &ctx, const class Properties &args); }; From 48c8a11a4fabfec1e05041d77dac1b16654750f8 Mon Sep 17 00:00:00 2001 From: "kent@mysql.com" <> Date: Thu, 5 Aug 2004 17:05:21 +0200 Subject: [PATCH 68/93] mysqld.cc, mysql_test_run.c: Changed URL in error message, page has moved --- BitKeeper/etc/logging_ok | 1 + netware/mysql_test_run.c | 2 +- sql/mysqld.cc | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index a9cb6429a35..bd9417928be 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -84,6 +84,7 @@ joreland@mysql.com jorge@linux.jorge.mysql.com jplindst@t41.(none) kaj@work.mysql.com +kent@mysql.com konstantin@mysql.com kostja@oak.local lenz@kallisto.mysql.com diff --git a/netware/mysql_test_run.c b/netware/mysql_test_run.c index a69c5015968..fd5725a6414 100644 --- a/netware/mysql_test_run.c +++ b/netware/mysql_test_run.c @@ -170,7 +170,7 @@ void report_stats() log_msg("\nThe .out and .err files in %s may give you some\n", result_dir); log_msg("hint of what when wrong.\n"); log_msg("\nIf you want to report this error, please first read the documentation\n"); - log_msg("at: http://www.mysql.com/doc/M/y/MySQL_test_suite.html\n"); + log_msg("at: http://www.mysql.com/doc/en/MySQL_test_suite.html\n"); } log_msg("\n%.02f total minutes elapsed in the test cases\n\n", total_time / 60); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 78e1268f363..998b5501724 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1788,7 +1788,7 @@ bytes of memory\n", ((ulong) sql_key_cache->key_cache_mem_size + You seem to be running 32-bit Linux and have %d concurrent connections.\n\ If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\ yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\ -the thread stack. Please read http://www.mysql.com/doc/L/i/Linux.html\n\n", +the thread stack. 
Please read http://www.mysql.com/doc/en/Linux.html\n\n", thread_count); } #endif /* HAVE_LINUXTHREADS */ From f851b03af598068a283d6f142fd8df825a04dd6d Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Thu, 5 Aug 2004 17:38:06 +0200 Subject: [PATCH 69/93] Fix bug for TCKEYREF's when using IgnoreError --- ndb/src/ndbapi/NdbConnection.cpp | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbConnection.cpp index db6201ee9bb..cd051bb4609 100644 --- a/ndb/src/ndbapi/NdbConnection.cpp +++ b/ndb/src/ndbapi/NdbConnection.cpp @@ -1477,6 +1477,17 @@ from other transactions. theGlobalCheckpointId = tGCI; } else if ((tNoComp >= tNoSent) && (theLastExecOpInList->theCommitIndicator == 1)){ + + + if (m_abortOption == IgnoreError && theError.code != 0){ + /** + * There's always a TCKEYCONF when using IgnoreError + */ +#ifdef VM_TRACE + ndbout_c("Not completing transaction 2"); +#endif + return -1; + } /**********************************************************************/ // We sent the transaction with Commit flag set and received a CONF with // no Commit flag set. This is clearly an anomaly. 
@@ -1720,6 +1731,16 @@ NdbConnection::OpCompleteFailure() if (theSimpleState == 1) { theCommitStatus = NdbConnection::Aborted; }//if + if (m_abortOption == IgnoreError){ + /** + * There's always a TCKEYCONF when using IgnoreError + */ +#ifdef VM_TRACE + ndbout_c("Not completing transaction"); +#endif + return -1; + } + return 0; // Last operation received } else if (tNoComp > tNoSent) { setOperationErrorCodeAbort(4113); // Too many operations, From a0a5a6a6fb947336afa70a94557da1b7413782a9 Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Thu, 5 Aug 2004 18:23:01 +0200 Subject: [PATCH 70/93] Remove unused config parameters --- BitKeeper/etc/logging_ok | 1 + ndb/include/kernel/kernel_config_parameters.h | 5 ----- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index a9cb6429a35..d24092e1a54 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -97,6 +97,7 @@ miguel@hegel.txg miguel@light. miguel@light.local miguel@sartre.local +mikron@c-fb0ae253.1238-1-64736c10.cust.bredbandsbolaget.se mikron@mikael-ronstr-ms-dator.local mmatthew@markslaptop. 
monty@bitch.mysql.fi diff --git a/ndb/include/kernel/kernel_config_parameters.h b/ndb/include/kernel/kernel_config_parameters.h index 2f63efa4b6c..bb7c6ebd42c 100644 --- a/ndb/include/kernel/kernel_config_parameters.h +++ b/ndb/include/kernel/kernel_config_parameters.h @@ -14,10 +14,7 @@ #define CFG_ACC_SCAN (PRIVATE_BASE + 9) #define CFG_DICT_ATTRIBUTE (PRIVATE_BASE + 10) -#define CFG_DICT_CONNECT (PRIVATE_BASE + 11) -#define CFG_DICT_FRAG_CONNECT (PRIVATE_BASE + 12) #define CFG_DICT_TABLE (PRIVATE_BASE + 13) -#define CFG_DICT_TC_CONNECT (PRIVATE_BASE + 14) #define CFG_DIH_API_CONNECT (PRIVATE_BASE + 15) #define CFG_DIH_CONNECT (PRIVATE_BASE + 16) @@ -27,10 +24,8 @@ #define CFG_DIH_TABLE (PRIVATE_BASE + 20) #define CFG_LQH_FRAG (PRIVATE_BASE + 21) -#define CFG_LQH_CONNECT (PRIVATE_BASE + 22) #define CFG_LQH_TABLE (PRIVATE_BASE + 23) #define CFG_LQH_TC_CONNECT (PRIVATE_BASE + 24) -#define CFG_LQH_REPLICAS (PRIVATE_BASE + 25) #define CFG_LQH_LOG_FILES (PRIVATE_BASE + 26) #define CFG_LQH_SCAN (PRIVATE_BASE + 27) From 75b85ebc60826d2c0e44e37a2a18bb273cc1b15b Mon Sep 17 00:00:00 2001 From: "mronstrom@mysql.com" <> Date: Thu, 5 Aug 2004 18:51:27 +0200 Subject: [PATCH 71/93] Small fix for updated config params --- ndb/src/kernel/vm/Configuration.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 11bad203619..550c6313058 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -548,7 +548,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){ 2 * noOfTransactions); cfg.put(CFG_DIH_CONNECT, - noOfOperations + 46); + noOfOperations + noOfTransactions + 46); cfg.put(CFG_DIH_FRAG_CONNECT, NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes); From 403b91798a7865717d9b6b296e39a2c6c7238213 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Thu, 5 Aug 2004 14:16:43 -0700 Subject: [PATCH 72/93] Cleanup in mysql_time.h/my_time.h headers. 
The first is used in mysql.h, the second is for the rest of time declarations in mysys. --- include/my_time.h | 9 +++++++++ include/mysql_time.h | 18 ++++++++---------- sql/tztime.cc | 1 + sql/tztime.h | 2 -- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/include/my_time.h b/include/my_time.h index 1212f0533e2..1c549ced6b0 100644 --- a/include/my_time.h +++ b/include/my_time.h @@ -28,6 +28,15 @@ C_MODE_START extern ulonglong log_10_int[20]; +/* + Portable time_t replacement. + Should be signed and hold seconds for 1902-2038 range. +*/ +typedef long my_time_t; + +#define MY_TIME_T_MAX LONG_MAX +#define MY_TIME_T_MIN LONG_MIN + #define YY_PART_YEAR 70 /* Flags to str_to_datetime */ diff --git a/include/mysql_time.h b/include/mysql_time.h index 32da27ba33e..ec67d60dea5 100644 --- a/include/mysql_time.h +++ b/include/mysql_time.h @@ -17,7 +17,14 @@ #ifndef _mysql_time_h_ #define _mysql_time_h_ -/* Time declarations shared between server and client library */ +/* + Time declarations shared between the server and client API: + you should not add anything to this header unless it's used + (and hence should be visible) in mysql.h. + If you're looking for a place to add new time-related declaration, + it's most likely my_time.h. See also "C API Handling of Date + and Time Values" chapter in documentation. +*/ enum enum_mysql_timestamp_type { @@ -34,13 +41,4 @@ typedef struct st_mysql_time enum enum_mysql_timestamp_type time_type; } MYSQL_TIME; - -/* - Portable time_t replacement. - Should be signed and hold seconds for 1902-2038 range. 
-*/ -typedef long my_time_t; -#define MY_TIME_T_MAX LONG_MAX -#define MY_TIME_T_MIN LONG_MIN - #endif /* _mysql_time_h_ */ diff --git a/sql/tztime.cc b/sql/tztime.cc index aab0d36b61e..2ed55f2fa4e 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -32,6 +32,7 @@ #include "mysql_priv.h" #else #include +#include #include "tztime.h" #include #endif diff --git a/sql/tztime.h b/sql/tztime.h index 9df5f965f34..69ff176326e 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -19,8 +19,6 @@ #pragma interface /* gcc class interface */ #endif -#include - #if !defined(TESTTIME) && !defined(TZINFO2SQL) /* From 1a5575251a16d1e2e5c76affe48f0e68e02c67bf Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Thu, 5 Aug 2004 23:39:12 +0200 Subject: [PATCH 73/93] removed unused and illegal print method --- ndb/src/kernel/vm/ArrayPool.hpp | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/ndb/src/kernel/vm/ArrayPool.hpp b/ndb/src/kernel/vm/ArrayPool.hpp index c06f48f2e8e..924ed51ee15 100644 --- a/ndb/src/kernel/vm/ArrayPool.hpp +++ b/ndb/src/kernel/vm/ArrayPool.hpp @@ -148,26 +148,6 @@ public: void releaseList(Uint32 n, Uint32 first, Uint32 last); //private: - /** - * Print - * (Run operator NdbOut<< on every element) - */ - void print(NdbOut & out){ -#ifdef VM_TRACE - out << "FirstFree = " << firstFree << endl; - for(Uint32 i = 0; i Date: Fri, 6 Aug 2004 10:01:29 +0400 Subject: [PATCH 74/93] Fix for bug #4756 "STR_TO_DATE() returning bad results with AM/PM". Added support of conversion specifiers mentioned in manual but missing in code.
--- mysql-test/r/date_formats.result | 72 ++++++++++++++-- mysql-test/t/date_formats.test | 18 +++- sql/item_timefunc.cc | 140 ++++++++++++++++++++++++------- 3 files changed, 189 insertions(+), 41 deletions(-) diff --git a/mysql-test/r/date_formats.result b/mysql-test/r/date_formats.result index 6a4935ef3f8..23da99f38bb 100644 --- a/mysql-test/r/date_formats.result +++ b/mysql-test/r/date_formats.result @@ -90,16 +90,23 @@ insert into t1 values ('2003-01-02 11:11:12Pm', '%Y-%m-%d %h:%i:%S%p'), ('10:20:10', '%H:%i:%s'), ('10:20:10', '%h:%i:%s.%f'), +('10:20:10', '%T'), ('10:20:10AM', '%h:%i:%s%p'), +('10:20:10AM', '%r'), ('10:20:10.44AM', '%h:%i:%s.%f%p'), ('15-01-2001 12:59:58', '%d-%m-%Y %H:%i:%S'), ('15 September 2001', '%d %M %Y'), ('15 SEPTEMB 2001', '%d %M %Y'), ('15 MAY 2001', '%d %b %Y'), +('15th May 2001', '%D %b %Y'), ('Sunday 15 MAY 2001', '%W %d %b %Y'), ('Sund 15 MAY 2001', '%W %d %b %Y'), ('Tuesday 00 2002', '%W %U %Y'), ('Thursday 53 1998', '%W %u %Y'), +('Sunday 01 2001', '%W %v %x'), +('Tuesday 52 2001', '%W %V %X'), +('060 2004', '%j %Y'), +('4 53 1998', '%w %u %Y'), ('15-01-2001', '%d-%m-%Y %H:%i:%S'), ('15-01-20', '%d-%m-%y'), ('15-2001-1', '%d-%Y-%c'); @@ -114,16 +121,23 @@ date format str_to_date 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12 10:20:10 %H:%i:%s 0000-00-00 10:20:10 10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10 +10:20:10 %T 0000-00-00 10:20:10 10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10 +10:20:10AM %r 0000-00-00 10:20:10 10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58 15 September 2001 %d %M %Y 2001-09-15 00:00:00 15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00 15 MAY 2001 %d %b %Y 2001-05-15 00:00:00 +15th May 2001 %D %b %Y 2001-05-15 00:00:00 Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00 Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00 Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00 Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00 +Sunday 01 2001 %W %v %x 
2001-01-07 00:00:00 +Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00 +060 2004 %j %Y 2004-02-29 00:00:00 +4 53 1998 %w %u %Y 1998-12-31 00:00:00 15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00 15-01-20 %d-%m-%y 2020-01-15 00:00:00 15-2001-1 %d-%Y-%c 2001-01-15 00:00:00 @@ -138,16 +152,23 @@ date format con 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12 10:20:10 %H:%i:%s 0000-00-00 10:20:10 10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10 +10:20:10 %T 0000-00-00 10:20:10 10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10 +10:20:10AM %r 0000-00-00 10:20:10 10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58 15 September 2001 %d %M %Y 2001-09-15 00:00:00 15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00 15 MAY 2001 %d %b %Y 2001-05-15 00:00:00 +15th May 2001 %D %b %Y 2001-05-15 00:00:00 Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00 Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00 Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00 Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00 +Sunday 01 2001 %W %v %x 2001-01-07 00:00:00 +Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00 +060 2004 %j %Y 2004-02-29 00:00:00 +4 53 1998 %w %u %Y 1998-12-31 00:00:00 15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00 15-01-20 %d-%m-%y 2020-01-15 00:00:00 15-2001-1 %d-%Y-%c 2001-01-15 00:00:00 @@ -162,16 +183,23 @@ date format datetime 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 23:11:12 10:20:10 %H:%i:%s 0000-00-00 10:20:10 10:20:10 %h:%i:%s.%f 0000-00-00 10:20:10 +10:20:10 %T 0000-00-00 10:20:10 10:20:10AM %h:%i:%s%p 0000-00-00 10:20:10 +10:20:10AM %r 0000-00-00 10:20:10 10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 10:20:10.440000 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 12:59:58 15 September 2001 %d %M %Y 2001-09-15 00:00:00 15 SEPTEMB 2001 %d %M %Y 2001-09-15 00:00:00 15 MAY 2001 %d %b %Y 2001-05-15 00:00:00 +15th May 2001 %D %b %Y 2001-05-15 00:00:00 Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 00:00:00 Sund 15 MAY 2001 %W %d %b %Y 
2001-05-15 00:00:00 Tuesday 00 2002 %W %U %Y 2002-01-01 00:00:00 Thursday 53 1998 %W %u %Y 1998-12-31 00:00:00 +Sunday 01 2001 %W %v %x 2001-01-07 00:00:00 +Tuesday 52 2001 %W %V %X 2002-01-01 00:00:00 +060 2004 %j %Y 2004-02-29 00:00:00 +4 53 1998 %w %u %Y 1998-12-31 00:00:00 15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 00:00:00 15-01-20 %d-%m-%y 2020-01-15 00:00:00 15-2001-1 %d-%Y-%c 2001-01-15 00:00:00 @@ -186,16 +214,23 @@ date format date2 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 2003-01-02 10:20:10 %H:%i:%s 0000-00-00 10:20:10 %h:%i:%s.%f 0000-00-00 +10:20:10 %T 0000-00-00 10:20:10AM %h:%i:%s%p 0000-00-00 +10:20:10AM %r 0000-00-00 10:20:10.44AM %h:%i:%s.%f%p 0000-00-00 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 2001-01-15 15 September 2001 %d %M %Y 2001-09-15 15 SEPTEMB 2001 %d %M %Y 2001-09-15 15 MAY 2001 %d %b %Y 2001-05-15 +15th May 2001 %D %b %Y 2001-05-15 Sunday 15 MAY 2001 %W %d %b %Y 2001-05-15 Sund 15 MAY 2001 %W %d %b %Y 2001-05-15 Tuesday 00 2002 %W %U %Y 2002-01-01 Thursday 53 1998 %W %u %Y 1998-12-31 +Sunday 01 2001 %W %v %x 2001-01-07 +Tuesday 52 2001 %W %V %X 2002-01-01 +060 2004 %j %Y 2004-02-29 +4 53 1998 %w %u %Y 1998-12-31 15-01-2001 %d-%m-%Y %H:%i:%S 2001-01-15 15-01-20 %d-%m-%y 2020-01-15 15-2001-1 %d-%Y-%c 2001-01-15 @@ -210,16 +245,23 @@ date format time 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 23:11:12 10:20:10 %H:%i:%s 10:20:10 10:20:10 %h:%i:%s.%f 10:20:10 +10:20:10 %T 10:20:10 10:20:10AM %h:%i:%s%p 10:20:10 +10:20:10AM %r 10:20:10 10:20:10.44AM %h:%i:%s.%f%p 10:20:10.440000 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 12:59:58 15 September 2001 %d %M %Y 00:00:00 15 SEPTEMB 2001 %d %M %Y 00:00:00 15 MAY 2001 %d %b %Y 00:00:00 +15th May 2001 %D %b %Y 00:00:00 Sunday 15 MAY 2001 %W %d %b %Y 00:00:00 Sund 15 MAY 2001 %W %d %b %Y 00:00:00 Tuesday 00 2002 %W %U %Y 00:00:00 Thursday 53 1998 %W %u %Y 00:00:00 +Sunday 01 2001 %W %v %x 00:00:00 +Tuesday 52 2001 %W %V %X 00:00:00 +060 2004 %j %Y 00:00:00 +4 53 1998 %w %u %Y 00:00:00 15-01-2001 %d-%m-%Y 
%H:%i:%S 00:00:00 15-01-20 %d-%m-%y 00:00:00 15-2001-1 %d-%Y-%c 00:00:00 @@ -234,16 +276,23 @@ date format time2 2003-01-02 11:11:12Pm %Y-%m-%d %h:%i:%S%p 23:11:12 10:20:10 %H:%i:%s 10:20:10 10:20:10 %h:%i:%s.%f 10:20:10 +10:20:10 %T 10:20:10 10:20:10AM %h:%i:%s%p 10:20:10 +10:20:10AM %r 10:20:10 10:20:10.44AM %h:%i:%s.%f%p 10:20:10.440000 15-01-2001 12:59:58 %d-%m-%Y %H:%i:%S 12:59:58 15 September 2001 %d %M %Y 00:00:00 15 SEPTEMB 2001 %d %M %Y 00:00:00 15 MAY 2001 %d %b %Y 00:00:00 +15th May 2001 %D %b %Y 00:00:00 Sunday 15 MAY 2001 %W %d %b %Y 00:00:00 Sund 15 MAY 2001 %W %d %b %Y 00:00:00 Tuesday 00 2002 %W %U %Y 00:00:00 Thursday 53 1998 %W %u %Y 00:00:00 +Sunday 01 2001 %W %v %x 00:00:00 +Tuesday 52 2001 %W %V %X 00:00:00 +060 2004 %j %Y 00:00:00 +4 53 1998 %w %u %Y 00:00:00 15-01-2001 %d-%m-%Y %H:%i:%S 00:00:00 15-01-20 %d-%m-%y 00:00:00 15-2001-1 %d-%Y-%c 00:00:00 @@ -258,10 +307,13 @@ insert into t1 values ('15 Septembei 2001', '%d %M %Y'), ('15 Ju 2001', '%d %M %Y'), ('Sund 15 MA', '%W %d %b %Y'), -('Sunday 01 2001', '%W %V %X'), ('Thursdai 12 1998', '%W %u %Y'), -(NULL, get_format(DATE,'USA')), -('Tuesday 52 2001', '%W %V %X'); +('Sunday 01 2001', '%W %v %X'), +('Tuesday 52 2001', '%W %V %x'), +('Tuesday 52 2001', '%W %V %Y'), +('Tuesday 52 2001', '%W %u %x'), +('7 53 1998', '%w %u %Y'), +(NULL, get_format(DATE,'USA')); select date,format,str_to_date(date, format) as str_to_date from t1; date format str_to_date 2003-01-02 10:11:12 PM %Y-%m-%d %H:%i:%S %p NULL @@ -273,10 +325,13 @@ date format str_to_date 15 Septembei 2001 %d %M %Y NULL 15 Ju 2001 %d %M %Y NULL Sund 15 MA %W %d %b %Y NULL -Sunday 01 2001 %W %V %X NULL Thursdai 12 1998 %W %u %Y NULL +Sunday 01 2001 %W %v %X NULL +Tuesday 52 2001 %W %V %x NULL +Tuesday 52 2001 %W %V %Y NULL +Tuesday 52 2001 %W %u %x NULL +7 53 1998 %w %u %Y NULL NULL %m.%d.%Y NULL -Tuesday 52 2001 %W %V %X NULL select date,format,concat(str_to_date(date, format),'') as con from t1; date format con 2003-01-02 10:11:12 PM 
%Y-%m-%d %H:%i:%S %p NULL @@ -288,10 +343,13 @@ date format con 15 Septembei 2001 %d %M %Y NULL 15 Ju 2001 %d %M %Y NULL Sund 15 MA %W %d %b %Y NULL -Sunday 01 2001 %W %V %X NULL Thursdai 12 1998 %W %u %Y NULL +Sunday 01 2001 %W %v %X NULL +Tuesday 52 2001 %W %V %x NULL +Tuesday 52 2001 %W %V %Y NULL +Tuesday 52 2001 %W %u %x NULL +7 53 1998 %w %u %Y NULL NULL %m.%d.%Y NULL -Tuesday 52 2001 %W %V %X NULL truncate table t1; insert into t1 values ('10:20:10AM', '%h:%i:%s'), diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test index 1fc04cb907b..2e6e1fabd8d 100644 --- a/mysql-test/t/date_formats.test +++ b/mysql-test/t/date_formats.test @@ -132,16 +132,23 @@ insert into t1 values ('2003-01-02 11:11:12Pm', '%Y-%m-%d %h:%i:%S%p'), ('10:20:10', '%H:%i:%s'), ('10:20:10', '%h:%i:%s.%f'), +('10:20:10', '%T'), ('10:20:10AM', '%h:%i:%s%p'), +('10:20:10AM', '%r'), ('10:20:10.44AM', '%h:%i:%s.%f%p'), ('15-01-2001 12:59:58', '%d-%m-%Y %H:%i:%S'), ('15 September 2001', '%d %M %Y'), ('15 SEPTEMB 2001', '%d %M %Y'), ('15 MAY 2001', '%d %b %Y'), +('15th May 2001', '%D %b %Y'), ('Sunday 15 MAY 2001', '%W %d %b %Y'), ('Sund 15 MAY 2001', '%W %d %b %Y'), ('Tuesday 00 2002', '%W %U %Y'), ('Thursday 53 1998', '%W %u %Y'), +('Sunday 01 2001', '%W %v %x'), +('Tuesday 52 2001', '%W %V %X'), +('060 2004', '%j %Y'), +('4 53 1998', '%w %u %Y'), ('15-01-2001', '%d-%m-%Y %H:%i:%S'), ('15-01-20', '%d-%m-%y'), ('15-2001-1', '%d-%Y-%c'); @@ -156,7 +163,7 @@ select date,format,DATE(str_to_date(date, format)) as date2 from t1; select date,format,TIME(str_to_date(date, format)) as time from t1; select date,format,concat(TIME(str_to_date(date, format))) as time2 from t1; -# Test wrong dates +# Test wrong dates or conversion specifiers truncate table t1; insert into t1 values
-(NULL, get_format(DATE,'USA')), -('Tuesday 52 2001', '%W %V %X'); +('Sunday 01 2001', '%W %v %X'), +('Tuesday 52 2001', '%W %V %x'), +('Tuesday 52 2001', '%W %V %Y'), +('Tuesday 52 2001', '%W %u %x'), +('7 53 1998', '%w %u %Y'), +(NULL, get_format(DATE,'USA')); select date,format,str_to_date(date, format) as str_to_date from t1; select date,format,concat(str_to_date(date, format),'') as con from t1; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 786bcf434ed..cc320addd47 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -113,6 +113,12 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, } +/* Date formats corresponding to compound %r and %T conversion specifiers */ +static DATE_TIME_FORMAT time_ampm_format= {{}, '\0', 0, + {(char *)"%I:%i:%S %p", 11}}; +static DATE_TIME_FORMAT time_24hrs_format= {{}, '\0', 0, + {(char *)"%H:%i:%S", 8}}; + /* Extract datetime value to TIME struct from string value according to format string. @@ -126,6 +132,17 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, cached_timestamp_type It uses to get an appropriate warning in the case when the value is truncated. + sub_pattern_end if non-zero then we are parsing string which + should correspond compound specifier (like %T or + %r) and this parameter is pointer to place where + pointer to end of string matching this specifier + should be stored. + NOTE + Possibility to parse strings matching to patterns equivalent to compound + specifiers is mainly intended for use from inside of this function in + order to understand %T and %r conversion specifiers, so number of + conversion specifiers that can be used in such sub-patterns is limited. + Also most of checks are skipped in this case. 
RETURN 0 ok @@ -134,14 +151,18 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, static bool extract_date_time(DATE_TIME_FORMAT *format, const char *val, uint length, TIME *l_time, - timestamp_type cached_timestamp_type) + timestamp_type cached_timestamp_type, + const char **sub_pattern_end) { int weekday= 0, yearday= 0, daypart= 0; int week_number= -1; CHARSET_INFO *cs= &my_charset_bin; int error= 0; bool usa_time= 0; - bool sunday_first= 0; + bool sunday_first_n_first_week_non_iso; + bool strict_week_number; + int strict_week_number_year= -1; + bool strict_week_number_year_type; int frac_part; const char *val_begin= val; const char *val_end= val + length; @@ -149,7 +170,12 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, const char *end= ptr + format->format.length; DBUG_ENTER("extract_date_time"); - bzero((char*) l_time, sizeof(*l_time)); + LINT_INIT(sunday_first_n_first_week_non_iso); + LINT_INIT(strict_week_number); + LINT_INIT(strict_week_number_year_type); + + if (!sub_pattern_end) + bzero((char*) l_time, sizeof(*l_time)); for (; ptr != end && val != val_end; ptr++) { @@ -160,7 +186,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, char *tmp; /* Skip pre-space between each argument */ - while (my_isspace(cs, *val) && val != val_end) + while (val != val_end && my_isspace(cs, *val)) val++; val_len= (uint) (val_end - val); @@ -268,9 +294,12 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, break; case 'w': tmp= (char*) val + 1; - if ((weekday= (int) my_strtoll10(val, &tmp, &error)) <= 0 || + if ((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 || weekday >= 7) goto err; + /* We should use the same 1 - 7 scale for %w as for %W */ + if (!weekday) + weekday= 7; val= tmp; break; case 'j': @@ -279,15 +308,45 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, val= tmp; break; + /* Week numbers */ + case 'V': case 'U': - sunday_first= 1; - /* Fall through */ + case 'v': case 'u': + 
sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V'); + strict_week_number= (*ptr=='V' || *ptr=='v'); tmp= (char*) val + min(val_len, 2); - week_number= (int) my_strtoll10(val, &tmp, &error); + if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 || + strict_week_number && !week_number || + week_number > 53) + goto err; val= tmp; break; + /* Year used with 'strict' %V and %v week numbers */ + case 'X': + case 'x': + strict_week_number_year_type= (*ptr=='X'); + tmp= (char*) val + min(4, val_len); + strict_week_number_year= (int) my_strtoll10(val, &tmp, &error); + val= tmp; + break; + + /* Time in AM/PM notation */ + case 'r': + error= extract_date_time(&time_ampm_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val); + break; + + /* Time in 24-hour notation */ + case 'T': + error= extract_date_time(&time_24hrs_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val); + break; + + /* Conversion specifiers that match classes of characters */ case '.': while (my_ispunct(cs, *val) && val != val_end) val++; @@ -320,6 +379,16 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, l_time->hour= l_time->hour%12+daypart; } + /* + If we are recursively called for parsing string matching compound + specifiers we are already done. 
+ */ + if (sub_pattern_end) + { + *sub_pattern_end= val; + DBUG_RETURN(0); + } + if (yearday > 0) { uint days= calc_daynr(l_time->year,1,1) + yearday - 1; @@ -330,34 +399,45 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (week_number >= 0 && weekday) { - int days= calc_daynr(l_time->year,1,1); + int days; uint weekday_b; - - if (weekday > 7 || weekday < 0) - goto err; - if (sunday_first) - weekday = weekday%7; - if (week_number == 53) + /* + %V,%v require %X,%x resprectively, + %U,%u should be used with %Y and not %X or %x + */ + if (strict_week_number && + (strict_week_number_year < 0 || + strict_week_number_year_type != sunday_first_n_first_week_non_iso) || + !strict_week_number && strict_week_number_year >= 0) + goto err; + + /* Number of days since year 0 till 1st Jan of this year */ + days= calc_daynr((strict_week_number ? strict_week_number_year : + l_time->year), + 1, 1); + /* Which day of week is 1st Jan of this year */ + weekday_b= calc_weekday(days, sunday_first_n_first_week_non_iso); + + /* + Below we are going to sum: + 1) number of days since year 0 till 1st day of 1st week of this year + 2) number of days between 1st week and our week + 3) and position of our day in the week + */ + if (sunday_first_n_first_week_non_iso) { - days+= (week_number - 1)*7; - weekday_b= calc_weekday(days, sunday_first); - weekday = weekday - weekday_b - !sunday_first; - days+= weekday; - } - else if (week_number == 0) - { - weekday_b= calc_weekday(days, sunday_first); - weekday = weekday - weekday_b - !sunday_first; - days+= weekday; + days+= ((weekday_b == 0) ? 0 : 7) - weekday_b + + (week_number - 1) * 7 + + weekday % 7; } else { - days+= (week_number - !sunday_first)*7; - weekday_b= calc_weekday(days, sunday_first); - weekday =weekday - weekday_b - !sunday_first; - days+= weekday; + days+= ((weekday_b <= 3) ? 
0 : 7) - weekday_b + + (week_number - 1) * 7 + + (weekday - 1); } + if (days <= 0 || days >= MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); @@ -2599,7 +2679,7 @@ bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date) date_time_format.format.str= (char*) format->ptr(); date_time_format.format.length= format->length(); if (extract_date_time(&date_time_format, val->ptr(), val->length(), - ltime, cached_timestamp_type)) + ltime, cached_timestamp_type, 0)) goto null_date; if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day) { From 744af07953e031c4d9d050ef1068e2291e79b584 Mon Sep 17 00:00:00 2001 From: "magnus@neptunus.(none)" <> Date: Fri, 6 Aug 2004 09:41:44 +0200 Subject: [PATCH 75/93] Increase value of TimeBetweenWatchDogCheck to make it to start cluster on lowend machines. --- mysql-test/ndb/ndb_config_2_node.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index 312b2f8c4c0..847fe615a15 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -4,6 +4,7 @@ MaxNoOfConcurrentOperations: CHOOSE_MaxNoOfConcurrentOperations DataMemory: CHOOSE_DataMemory IndexMemory: CHOOSE_IndexMemory Diskless: CHOOSE_Diskless +TimeBetweenWatchDogCheck: 30000 [COMPUTER] Id: 1 From e02ee413496dd84531b28cc4ef01dd7081c46353 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Fri, 6 Aug 2004 09:43:06 +0200 Subject: [PATCH 76/93] Fixed ndb backup bug --- ndb/src/kernel/blocks/backup/Backup.hpp | 3 ++- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp index 77669e759d3..1e2100251be 100644 --- a/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/ndb/src/kernel/blocks/backup/Backup.hpp @@ -696,6 +696,7 @@ Uint32 * Backup::OperationRecord::newVariableKey(Uint32 
sz){ attrLeft--; attrSzLeft = 0; + attrSzTotal += sz; dst = &dst_VariableData->Data[0]; dst_VariableData->Sz = htonl(sz); @@ -712,7 +713,7 @@ Backup::OperationRecord::finished(){ return false; } - attrLen[opNoDone] = attrSzTotal; + attrLen[opNoDone] = attrSzTotal + sz_FixedKeys; opNoDone++; scanStop = dst = (Uint32 *)dst_VariableData; diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index eb8e2917a8e..d9422622bfa 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -8569,7 +8569,8 @@ void Dblqh::sendKeyinfo20(Signal* signal, return; } - EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal, 3 + keyLen); + EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal, + KeyInfo20::HeaderLength + keyLen); jamEntry(); return; } From 6b8e4c93d98be005169f5fcefbb325ef7453086e Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Fri, 6 Aug 2004 11:29:56 +0200 Subject: [PATCH 77/93] Bug#4586 + TC fix --- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 3 ++- ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index d395e75a3f0..ed467db1c6c 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -4959,6 +4959,7 @@ void Dbtc::execLQHKEYREF(Signal* signal) } } + Uint32 marker = regTcPtr->commitAckMarker; markOperationAborted(regApiPtr, regTcPtr); if(regApiPtr->apiConnectstate == CS_ABORTING){ @@ -4978,7 +4979,7 @@ void Dbtc::execLQHKEYREF(Signal* signal) return; }//if - if (regTcPtr->commitAckMarker != RNIL){ + if (marker != RNIL){ /** * This was an insert/update/delete/write which failed * that contained the marker diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp index cccbcfbe966..d168a6797bb 100644 --- 
a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp @@ -139,8 +139,9 @@ void Dbtup::initializePage() ptrAss(pagePtr, page); pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON; - cnoOfAllocatedPages = 1 + MAX_PARALLELL_TUP_SRREQ; - returnCommonArea(cnoOfAllocatedPages, cnoOfPage - cnoOfAllocatedPages); + Uint32 tmp = 1 + MAX_PARALLELL_TUP_SRREQ; + returnCommonArea(tmp, cnoOfPage - tmp); + cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea c_sr_free_page_0 = ~0; }//Dbtup::initializePage() From a3ea1e2339bf9a167af292b6c1d43b8065d56ed8 Mon Sep 17 00:00:00 2001 From: "kent@mysql.com" <> Date: Fri, 6 Aug 2004 18:03:27 +0200 Subject: [PATCH 78/93] libmysql.c: Can't return value from void function --- libmysql/libmysql.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index b9c8201ed56..a276b3d70e4 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -3277,7 +3277,8 @@ static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, char buff[22]; /* Enough for longlong */ char *end= longlong10_to_str(value, buff, field_is_unsigned ? 10: -10); /* Resort to string conversion which supports all typecodes */ - return fetch_string_with_conversion(param, buff, end - buff); + fetch_string_with_conversion(param, buff, end - buff); + break; } } } @@ -3349,10 +3350,11 @@ static void fetch_float_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field, } else { - sprintf(buff, "%.*f", field->decimals, value); + sprintf(buff, "%.*f", (int) field->decimals, value); end= strend(buff); } - return fetch_string_with_conversion(param, buff, end - buff); + fetch_string_with_conversion(param, buff, end - buff); + break; } } } From eaaedb69a8e320762fbced9ba5889a02a8a54f71 Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Sat, 7 Aug 2004 18:26:59 +0200 Subject: [PATCH 79/93] bug#4881 - crash in ALTER .. 
RENAME if rename fails --- sql/sql_select.cc | 16 +++++++--------- sql/sql_table.cc | 2 +- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 487caeb62db..3b02735edc3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -854,7 +854,7 @@ JOIN::optimize() as in other cases the join is done before the sort. */ if (const_tables != tables && - (order || group_list) && + (order || group_list) && join_tab[const_tables].type != JT_ALL && join_tab[const_tables].type != JT_FT && join_tab[const_tables].type != JT_REF_OR_NULL && @@ -868,9 +868,7 @@ JOIN::optimize() ((group_list && const_tables != tables && (!simple_group || !test_if_skip_sort_order(&join_tab[const_tables], group_list, - unit->select_limit_cnt, - 0))) || - select_distinct) && + HA_POS_ERROR, 0))) || select_distinct) && tmp_table_param.quick_group && !procedure) { need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort @@ -2069,7 +2067,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, } else if (old->eq_func && new_fields->eq_func && old->val->eq(new_fields->val, old->field->binary())) - + { old->level= and_level; old->optimize= ((old->optimize & new_fields->optimize & @@ -2128,7 +2126,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, field Field used in comparision eq_func True if we used =, <=> or IS NULL value Value used for comparison with field - Is NULL for BETWEEN and IN + Is NULL for BETWEEN and IN usable_tables Tables which can be used for key optimization NOTES @@ -2207,7 +2205,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, (*value)->result_type() != STRING_RESULT && field->cmp_type() != (*value)->result_type()) return; - + /* We can't use indexes if the effective collation of the operation differ from the field collation. 
@@ -2320,7 +2318,7 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) { Item *tmp=new Item_null; - if (!tmp) // Should never be true + if (unlikely(!tmp)) // Should never be true return; add_key_field(key_fields,*and_level,cond_func, ((Item_field*) (cond_func->arguments()[0])->real_item()) @@ -2731,7 +2729,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, rec= keyuse->ref_table_rows; /* If there is one 'key_column IS NULL' expression, we can - use this ref_or_null optimsation of this field + use this ref_or_null optimisation of this field */ found_ref_or_null|= (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 7afbe6d0b87..37e959d38a1 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2691,7 +2691,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (do_send_ok) send_ok(thd); } - else + else if (error > 0) { table->file->print_error(error, MYF(0)); error= -1; From e1e1c39bdade2caabb97bc8e307a8bf6c459105e Mon Sep 17 00:00:00 2001 From: "serg@serg.mylan" <> Date: Sat, 7 Aug 2004 23:18:13 +0200 Subject: [PATCH 80/93] cleanup --- client/mysqltest.c | 4 ++-- mysql-test/r/select_found.result | 5 ++++- mysql-test/t/select_found.test | 12 +++++++++++- sql/item_cmpfunc.cc | 4 ++-- sql/sql_select.cc | 2 +- 5 files changed, 20 insertions(+), 7 deletions(-) diff --git a/client/mysqltest.c b/client/mysqltest.c index 8307fe44bd9..3287c9738d3 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -641,7 +641,7 @@ VAR* var_get(const char* var_name, const char** var_name_end, my_bool raw, if (*var_name != '$') goto err; digit = *++var_name - '0'; - if (!(digit < 10 && digit >= 0)) + if (digit < 0 || digit >= 10) { const char* save_var_name = var_name, *end; uint length; @@ -660,7 +660,7 @@ VAR* var_get(const char* var_name, const char** var_name_end, my_bool raw, length < MAX_VAR_NAME) { char 
buff[MAX_VAR_NAME+1]; - strmake(buff, save_var_name, length); + strmake(buff, save_var_name, length); v= var_from_env(buff, ""); } var_name--; /* Point at last character */ diff --git a/mysql-test/r/select_found.result b/mysql-test/r/select_found.result index 470a3e8439f..00dbcb54d93 100644 --- a/mysql-test/r/select_found.result +++ b/mysql-test/r/select_found.result @@ -81,7 +81,10 @@ email varchar(50) NOT NULL default '', PRIMARY KEY (id), UNIQUE KEY e_n (email,name) ); -INSERT INTO t2 VALUES (1,'name1','email1'),(2,'name2','email2'),(3,'name3','email3'),(4,'name4','email4'),(5,'name5','email5'),(6,'name6','email6'),(7,'name7','email7'),(8,'name8','email8'),(9,'name9','email9'),(10,'name10','email10'),(11,'name11','email11'),(12,'name12','email12'),(13,'name13','email13'),(14,'name14','email14'),(15,'name15','email15'),(16,'name16','email16'),(17,'name17','email17'),(18,'name18','email18'),(19,'name19','email19'),(20,'name20','email20'),(21,'name21','email21'),(22,'name22','email22'),(23,'name23','email23'),(24,'name24','email24'),(25,'name25','email25'),(26,'name26','email26'),(27,'name27','email27'),(28,'name28','email28'),(29,'name29','email29'),(30,'name30','email30'),(31,'name31','email31'),(32,'name32','email32'),(33,'name33','email33'),(34,'name34','email34'),(35,'name35','email35'),(36,'name36','email36'),(37,'name37','email37'),(38,'name38','email38'),(39,'name39','email39'),(40,'name40','email40'),(41,'name41','email41'),(42,'name42','email42'),(43,'name43','email43'),(44,'name44','email44'),(45,'name45','email45'),(46,'name46','email46'),(47,'name47','email47'),(48,'name48','email48'),(49,'name49','email49'),(50,'name50','email50'),(51,'name51','email51'),(52,'name52','email52'),(53,'name53','email53'),(54,'name54','email54'),(55,'name55','email55'),(56,'name56','email56'),(57,'name57','email57'),(58,'name58','email58'),(59,'name59','email59'),(60,'name60','email60'),(61,'name61','email61'),(62,'name62','email62'),(63,'name63','email63'),(64,'name64','e
mail64'),(65,'name65','email65'),(66,'name66','email66'),(67,'name67','email67'),(68,'name68','email68'),(69,'name69','email69'),(70,'name70','email70'),(71,'name71','email71'),(72,'name72','email72'),(73,'name73','email73'),(74,'name74','email74'),(75,'name75','email75'),(76,'name76','email76'),(77,'name77','email77'),(78,'name78','email78'),(79,'name79','email79'),(80,'name80','email80'),(81,'name81','email81'),(82,'name82','email82'),(83,'name83','email83'),(84,'name84','email84'),(85,'name85','email85'),(86,'name86','email86'),(87,'name87','email87'),(88,'name88','email88'),(89,'name89','email89'),(90,'name90','email90'),(91,'name91','email91'),(92,'name92','email92'),(93,'name93','email93'),(94,'name94','email94'),(95,'name95','email95'),(96,'name96','email96'),(97,'name97','email97'),(98,'name98','email98'),(99,'name99','email99'),(100,'name100','email100'),(101,'name101','email101'),(102,'name102','email102'),(103,'name103','email103'),(104,'name104','email104'),(105,'name105','email105'),(106,'name106','email106'),(107,'name107','email107'),(108,'name108','email108'),(109,'name109','email109'),(110,'name110','email110'),(111,'name111','email111'),(112,'name112','email112'),(113,'name113','email113'),(114,'name114','email114'),(115,'name115','email115'),(116,'name116','email116'),(117,'name117','email117'),(118,'name118','email118'),(119,'name119','email119'),(120,'name120','email120'),(121,'name121','email121'),(122,'name122','email122'),(123,'name123','email123'),(124,'name124','email124'),(125,'name125','email125'),(126,'name126','email126'),(127,'name127','email127'),(128,'name128','email128'),(129,'name129','email129'),(130,'name130','email130'),(131,'name131','email131'),(132,'name132','email132'),(133,'name133','email133'),(134,'name134','email134'),(135,'name135','email135'),(136,'name136','email136'),(137,'name137','email137'),(138,'name138','email138'),(139,'name139','email139'),(140,'name140','email140'),(141,'name141','email141'),(142,'name142','e
mail142'),(143,'name143','email143'),(144,'name144','email144'),(145,'name145','email145'),(146,'name146','email146'),(147,'name147','email147'),(148,'name148','email148'),(149,'name149','email149'),(150,'name150','email150'),(151,'name151','email151'),(152,'name152','email152'),(153,'name153','email153'),(154,'name154','email154'),(155,'name155','email155'),(156,'name156','email156'),(157,'name157','email157'),(158,'name158','email158'),(159,'name159','email159'),(160,'name160','email160'),(161,'name161','email161'),(162,'name162','email162'),(163,'name163','email163'),(164,'name164','email164'),(165,'name165','email165'),(166,'name166','email166'),(167,'name167','email167'),(168,'name168','email168'),(169,'name169','email169'),(170,'name170','email170'),(171,'name171','email171'),(172,'name172','email172'),(173,'name173','email173'),(174,'name174','email174'),(175,'name175','email175'),(176,'name176','email176'),(177,'name177','email177'),(178,'name178','email178'),(179,'name179','email179'),(180,'name180','email180'),(181,'name181','email181'),(182,'name182','email182'),(183,'name183','email183'),(184,'name184','email184'),(185,'name185','email185'),(186,'name186','email186'),(187,'name187','email187'),(188,'name188','email188'),(189,'name189','email189'),(190,'name190','email190'),(191,'name191','email191'),(192,'name192','email192'),(193,'name193','email193'),(194,'name194','email194'),(195,'name195','email195'),(196,'name196','email196'),(197,'name197','email197'),(198,'name198','email198'),(199,'name199','email199'),(200,'name200','email200'); +EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 system PRIMARY,kid NULL NULL NULL 0 const row not found +1 SIMPLE t2 index NULL e_n 100 NULL 200 SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; email email1 diff 
--git a/mysql-test/t/select_found.test b/mysql-test/t/select_found.test index c20b6e9ab6c..943174462e3 100644 --- a/mysql-test/t/select_found.test +++ b/mysql-test/t/select_found.test @@ -54,8 +54,18 @@ CREATE TABLE t2 ( UNIQUE KEY e_n (email,name) ); -INSERT INTO t2 VALUES (1,'name1','email1'),(2,'name2','email2'),(3,'name3','email3'),(4,'name4','email4'),(5,'name5','email5'),(6,'name6','email6'),(7,'name7','email7'),(8,'name8','email8'),(9,'name9','email9'),(10,'name10','email10'),(11,'name11','email11'),(12,'name12','email12'),(13,'name13','email13'),(14,'name14','email14'),(15,'name15','email15'),(16,'name16','email16'),(17,'name17','email17'),(18,'name18','email18'),(19,'name19','email19'),(20,'name20','email20'),(21,'name21','email21'),(22,'name22','email22'),(23,'name23','email23'),(24,'name24','email24'),(25,'name25','email25'),(26,'name26','email26'),(27,'name27','email27'),(28,'name28','email28'),(29,'name29','email29'),(30,'name30','email30'),(31,'name31','email31'),(32,'name32','email32'),(33,'name33','email33'),(34,'name34','email34'),(35,'name35','email35'),(36,'name36','email36'),(37,'name37','email37'),(38,'name38','email38'),(39,'name39','email39'),(40,'name40','email40'),(41,'name41','email41'),(42,'name42','email42'),(43,'name43','email43'),(44,'name44','email44'),(45,'name45','email45'),(46,'name46','email46'),(47,'name47','email47'),(48,'name48','email48'),(49,'name49','email49'),(50,'name50','email50'),(51,'name51','email51'),(52,'name52','email52'),(53,'name53','email53'),(54,'name54','email54'),(55,'name55','email55'),(56,'name56','email56'),(57,'name57','email57'),(58,'name58','email58'),(59,'name59','email59'),(60,'name60','email60'),(61,'name61','email61'),(62,'name62','email62'),(63,'name63','email63'),(64,'name64','email64'),(65,'name65','email65'),(66,'name66','email66'),(67,'name67','email67'),(68,'name68','email68'),(69,'name69','email69'),(70,'name70','email70'),(71,'name71','email71'),(72,'name72','email72'),(73,'name73','email73'),
(74,'name74','email74'),(75,'name75','email75'),(76,'name76','email76'),(77,'name77','email77'),(78,'name78','email78'),(79,'name79','email79'),(80,'name80','email80'),(81,'name81','email81'),(82,'name82','email82'),(83,'name83','email83'),(84,'name84','email84'),(85,'name85','email85'),(86,'name86','email86'),(87,'name87','email87'),(88,'name88','email88'),(89,'name89','email89'),(90,'name90','email90'),(91,'name91','email91'),(92,'name92','email92'),(93,'name93','email93'),(94,'name94','email94'),(95,'name95','email95'),(96,'name96','email96'),(97,'name97','email97'),(98,'name98','email98'),(99,'name99','email99'),(100,'name100','email100'),(101,'name101','email101'),(102,'name102','email102'),(103,'name103','email103'),(104,'name104','email104'),(105,'name105','email105'),(106,'name106','email106'),(107,'name107','email107'),(108,'name108','email108'),(109,'name109','email109'),(110,'name110','email110'),(111,'name111','email111'),(112,'name112','email112'),(113,'name113','email113'),(114,'name114','email114'),(115,'name115','email115'),(116,'name116','email116'),(117,'name117','email117'),(118,'name118','email118'),(119,'name119','email119'),(120,'name120','email120'),(121,'name121','email121'),(122,'name122','email122'),(123,'name123','email123'),(124,'name124','email124'),(125,'name125','email125'),(126,'name126','email126'),(127,'name127','email127'),(128,'name128','email128'),(129,'name129','email129'),(130,'name130','email130'),(131,'name131','email131'),(132,'name132','email132'),(133,'name133','email133'),(134,'name134','email134'),(135,'name135','email135'),(136,'name136','email136'),(137,'name137','email137'),(138,'name138','email138'),(139,'name139','email139'),(140,'name140','email140'),(141,'name141','email141'),(142,'name142','email142'),(143,'name143','email143'),(144,'name144','email144'),(145,'name145','email145'),(146,'name146','email146'),(147,'name147','email147'),(148,'name148','email148'),(149,'name149','email149'),(150,'name150','email150')
,(151,'name151','email151'),(152,'name152','email152'),(153,'name153','email153'),(154,'name154','email154'),(155,'name155','email155'),(156,'name156','email156'),(157,'name157','email157'),(158,'name158','email158'),(159,'name159','email159'),(160,'name160','email160'),(161,'name161','email161'),(162,'name162','email162'),(163,'name163','email163'),(164,'name164','email164'),(165,'name165','email165'),(166,'name166','email166'),(167,'name167','email167'),(168,'name168','email168'),(169,'name169','email169'),(170,'name170','email170'),(171,'name171','email171'),(172,'name172','email172'),(173,'name173','email173'),(174,'name174','email174'),(175,'name175','email175'),(176,'name176','email176'),(177,'name177','email177'),(178,'name178','email178'),(179,'name179','email179'),(180,'name180','email180'),(181,'name181','email181'),(182,'name182','email182'),(183,'name183','email183'),(184,'name184','email184'),(185,'name185','email185'),(186,'name186','email186'),(187,'name187','email187'),(188,'name188','email188'),(189,'name189','email189'),(190,'name190','email190'),(191,'name191','email191'),(192,'name192','email192'),(193,'name193','email193'),(194,'name194','email194'),(195,'name195','email195'),(196,'name196','email196'),(197,'name197','email197'),(198,'name198','email198'),(199,'name199','email199'),(200,'name200','email200'); +disable_query_log; +let $1=200; +let $2=0; +while ($1) +{ + inc $2; + eval INSERT INTO t2 VALUES ($2,'name$2','email$2'); + dec $1; +} +enable_query_log; +EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; SELECT FOUND_ROWS(); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 60f80249e94..14c0d996360 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -268,8 +268,8 @@ void Item_bool_func2::fix_length_and_dec() int 
Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) { owner= item; - func= comparator_matrix[type][(owner->functype() == Item_func::EQUAL_FUNC)? - 1:0]; + func= comparator_matrix[type] + [test(owner->functype() == Item_func::EQUAL_FUNC)]; if (type == ROW_RESULT) { uint n= (*a)->cols(); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3b02735edc3..3b3d8303210 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -868,7 +868,7 @@ JOIN::optimize() ((group_list && const_tables != tables && (!simple_group || !test_if_skip_sort_order(&join_tab[const_tables], group_list, - HA_POS_ERROR, 0))) || select_distinct) && + unit->select_limit_cnt, 0))) || select_distinct) && tmp_table_param.quick_group && !procedure) { need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort From 072818eab528eeb4470589ac6f04f353a92ad881 Mon Sep 17 00:00:00 2001 From: "kent@mysql.com" <> Date: Sun, 8 Aug 2004 15:46:57 +0200 Subject: [PATCH 81/93] mysqld.cc: get_options() did an exit(0) after reporting "Too many arguments" --- sql/mysqld.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 998b5501724..669c8f91c4c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -6078,12 +6078,13 @@ static void get_options(int argc,char **argv) my_getopt_register_get_addr(mysql_getopt_value); strmake(def_ft_boolean_syntax, ft_boolean_syntax, sizeof(ft_boolean_syntax)-1); - if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option))) + if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)) != 0) exit(ho_error); if (argc > 0) { fprintf(stderr, "%s: Too many arguments (first extra is '%s').\nUse --help to get a list of available options\n", my_progname, *argv); - exit(ho_error); + /* FIXME add EXIT_TOO_MANY_ARGUMENTS to "mysys_err.h" and return that code? 
*/ + exit(1); } if (opt_help) From 1b756b68a0843d72fe1f769f35ab8250e3bada41 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Sun, 8 Aug 2004 20:27:39 +0200 Subject: [PATCH 82/93] testDict -n CreateMaxTables - Init _all_ of CREATE_TABLE_REF --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 70d27934f1e..bd191d112f1 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -3756,6 +3756,10 @@ Dbdict::createTab_reply(Signal* signal, ref->senderRef = reference(); ref->senderData = createTabPtr.p->m_senderData; ref->errorCode = createTabPtr.p->m_errorCode; + ref->masterNodeId = c_masterNodeId; + ref->status = 0; + ref->errorKey = 0; + ref->errorLine = 0; //@todo check api failed sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal, From 651b6893b13d772cfd06c1feda8104ffb8f9b216 Mon Sep 17 00:00:00 2001 From: "paul@kite-hub.kitebird.com" <> Date: Sun, 8 Aug 2004 21:23:03 -0500 Subject: [PATCH 83/93] mysqld.cc: Put --help first, reorder other options so that they are alphabetical. (shouldn't have to use grep to find an option.) Move group_concat_max_len to variable part of list. Rename character_set_server, collation_server, shared_memory_base_name to character-set-server, collation-server, shared-memory-base-name. Make default-collation message refer to collation-server rather than character-set-server. 
--- sql/mysqld.cc | 424 ++++++++++++++++++++++++++------------------------ 1 file changed, 217 insertions(+), 207 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 669c8f91c4c..98e8183d2d5 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3973,12 +3973,25 @@ enum options_mysqld struct my_option my_long_options[] = { + {"help", '?', "Display this help and exit.", + (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + 0, 0}, +#ifdef HAVE_REPLICATION + {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count, + 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#endif /* HAVE_REPLICATION */ {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"basedir", 'b', "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \ +Disable with --skip-bdb (will save memory).", + (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, 1, 0, 0, + 0, 0, 0}, #ifdef HAVE_BERKELEY_DB {"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home, (gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -3995,10 +4008,6 @@ struct my_option my_long_options[] = "Disable synchronously flushing logs. This option is deprecated, use --skip-sync-bdb-logs or sync-bdb-logs=0 instead", // (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"sync-bdb-logs", OPT_BDB_SYNC, - "Synchronously flush logs. 
Enabled by default", - (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, - NO_ARG, 1, 0, 0, 0, 0, 0}, {"bdb-shared-data", OPT_BDB_SHARED, "Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -4006,70 +4015,51 @@ struct my_option my_long_options[] = (gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_BERKELEY_DB */ - {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default", - (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, - 0, 0, 0, 0}, - {"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \ -Disable with --skip-bdb (will save memory).", - (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, 1, 0, 0, - 0, 0, 0}, {"big-tables", OPT_BIG_TABLES, "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", + (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB, "Tells the master that updates to the given database should not be logged tothe binary log.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", - (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"character_set_server", 'C', "Set the default character set.", + {"character-set-server", 'C', "Set the default character set.", (gptr*) 
&default_character_set_name, (gptr*) &default_character_set_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - {"collation_server", OPT_DEFAULT_COLLATION, "Set the default collation.", - (gptr*) &default_collation_name, (gptr*) &default_collation_name, - 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", - (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, - 0, 0, 0}, -#ifdef __WIN__ - {"standalone", OPT_STANDALONE, - "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, -#endif - {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, - {"chroot", 'r', "Chroot mysqld daemon during startup.", - (gptr*) &mysqld_chroot, (gptr*) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory where character sets are.", (gptr*) &charsets_dir, (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"chroot", 'r', "Chroot mysqld daemon during startup.", + (gptr*) &mysqld_chroot, (gptr*) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, + {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.", + (gptr*) &default_collation_name, (gptr*) &default_collation_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"concurrent-insert", OPT_CONCURRENT_INSERT, + "Use concurrent insert with MyISAM. 
Disable with --skip-concurrent-insert.", + (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", + (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, + 0, 0, 0}, + {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG, + NO_ARG, 0, 0, 0, 0, 0, 0}, {"datadir", 'h', "Path to the database root.", (gptr*) &mysql_data_home, (gptr*) &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DBUG_OFF {"debug", '#', "Debug log.", (gptr*) &default_dbug_option, (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef SAFEMALLOC - {"skip-safemalloc", OPT_SKIP_SAFEMALLOC, - "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG, - 0, 0, 0, 0, 0, 0}, #endif -#endif -#ifdef HAVE_OPENSSL - {"des-key-file", OPT_DES_KEY_FILE, - "Load keys for des_encrypt() and des_encrypt from given file.", - (gptr*) &des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, -#endif /* HAVE_OPENSSL */ - {"default-character-set", 'C', "Set the default character set (Deprecated option, use character_set_server instead).", + {"default-character-set", 'C', "Set the default character set (deprecated option, use --character-set-server instead).", (gptr*) &default_character_set_name, (gptr*) &default_character_set_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - {"default-collation", OPT_DEFAULT_COLLATION, "Set the default collation (Deprecated option, use character_set_server instead).", + {"default-collation", OPT_DEFAULT_COLLATION, "Set the default collation (deprecated option, use --collation-server instead).", (gptr*) &default_collation_name, (gptr*) &default_collation_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"default-storage-engine", OPT_STORAGE_ENGINE, @@ -4086,6 +4076,19 @@ Disable with --skip-bdb (will 
save memory).", {"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL, "Don't flush key buffers between writes for any MyISAM table (Deprecated option, use --delay-key-write=all instead).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_OPENSSL + {"des-key-file", OPT_DES_KEY_FILE, + "Load keys for des_encrypt() and des_encrypt from given file.", + (gptr*) &des_key_file, (gptr*) &des_key_file, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, +#endif /* HAVE_OPENSSL */ +#ifdef HAVE_REPLICATION + {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &disconnect_slave_event_count, + (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, + 0, 0, 0}, +#endif /* HAVE_REPLICATION */ {"enable-locking", OPT_ENABLE_LOCK, "Deprecated option, use --external-locking instead.", (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, @@ -4098,46 +4101,49 @@ Disable with --skip-bdb (will save memory).", {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.", (gptr*) &opt_do_pstack, (gptr*) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef HAVE_SMEM - {"shared-memory", OPT_ENABLE_SHARED_MEMORY, - "Enable the shared memory.",(gptr*) &opt_enable_shared_memory, (gptr*) &opt_enable_shared_memory, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, -#endif {"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0, GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. 
With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.", + (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, - "The maximum length of the result of function group_concat.", - (gptr*) &global_system_variables.group_concat_max_len, - (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, - REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, /* We must always support the next option to make scripts like mysqltest easier to do */ {"gdb", OPT_DEBUGGING, "Set up signals usable for debugging", (gptr*) &opt_debugging, (gptr*) &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", + (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.", + (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master", + (gptr*) &opt_init_slave, (gptr*) &opt_init_slave, 0, GET_STR_ALLOC, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). 
\ +Disable with --skip-innodb (will save memory).", + (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, 1, 0, 0, + 0, 0, 0}, {"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH, "Path to individual files and their sizes.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_INNOBASE_DB {"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR, - "The common part for Innodb table spaces.", (gptr*) &innobase_data_home_dir, + "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR, - "Path to innodb log files.", (gptr*) &innobase_log_group_home_dir, - (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, - 0, 0}, - {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR, - "Where full logs should be archived.", (gptr*) &innobase_log_arch_dir, - (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE, - "Set to 1 if you want to have logs archived.", 0, 0, 0, GET_LONG, OPT_ARG, - 0, 0, 0, 0, 0, 0}, + {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, + "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, + (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE, + "Stores each InnoDB table to an .ibd file in the database dir.", + (gptr*) &innobase_file_per_table, + (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_flush_log_at_trx_commit", OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, "Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).", (gptr*) &innobase_flush_log_at_trx_commit, @@ -4147,38 +4153,28 @@ Disable with --skip-bdb (will save memory).", "With which method to flush data.", (gptr*) &innobase_unix_file_flush_method, 
(gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, - "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, - (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, + "Force InnoDB not to use next-key locking. Instead use only row-level locking", + (gptr*) &innobase_locks_unsafe_for_binlog, + (gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR, + "Where full logs should be archived.", (gptr*) &innobase_log_arch_dir, + (gptr*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_log_archive", OPT_INNODB_LOG_ARCHIVE, + "Set to 1 if you want to have logs archived.", 0, 0, 0, GET_LONG, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR, + "Path to InnoDB log files.", (gptr*) &innobase_log_group_home_dir, + (gptr*) &innobase_log_group_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, + 0, 0}, {"innodb_max_dirty_pages_pct", OPT_INNODB_MAX_DIRTY_PAGES_PCT, "Percentage of dirty pages allowed in bufferpool.", (gptr*) &srv_max_buf_pool_modified_pct, (gptr*) &srv_max_buf_pool_modified_pct, 0, GET_ULONG, REQUIRED_ARG, 90, 0, 100, 0, 0, 0}, - {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE, - "Stores each InnoDB table to an .ibd file in the database dir.", - (gptr*) &innobase_file_per_table, - (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb_locks_unsafe_for_binlog", OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, - "Force Innodb not to use next-key locking. 
Instead use only row-level locking", - (gptr*) &innobase_locks_unsafe_for_binlog, - (gptr*) &innobase_locks_unsafe_for_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif /* End HAVE_INNOBASE_DB */ - {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", - (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master", - (gptr*) &opt_init_slave, (gptr*) &opt_init_slave, 0, GET_STR_ALLOC, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"help", '?', "Display this help and exit.", - (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, - 0, 0}, - {"verbose", 'v', "Used with --help option for detailed help", - (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, - 0, 0}, - {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.", - (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG, - 0, 0, 0, 0, 0, 0}, - {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname, - (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \ +Disable with --skip-isam.", + (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 1, 0, 0, + 0, 0, 0}, {"language", 'L', "Client error messages in given language. 
May be given as a full path.", (gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG, @@ -4188,6 +4184,8 @@ Disable with --skip-bdb (will save memory).", (gptr*) &opt_local_infile, (gptr*) &opt_local_infile, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname, + (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin", OPT_BIN_LOG, "Log update queries in binary format.", (gptr*) &opt_bin_logname, (gptr*) &opt_bin_logname, 0, GET_STR_ALLOC, @@ -4196,45 +4194,57 @@ Disable with --skip-bdb (will save memory).", "File that holds the names for last binary log files.", (gptr*) &opt_binlog_index_name, (gptr*) &opt_binlog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"log-error", OPT_ERROR_LOG_FILE, "Log error file.", + (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR, + OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.", (gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-update", OPT_UPDATE_LOG, - "Log updates to file.# where # is a unique number if not given.", - (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, - OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-slow-queries", OPT_SLOW_QUERY_LOG, - "Log slow queries to this log file. Defaults logging to hostname-slow.log file.", - (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, - 0, 0, 0, 0, 0, 0}, {"log-long-format", '0', "Log some extra information to update log. 
Please note that this option is deprecated; see --log-short-format option.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"log-short-format", OPT_SHORT_LOG_FORMAT, - "Don't log extra information to update and slow-query logs.", - (gptr*) &opt_short_log_format, (gptr*) &opt_short_log_format, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES, "Log queries that are executed without benefit of any index.", (gptr*) &opt_log_queries_not_using_indexes, (gptr*) &opt_log_queries_not_using_indexes, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-short-format", OPT_SHORT_LOG_FORMAT, + "Don't log extra information to update and slow-query logs.", + (gptr*) &opt_short_log_format, (gptr*) &opt_short_log_format, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slave-updates", OPT_LOG_SLAVE_UPDATES, "Tells the slave to log the updates from the slave thread to the binary log. You will need to turn it on if you plan to daisy-chain the slaves.", (gptr*) &opt_log_slave_updates, (gptr*) &opt_log_slave_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"log-slow-queries", OPT_SLOW_QUERY_LOG, + "Log slow queries to this log file. Defaults logging to hostname-slow.log file.", + (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"log-update", OPT_UPDATE_LOG, + "Log updates to file.# where # is a unique number if not given.", + (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, + OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"log-warnings", 'W', "Log some not critical warnings to the log file. 
Use this option twice, or --log-warnings=2 if you want 'Aborted connections' warning to be logged in the error log file.", + (gptr*) &global_system_variables.log_warnings, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, + 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, "INSERT/DELETE/UPDATE has lower priority than selects.", (gptr*) &global_system_variables.low_priority_updates, (gptr*) &max_system_variables.low_priority_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"master-connect-retry", OPT_MASTER_CONNECT_RETRY, + "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.", + (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT, + REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"master-host", OPT_MASTER_HOST, "Master hostname or IP address for replication. If not set, the slave thread will not be started. Note that the setting of master-host will be ignored if there exists a valid master.info file.", (gptr*) &master_host, (gptr*) &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"master-user", OPT_MASTER_USER, - "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. The value in master.info will take precedence if it can be read.", - (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, - 0, 0, 0, 0}, + {"master-info-file", OPT_MASTER_INFO_FILE, + "The location and name of the file that remembers the master and where the I/O replication \ +thread is in the master's binlogs.", + (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-password", OPT_MASTER_PASSWORD, "The password the slave thread will authenticate with when connecting to the master. 
If not set, an empty password is assumed.The value in master.info will take precedence if it can be read.", (gptr*)&master_password, (gptr*)&master_password, 0, @@ -4243,32 +4253,14 @@ Disable with --skip-bdb (will save memory).", "The port the master is listening on. If not set, the compiled setting of MYSQL_PORT is assumed. If you have not tinkered with configure options, this should be 3306. The value in master.info will take precedence if it can be read.", (gptr*) &master_port, (gptr*) &master_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, - {"master-connect-retry", OPT_MASTER_CONNECT_RETRY, - "The number of seconds the slave thread will sleep before retrying to connect to the master in case the master goes down or the connection is lost.", - (gptr*) &master_connect_retry, (gptr*) &master_connect_retry, 0, GET_UINT, - REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"master-retry-count", OPT_MASTER_RETRY_COUNT, "The number of tries the slave will make to connect to the master before giving up.", (gptr*) &master_retry_count, (gptr*) &master_retry_count, 0, GET_ULONG, REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0}, - {"master-info-file", OPT_MASTER_INFO_FILE, - "The location and name of the file that remembers the master and where the I/O replication \ -thread is in the master's binlogs.", - (gptr*) &master_info_file, (gptr*) &master_info_file, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl", OPT_MASTER_SSL, "Enable the slave to connect to the master using SSL.", (gptr*) &master_ssl, (gptr*) &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"master-ssl-key", OPT_MASTER_SSL_KEY, - "Master SSL keyfile name. Only applies if you have enabled master-ssl.", - (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG, - 0, 0, 0, 0, 0, 0}, - {"master-ssl-cert", OPT_MASTER_SSL_CERT, - "Master SSL certificate file name. 
Only applies if you have enabled \ -master-ssl", - (gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG, - 0, 0, 0, 0, 0, 0}, {"master-ssl-ca", OPT_MASTER_SSL_CA, "Master SSL CA file. Only applies if you have enabled master-ssl.", (gptr*) &master_ssl_ca, (gptr*) &master_ssl_ca, 0, GET_STR, OPT_ARG, @@ -4277,39 +4269,39 @@ master-ssl", "Master SSL CA path. Only applies if you have enabled master-ssl.", (gptr*) &master_ssl_capath, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"master-ssl-cert", OPT_MASTER_SSL_CERT, + "Master SSL certificate file name. Only applies if you have enabled \ +master-ssl", + (gptr*) &master_ssl_cert, (gptr*) &master_ssl_cert, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, {"master-ssl-cipher", OPT_MASTER_SSL_CIPHER, "Master SSL cipher. Only applies if you have enabled master-ssl.", (gptr*) &master_ssl_cipher, (gptr*) &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"myisam-recover", OPT_MYISAM_RECOVER, - "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", - (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, - GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory, - (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"master-ssl-key", OPT_MASTER_SSL_KEY, + "Master SSL keyfile name. Only applies if you have enabled master-ssl.", + (gptr*) &master_ssl_key, (gptr*) &master_ssl_key, 0, GET_STR, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"master-user", OPT_MASTER_USER, + "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. 
The value in master.info will take precedence if it can be read.", + (gptr*) &master_user, (gptr*) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, + 0, 0, 0, 0}, #ifdef HAVE_REPLICATION - {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT, - "Option used by mysql-test for debugging and testing of replication.", - (gptr*) &disconnect_slave_event_count, - (gptr*) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, - 0, 0, 0}, - {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT, - "Option used by mysql-test for debugging and testing of replication.", - (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count, - 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS, "Option used by mysql-test for debugging and testing of replication.", (gptr*) &max_binlog_dump_events, (gptr*) &max_binlog_dump_events, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL, - "Option used by mysql-test for debugging and testing of replication.", - (gptr*) &opt_sporadic_binlog_dump_fail, - (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, - 0}, #endif /* HAVE_REPLICATION */ - {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT, - "Simulate memory shortage when compiled with the --with-debug=full option.", - 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (gptr*) &locked_in_memory, + (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"myisam-recover", OPT_MYISAM_RECOVER, + "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", + (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, + GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). 
\ +Disable with --skip-ndbcluster (will save memory).", + (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0, + 0, 0, 0}, {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, (gptr*) &max_system_variables.new_mode, @@ -4331,32 +4323,43 @@ master-ssl", {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.", (gptr*) &pidfile_name_ptr, (gptr*) &pidfile_name_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"log-error", OPT_ERROR_LOG_FILE, "Log error file.", - (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR, - OPT_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection.", (gptr*) &mysqld_port, (gptr*) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log", OPT_RELAY_LOG, + "The location and name to use for relay logs.", + (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0, + GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log-index", OPT_RELAY_LOG_INDEX, + "The location and name to use for the file that keeps a list of the last \ +relay logs.", + (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, + "The location and name of the file that remembers where the SQL replication \ +thread is in the relay logs.", + (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-db", OPT_REPLICATE_DO_DB, "Tells the slave thread to restrict replication to the specified database. To specify more than one database, use the directive multiple times, once for each database. Note that this will only work if you do not use cross-database queries such as UPDATE some_db.some_table SET foo='bar' while having selected a different or no database. 
If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-do-table=db_name.%.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-table", OPT_REPLICATE_DO_TABLE, "Tells the slave thread to restrict replication to the specified table. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates, in contrast to replicate-do-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, - "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB, "Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE, "Tells the slave thread to not replicate to the specified table. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-datbase updates, in contrast to replicate-ignore-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, - "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. 
To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB, "Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, + "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, + "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID, "In replication, if set to 1, do not skip events having our server id. \ @@ -4371,8 +4374,6 @@ Can't be set to 1 if --log-slave-updates is used.", "Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. 
Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.", (gptr*) &report_host, (gptr*) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"report-user", OPT_REPORT_USER, "Undocumented.", (gptr*) &report_user, - (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"report-password", OPT_REPORT_PASSWORD, "Undocumented.", (gptr*) &report_password, (gptr*) &report_password, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4380,29 +4381,25 @@ Can't be set to 1 if --log-slave-updates is used.", "Port for connecting to slave reported to the master during slave registration. Set it only if the slave is listening on a non-default port or if you have a special tunnel from the master or other clients to the slave. 
If not sure, leave this option unset.", (gptr*) &report_port, (gptr*) &report_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, + {"report-user", OPT_REPORT_USER, "Undocumented.", (gptr*) &report_user, + (gptr*) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented.", (gptr*) &rpl_recovery_rank, (gptr*) &rpl_recovery_rank, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log", OPT_RELAY_LOG, - "The location and name to use for relay logs.", - (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0, - GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log-index", OPT_RELAY_LOG_INDEX, - "The location and name to use for the file that keeps a list of the last \ -relay logs.", - (gptr*) &opt_relaylog_index_name, (gptr*) &opt_relaylog_index_name, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifndef TO_BE_DELETED {"safe-show-database", OPT_SAFE_SHOW_DB, - "Deprecated option; One should use GRANT SHOW DATABASES instead...", + "Deprecated option; use GRANT SHOW DATABASES instead...", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"safe-user-create", OPT_SAFE_USER_CREATE, "Don't allow new user creation by the user who has no write privileges to the mysql.user table.", (gptr*) &opt_safe_user_create, (gptr*) &opt_safe_user_create, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT, + "Simulate memory shortage when compiled with the --with-debug=full option.", + 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.", (gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG, my_bool(0), 0, 0, 0, 0, 0}, @@ -4414,7 +4411,12 @@ relay logs.", "Change the value of a variable. 
Please note that this option is deprecated;you can set variables directly with --variable-name=value.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_SMEM - {"shared_memory_base_name",OPT_SHARED_MEMORY_BASE_NAME, + {"shared-memory", OPT_ENABLE_SHARED_MEMORY, + "Enable the shared memory.",(gptr*) &opt_enable_shared_memory, (gptr*) &opt_enable_shared_memory, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif +#ifdef HAVE_SMEM + {"shared-memory-base-name",OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", (gptr*) &shared_memory_base_name, (gptr*) &shared_memory_base_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif @@ -4422,31 +4424,15 @@ relay logs.", "Show user and password in SHOW SLAVE HOSTS on this master", (gptr*) &opt_show_slave_auth_info, (gptr*) &opt_show_slave_auth_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"concurrent-insert", OPT_CONCURRENT_INSERT, - "Use concurrent insert with MyISAM. Disable with --skip-concurrent-insert.", - (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, - 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"skip-grant-tables", OPT_SKIP_GRANT, "Start without grant tables. This gives all users FULL ACCESS to all tables!", (gptr*) &opt_noacl, (gptr*) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). \ -Disable with --skip-innodb (will save memory).", - (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, 1, 0, 0, - 0, 0, 0}, - {"isam", OPT_ISAM, "Enable isam (if this version of MySQL supports it). \ -Disable with --skip-isam.", - (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 1, 0, 0, - 0, 0, 0}, - {"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). 
\ -Disable with --skip-ndbcluster (will save memory).", - (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0, - 0, 0, 0}, + {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0, + GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-locking", OPT_SKIP_LOCK, "Deprecated option, use --skip-external-locking instead.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0, - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-name-resolve", OPT_SKIP_RESOLVE, "Don't resolve hostnames. All hostnames are IP's or 'localhost'.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -4455,6 +4441,13 @@ Disable with --skip-ndbcluster (will save memory).", 0, 0, 0}, {"skip-new", OPT_SKIP_NEW, "Don't use new, possible wrong routines.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef DBUG_OFF +#ifdef SAFEMALLOC + {"skip-safemalloc", OPT_SKIP_SAFEMALLOC, + "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG, + 0, 0, 0, 0, 0, 0}, +#endif +#endif {"skip-show-database", OPT_SKIP_SHOW_DB, "Don't allow 'SHOW DATABASE' commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -4469,11 +4462,6 @@ Disable with --skip-ndbcluster (will save memory).", {"skip-thread-priority", OPT_SKIP_PRIOR, "Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, - "The location and name of the file that remembers where the SQL replication \ -thread is in the relay logs.", - (gptr*) &relay_log_info_file, (gptr*) &relay_log_info_file, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR, "The location where the slave should put its temporary files when \ @@ -4487,6 +4475,13 @@ replicating a LOAD DATA INFILE command.", {"socket", OPT_SOCKET, "Socket file to use for connection.", (gptr*) &mysqld_unix_port, (gptr*) 
&mysqld_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_REPLICATION + {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL, + "Option used by mysql-test for debugging and testing of replication.", + (gptr*) &opt_sporadic_binlog_dump_fail, + (gptr*) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, + 0}, +#endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, "If set, setting SQL_LOG_BIN to a value will automatically set SQL_LOG_UPDATE to the same value and vice versa.", (gptr*) &opt_sql_bin_update, (gptr*) &opt_sql_bin_update, 0, GET_BOOL, @@ -4498,6 +4493,14 @@ replicating a LOAD DATA INFILE command.", #ifdef HAVE_OPENSSL #include "sslopt-longopts.h" #endif +#ifdef __WIN__ + {"standalone", OPT_STANDALONE, + "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG, + NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif + {"symbolic-links", 's', "Enable symbolic link support.", + (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, + IF_PURIFY(0,1), 0, 0, 0, 0, 0}, {"temp-pool", OPT_TEMP_POOL, "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", (gptr*) &use_temp_pool, (gptr*) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, @@ -4515,24 +4518,17 @@ replicating a LOAD DATA INFILE command.", {"transaction-isolation", OPT_TX_ISOLATION, "Default transaction isolation level.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.", - (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"use-symbolic-links", 's', "Enable symbolic link support. 
Deprecated option; Use --symbolic-links instead.", - (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, - IF_PURIFY(0,1), 0, 0, 0, 0, 0}, - {"symbolic-links", 's', "Enable symbolic link support.", + {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; use --symbolic-links instead.", (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, IF_PURIFY(0,1), 0, 0, 0, 0, 0}, {"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"verbose", 'v', "Used with --help option for detailed help", + (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"log-warnings", 'W', "Log some not critical warnings to the log file. Use this option twice, or --log-warnings=2 if you want 'Aborted connections' warning to be logged in the error log file.", - (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, - 0, 0, 0}, - {"warnings", 'W', "Deprecated ; Use --log-warnings instead.", + {"warnings", 'W', "Deprecated; use --log-warnings instead.", (gptr*) &global_system_variables.log_warnings, (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, 0, 0, 0}, @@ -4608,6 +4604,11 @@ replicating a LOAD DATA INFILE command.", "Use stopwords from this file instead of built-in list.", (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, + "The maximum length of the result of function group_concat.", + (gptr*) &global_system_variables.group_concat_max_len, + (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, + REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, #ifdef HAVE_INNOBASE_DB {"innodb_mirrored_log_groups", OPT_INNODB_MIRRORED_LOG_GROUPS, "Number of identical 
copies of log groups we keep for the database. Currently this should be set to 1.", @@ -4967,12 +4968,21 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, 1, 0}, +#ifdef HAVE_BERKELEY_DB + {"sync-bdb-logs", OPT_BDB_SYNC, + "Synchronously flush logs. Enabled by default", + (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, + NO_ARG, 1, 0, 0, 0, 0, 0}, +#endif /* HAVE_BERKELEY_DB */ {"sync-binlog", OPT_SYNC_BINLOG, "Sync the binlog to disk after every #th event. \ #=0 (the default) does no sync. Syncing slows MySQL down", (gptr*) &sync_binlog_period, (gptr*) &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, + {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default", + (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, + 0, 0, 0, 0}, {"table_cache", OPT_TABLE_CACHE, "The number of open tables for all threads.", (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L, From ad325516a912b5ae5f364c4e1cf3f2c58c3aad5f Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 9 Aug 2004 09:44:05 +0200 Subject: [PATCH 84/93] testTransaction -n FRead* testTransaction -n *Scan* testTimeout -n * --- .../kernel/signaldata/DumpStateOrd.hpp | 1 + ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 11 +- ndb/test/include/HugoOperations.hpp | 13 +- ndb/test/ndbapi/testTimeout.cpp | 148 ++++++----- ndb/test/ndbapi/testTransactions.cpp | 21 +- ndb/test/run-test/daily-basic-tests.txt | 240 +++++++++++++++--- ndb/test/src/HugoOperations.cpp | 94 ++++++- ndb/test/src/NDBT_Test.cpp | 8 +- 8 files changed, 414 insertions(+), 122 deletions(-) diff --git a/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/ndb/include/kernel/signaldata/DumpStateOrd.hpp index 6403a52926f..1e349fad55a 100644 --- 
a/ndb/include/kernel/signaldata/DumpStateOrd.hpp +++ b/ndb/include/kernel/signaldata/DumpStateOrd.hpp @@ -94,6 +94,7 @@ public: TcDumpOneApiConnectRec = 2505, TcDumpAllApiConnectRec = 2506, TcSetTransactionTimeout = 2507, + TcSetApplTransactionTimeout = 2508, CmvmiDumpConnections = 2600, CmvmiDumpLongSignalMemory = 2601, CmvmiSetRestartOnErrorInsert = 2602, diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index ed467db1c6c..a3ec91cce19 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -5043,11 +5043,11 @@ void Dbtc::execLQHKEYREF(Signal* signal) jam(); diverify010Lab(signal); return; - } else if (regApiPtr->tckeyrec > 0) { + } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) { jam(); sendtckeyconf(signal, 2); return; - }//if + } }//if return; @@ -10533,6 +10533,13 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal) set_timeout_value(signal->theData[1]); } } + + if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){ + jam(); + if(signal->getLength() > 1){ + set_appl_timeout_value(signal->theData[1]); + } + } }//Dbtc::execDUMP_STATE_ORD() void Dbtc::execSET_VAR_REQ(Signal* signal) diff --git a/ndb/test/include/HugoOperations.hpp b/ndb/test/include/HugoOperations.hpp index 37e53e322c8..6bd8f7204b2 100644 --- a/ndb/test/include/HugoOperations.hpp +++ b/ndb/test/include/HugoOperations.hpp @@ -58,9 +58,6 @@ public: int recordNo, int numRecords = 1); - NdbResultSet* scanReadRecords(Ndb* pNdb, ScanLock lock = SL_Read); - int readTuples(NdbResultSet*); - int execute_Commit(Ndb*, AbortOption ao = AbortOnError); int execute_NoCommit(Ndb*, @@ -92,7 +89,11 @@ public: int recordNo, int numRecords = 1, int updatesValue = 0); - + + int scanReadRecords(Ndb*, NdbScanOperation::LockMode = + NdbScanOperation::LM_CommittedRead, + int numRecords = 1); + protected: void allocRows(int rows); void deallocRows(); @@ -101,6 +102,10 @@ protected: HugoCalculator calc; Vector 
savedRecords; + + struct RsPair { NdbResultSet* m_result_set; int records; }; + Vector m_result_sets; + Vector m_executed_result_sets; private: NdbConnection* pTrans; }; diff --git a/ndb/test/ndbapi/testTimeout.cpp b/ndb/test/ndbapi/testTimeout.cpp index d37c58f7ea6..62e69125073 100644 --- a/ndb/test/ndbapi/testTimeout.cpp +++ b/ndb/test/ndbapi/testTimeout.cpp @@ -20,6 +20,44 @@ #include #include #include +#include + +#define TIMEOUT 3000 + +Uint32 g_org_timeout = 3000; + +int +setTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){ + NdbRestarter restarter; + + NdbConfig conf(GETNDB(step)->getNodeId()+1); + unsigned int nodeId = conf.getMasterNodeId(); + if (!conf.getProperty(nodeId, + NODE_TYPE_DB, + CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, + &g_org_timeout)){ + return NDBT_FAILED; + } + + int val[] = { DumpStateOrd::TcSetApplTransactionTimeout, TIMEOUT }; + if(restarter.dumpStateAllNodes(val, 2) != 0){ + return NDBT_FAILED; + } + + return NDBT_OK; +} + +int +resetTransactionTimeout(NDBT_Context* ctx, NDBT_Step* step){ + NdbRestarter restarter; + + int val[] = { DumpStateOrd::TcSetApplTransactionTimeout, g_org_timeout }; + if(restarter.dumpStateAllNodes(val, 2) != 0){ + return NDBT_FAILED; + } + + return NDBT_OK; +} int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){ @@ -55,16 +93,10 @@ int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){ NdbConfig conf(GETNDB(step)->getNodeId()+1); unsigned int nodeId = conf.getMasterNodeId(); int stepNo = step->getStepNo(); - Uint32 timeoutVal; - if (!conf.getProperty(nodeId, - NODE_TYPE_DB, - CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, - &timeoutVal)){ - return NDBT_FAILED; - } - int minSleep = (int)(timeoutVal * 1.5); - int maxSleep = timeoutVal * 2; - ndbout << "TransactionInactiveTimeout="<getProperty("Op2", (Uint32)0); int records = ctx->getNumRecords(); - Uint32 timeoutVal; - if (!conf.getProperty(nodeId, - NODE_TYPE_DB, - CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, - &timeoutVal)){ - return NDBT_FAILED; - } - - int minSleep = 
(int)(timeoutVal * 1.5); - int maxSleep = timeoutVal * 2; + int minSleep = (int)(TIMEOUT * 1.5); + int maxSleep = TIMEOUT * 2; HugoOperations hugoOps(*ctx->getTab()); Ndb* pNdb = GETNDB(step); - for (int l = 0; l < loops && !ctx->isTestStopped(); l++){ + for (int l = 0; lisTestStopped() && result == NDBT_OK; l++){ int op1 = 0 + (l + stepNo) * mul1; int op2 = 0 + (l + stepNo) * mul2; @@ -127,7 +148,7 @@ int runTimeoutTrans2(NDBT_Context* ctx, NDBT_Step* step){ op1 = (op1 % 5); op2 = (op2 % 5); - ndbout << stepNo << ": TransactionInactiveTimeout="<nextResult(); + switch(res){ + case 1: + return 626; + case -1: + const NdbError err = pTrans->getNdbError(); + ERR(err); + return (err.code > 0 ? err.code : NDBT_FAILED); + } + + // A row found + + switch(rows){ + case 0: + return 4000; + default: + m_result_sets[i].records--; + break; + } + } + + m_result_sets.clear(); + return NDBT_OK; } @@ -388,6 +421,35 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){ return NDBT_FAILED; return err.code; } + + for(int i = 0; inextResult(); + switch(res){ + case 1: + return 626; + case -1: + const NdbError err = pTrans->getNdbError(); + ERR(err); + return (err.code > 0 ? 
err.code : NDBT_FAILED); + } + + // A row found + + switch(rows){ + case 0: + return 4000; + default: + case 1: + break; + } + } + + m_result_sets.clear(); + return NDBT_OK; } @@ -704,3 +766,33 @@ HugoOperations::indexUpdateRecord(Ndb*, } return NDBT_OK; } + +int +HugoOperations::scanReadRecords(Ndb* pNdb, NdbScanOperation::LockMode lm, + int records){ + + allocRows(records); + NdbScanOperation * pOp = pTrans->getNdbScanOperation(tab.getName()); + + if(!pOp) + return -1; + + NdbResultSet * rs = pOp->readTuples(lm, 1, 1); + + if(!rs){ + return -1; + } + + for(int a = 0; aattributeStore(a) = + pOp->getValue(tab.getColumn(a)->getName())) == 0) { + ERR(pTrans->getNdbError()); + return NDBT_FAILED; + } + } + + RsPair p = {rs, records}; + m_result_sets.push_back(p); + + return 0; +} diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index ed9967fdb9a..e5ad531675d 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -820,15 +820,15 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab, const NdbDictionary::Table* pTab2 = pDict->getTable(pTab->getName()); if (createTable == true){ - if (pTab2 != 0 && !pTab->equal(* pTab2)){ + if(pTab2 != 0 && pDict->dropTable(pTab->getName()) != 0){ numTestsFail++; numTestsExecuted++; - g_err << "ERROR0: Failed to create table " << pTab->getName() << endl; + g_err << "ERROR0: Failed to drop table " << pTab->getName() << endl; tests[t]->saveTestResult(pTab, FAILED_TO_CREATE); continue; } - - if(pTab2 == 0 && pDict->createTable(* pTab) != 0){ + + if(pDict->createTable(* pTab) != 0){ numTestsFail++; numTestsExecuted++; g_err << "ERROR1: Failed to create table " << pTab->getName() From e4348ee9a5421287d544266512dfd8288bd4ea7d Mon Sep 17 00:00:00 2001 From: "bar@mysql.com" <> Date: Mon, 9 Aug 2004 13:04:34 +0500 Subject: [PATCH 85/93] configure.in: Collation name fixes. 
--- configure.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.in b/configure.in index 0fb0ccb9b9b..10d9a8662eb 100644 --- a/configure.in +++ b/configure.in @@ -2562,7 +2562,7 @@ case $default_charset in ;; cp1250) default_charset_default_collation="cp1250_general_ci" - default_charset_collations="cp1250_general_ci cp1250_czech_ci cp1250_bin" + default_charset_collations="cp1250_general_ci cp1250_czech_cs cp1250_bin" ;; cp1251) default_charset_default_collation="cp1251_general_ci" @@ -2638,7 +2638,7 @@ case $default_charset in ;; latin2) default_charset_default_collation="latin2_general_ci" - default_charset_collations="latin2_general_ci latin2_bin latin2_czech_ci latin2_hungarian_ci latin2_croatian_ci" + default_charset_collations="latin2_general_ci latin2_bin latin2_czech_cs latin2_hungarian_ci latin2_croatian_ci" ;; latin5) default_charset_default_collation="latin5_turkish_ci" From 82d54f6d8691ce08a4536482738389ba36516de9 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Mon, 9 Aug 2004 11:02:09 +0200 Subject: [PATCH 86/93] bug#4369 - MySQL 4.1 regression in Alter table/tmp table from hash. Solved performance problems by fixing a typo, which prevented enabling of write buffer. --- sql/ha_myisam.cc | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 51c8521c376..3d2d25b3e7d 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -957,15 +957,21 @@ int ha_myisam::indexes_are_disabled(void) start_bulk_insert(rows) rows Rows to be inserted 0 if we don't know + + NOTICE + Do not forget to call end_bulk_insert() later! 
*/ void ha_myisam::start_bulk_insert(ha_rows rows) { + DBUG_ENTER("ha_myisam::start_bulk_insert"); THD *thd=current_thd; ulong size= min(thd->variables.read_buff_size, table->avg_row_length*rows); + DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu", + (ulong) rows, size)); /* don't enable row cache if too few rows */ - if (!rows && rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE) + if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE)) mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size); can_enable_indexes= (file->s->state.key_map == @@ -989,8 +995,22 @@ void ha_myisam::start_bulk_insert(ha_rows rows) mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows); } } + DBUG_VOID_RETURN; } +/* + end special bulk-insert optimizations, + which have been activated by start_bulk_insert(). + + SYNOPSIS + end_bulk_insert() + no arguments + + RETURN + 0 OK + != 0 Error +*/ + int ha_myisam::end_bulk_insert() { mi_end_bulk_insert(file); From 1add7651b497e4d82020bdb7643a36b5fe4b19ef Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 9 Aug 2004 13:25:07 +0200 Subject: [PATCH 87/93] bug#4529 --- ndb/include/mgmcommon/ConfigRetriever.hpp | 45 ++- .../mgmcommon/LocalConfig.hpp | 34 +-- ndb/src/common/mgmcommon/ConfigRetriever.cpp | 278 +++++++----------- ndb/src/common/mgmcommon/LocalConfig.cpp | 94 ++---- ndb/src/kernel/main.cpp | 11 +- ndb/src/kernel/vm/Configuration.cpp | 72 +++-- ndb/src/kernel/vm/Configuration.hpp | 1 + ndb/src/mgmapi/mgmapi.cpp | 21 +- ndb/src/mgmclient/main.cpp | 10 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 29 +- ndb/src/mgmsrv/MgmtSrvr.hpp | 5 +- ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 7 +- ndb/src/mgmsrv/Services.cpp | 41 +-- ndb/src/mgmsrv/main.cpp | 23 +- ndb/src/ndbapi/TransporterFacade.cpp | 50 ++-- 15 files changed, 309 insertions(+), 412 deletions(-) rename ndb/{src/common => include}/mgmcommon/LocalConfig.hpp (73%) diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp index 
d884e914f0b..396ce24308c 100644 --- a/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -19,6 +19,8 @@ #include #include +#include +#include /** * @class ConfigRetriever @@ -26,15 +28,16 @@ */ class ConfigRetriever { public: - ConfigRetriever(); - ConfigRetriever(const int id, const char* remoteHost, const int port); + ConfigRetriever(Uint32 version, Uint32 nodeType); ~ConfigRetriever(); /** * Read local config * @return Own node id, -1 means fail */ - int init(bool onlyNodeId = false); + int init(); + + int do_connect(); /** * Get configuration for current (nodeId given in local config file) node. @@ -47,7 +50,7 @@ public: * @return ndb_mgm_configuration object if succeeded, * NULL if erroneous local config file or configuration error. */ - struct ndb_mgm_configuration * getConfig(int versionId, int nodeType); + struct ndb_mgm_configuration * getConfig(); const char * getErrorString(); @@ -61,29 +64,22 @@ public: */ void setLocalConfigFileName(const char * connectString); - /** - * Sets connectstring which can be used instead of local config file - * environment variables and Ndb.cfg has precidence over this - */ - void setDefaultConnectString(const char * defaultConnectString); - /** * @return Node id of this node (as stated in local config or connectString) */ - inline Uint32 getOwnNodeId() { return _ownNodeId; } - + Uint32 allocNodeId(); /** * Get config using socket */ - struct ndb_mgm_configuration * getConfig(const char * mgmhost, short port, - int versionId, int nodetype); + struct ndb_mgm_configuration * getConfig(NdbMgmHandle handle); + /** * Get config from file */ - struct ndb_mgm_configuration * getConfig(const char * file, int versionId); + struct ndb_mgm_configuration * getConfig(const char * file); private: - char * errorString; + BaseString errorString; enum ErrorType { CR_ERROR = 0, CR_RETRY = 1 @@ -91,20 +87,21 @@ private: ErrorType latestErrorType; void setError(ErrorType, const char * errorMsg); - - 
char * _localConfigFileName; - struct LocalConfig * _localConfig; + + BaseString _localConfigFileName; + struct LocalConfig _localConfig; int _ownNodeId; - - char * m_connectString; - char * m_defaultConnectString; - + + BaseString m_connectString; + + Uint32 m_version; + Uint32 m_node_type; NdbMgmHandle m_handle; /** * Verify config */ - bool verifyConfig(const struct ndb_mgm_configuration *, int type); + bool verifyConfig(const struct ndb_mgm_configuration *); }; #endif diff --git a/ndb/src/common/mgmcommon/LocalConfig.hpp b/ndb/include/mgmcommon/LocalConfig.hpp similarity index 73% rename from ndb/src/common/mgmcommon/LocalConfig.hpp rename to ndb/include/mgmcommon/LocalConfig.hpp index eb676bf9bed..c741b35f482 100644 --- a/ndb/src/common/mgmcommon/LocalConfig.hpp +++ b/ndb/include/mgmcommon/LocalConfig.hpp @@ -32,49 +32,35 @@ enum MgmtSrvrId_Type { struct MgmtSrvrId { MgmtSrvrId_Type type; - union { - struct { - char * remoteHost; - unsigned int port; - } tcp; - struct { - char * filename; - } file; - } data; + BaseString name; + unsigned int port; }; struct LocalConfig { int _ownNodeId; - - int size; - int items; - MgmtSrvrId ** ids; - + Vector ids; + int error_line; char error_msg[256]; LocalConfig(); ~LocalConfig(); - bool init(bool onlyNodeId = false, - const char *connectString = 0, - const char *fileName = 0, - const char *defaultConnectString = 0); - - void add(MgmtSrvrId *i); + bool init(const char *connectString = 0, + const char *fileName = 0); void printError() const; void printUsage() const; void setError(int lineNumber, const char * _msg); - bool readConnectString(const char * connectString, bool onlyNodeId = false); - bool readFile(const char * filename, bool &fopenError, bool onlyNodeId = false); + bool readConnectString(const char *); + bool readFile(const char * file, bool &fopenError); bool parseLine(char * line, int lineNumber); - + bool parseNodeId(const char *buf); bool parseHostName(const char *buf); bool parseFileName(const char *buf); - 
bool parseString(const char *buf, bool onlyNodeId, char *line); + bool parseString(const char *buf, char *line); }; #endif // LocalConfig_H diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 2de82d7250e..b8856382c15 100644 --- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -43,33 +43,14 @@ //**************************************************************************** //**************************************************************************** -ConfigRetriever::ConfigRetriever() { +ConfigRetriever::ConfigRetriever(Uint32 version, Uint32 node_type) { - _localConfigFileName = 0; - m_defaultConnectString = 0; - - - errorString = 0; - _localConfig = new LocalConfig(); - m_connectString = 0; - m_handle= 0; + m_version = version; + m_node_type = node_type; } ConfigRetriever::~ConfigRetriever(){ - if(_localConfigFileName != 0) - free(_localConfigFileName); - - if(m_defaultConnectString != 0) - free(m_defaultConnectString); - - if(m_connectString != 0) - free(m_connectString); - - if(errorString != 0) - free(errorString); - - delete _localConfig; if (m_handle) { ndb_mgm_disconnect(m_handle); @@ -82,68 +63,51 @@ ConfigRetriever::~ConfigRetriever(){ //**************************************************************************** int -ConfigRetriever::init(bool onlyNodeId) { - if (_localConfig->init(onlyNodeId, m_connectString, _localConfigFileName, m_defaultConnectString)) { - return _ownNodeId = (*_localConfig)._ownNodeId; +ConfigRetriever::init() { + if (!_localConfig.init(m_connectString.c_str(), + _localConfigFileName.c_str())){ + + setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr"); + _localConfig.printError(); + _localConfig.printUsage(); + return -1; } - - setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr"); - _localConfig->printError(); - _localConfig->printUsage(); - - return -1; -} 
-//**************************************************************************** -//**************************************************************************** -//**************************************************************************** -//**************************************************************************** -struct ndb_mgm_configuration* -ConfigRetriever::getConfig(int verId, int nodeType) { + return _ownNodeId = _localConfig._ownNodeId; +} - int res = init(); - if (res == -1) { - return 0; - } +int +ConfigRetriever::do_connect(){ - if (_localConfig->items == 0){ - setError(CR_ERROR,"No Management Servers configured in local config file"); - return 0; + if(!m_handle) + m_handle= ndb_mgm_create_handle(); + + if (m_handle == 0) { + setError(CR_ERROR, "Unable to allocate mgm handle"); + return -1; } int retry = 1; int retry_max = 12; // Max number of retry attempts int retry_interval= 5; // Seconds between each retry - do { + while(retry < retry_max){ Uint32 type = CR_ERROR; - for (int i = 0; i<_localConfig->items; i++){ - MgmtSrvrId * m = _localConfig->ids[i]; - struct ndb_mgm_configuration * p = 0; + BaseString tmp; + for (int i = 0; i<_localConfig.ids.size(); i++){ + MgmtSrvrId * m = &_localConfig.ids[i]; switch(m->type){ case MgmId_TCP: - p = getConfig(m->data.tcp.remoteHost, m->data.tcp.port, - verId, nodeType); - break; - case MgmId_File: - p = getConfig(m->data.file.filename, verId); - break; - default: - setError(CR_ERROR, "Unknown error type"); - break; - } - - if (p != 0) { - if(!verifyConfig(p, nodeType)){ - free(p); + tmp.assfmt("%s:%d", m->name.c_str(), m->port); + if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) { return 0; } - return p; + setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle)); + case MgmId_File: + break; } - if(latestErrorType == CR_RETRY) - type = CR_RETRY; - } // for - - if(type == CR_RETRY){ + } + + if(latestErrorType == CR_RETRY){ REPORT_WARNING("Failed to retrieve cluster configuration"); ndbout << "(Cause of 
failure: " << getErrorString() << ")" << endl; ndbout << "Attempt " << retry << " of " << retry_max << ". " @@ -154,82 +118,63 @@ ConfigRetriever::getConfig(int verId, int nodeType) { break; } retry++; - - } while (retry <= retry_max); + } - return 0; + ndb_mgm_destroy_handle(&m_handle); + m_handle= 0; + return -1; +} + +//**************************************************************************** +//**************************************************************************** +//**************************************************************************** +//**************************************************************************** +struct ndb_mgm_configuration* +ConfigRetriever::getConfig() { + + struct ndb_mgm_configuration * p = 0; + + if(m_handle != 0){ + p = getConfig(m_handle); + } else { + for (int i = 0; i<_localConfig.ids.size(); i++){ + MgmtSrvrId * m = &_localConfig.ids[i]; + switch(m->type){ + case MgmId_File: + p = getConfig(m->name.c_str()); + break; + case MgmId_TCP: + break; + } + if(p) + break; + } + } + if(p == 0) + return 0; + + if(!verifyConfig(p)){ + free(p); + p= 0; + } + + return p; } ndb_mgm_configuration * -ConfigRetriever::getConfig(const char * mgmhost, - short port, - int versionId, - int nodetype){ - if (m_handle) { - ndb_mgm_disconnect(m_handle); - ndb_mgm_destroy_handle(&m_handle); - } - - m_handle = ndb_mgm_create_handle(); - - if (m_handle == 0) { - setError(CR_ERROR, "Unable to allocate mgm handle"); - return 0; - } - - BaseString tmp; - tmp.assfmt("%s:%d", mgmhost, port); - if (ndb_mgm_connect(m_handle, tmp.c_str()) != 0) { - setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle)); - ndb_mgm_destroy_handle(&m_handle); - m_handle= 0; - return 0; - } - - ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle, versionId); +ConfigRetriever::getConfig(NdbMgmHandle m_handle){ + + ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version); if(conf == 0){ setError(CR_ERROR, 
ndb_mgm_get_latest_error_desc(m_handle)); - ndb_mgm_disconnect(m_handle); - ndb_mgm_destroy_handle(&m_handle); - m_handle= 0; return 0; } - - { - unsigned nodeid= getOwnNodeId(); - - int res= ndb_mgm_alloc_nodeid(m_handle, versionId, &nodeid, nodetype); - if(res != 0) { - setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle)); - ndb_mgm_disconnect(m_handle); - ndb_mgm_destroy_handle(&m_handle); - m_handle= 0; - return 0; - } - - _ownNodeId= nodeid; - } - + return conf; -#if 0 - bool compatible; - if (global_ndb_check) - compatible = ndbCompatible_ndb_mgmt(versionId, version); - else - compatible = ndbCompatible_api_mgmt(versionId, version); - - if(!compatible){ // if(version != versionId){ - NDB_CLOSE_SOCKET(sockfd); - snprintf(err_buf, sizeof(err_buf), "Management Server: Invalid version. " - "Version from server: %d Own version: %d", version, versionId); - setError(CR_ERROR, err_buf); - return 0; - } -#endif } ndb_mgm_configuration * -ConfigRetriever::getConfig(const char * filename, int versionId){ +ConfigRetriever::getConfig(const char * filename){ struct stat sbuf; const int res = stat(filename, &sbuf); @@ -272,60 +217,29 @@ ConfigRetriever::getConfig(const char * filename, int versionId){ void ConfigRetriever::setError(ErrorType et, const char * s){ - if(errorString != 0){ - free(errorString); - } - if(s == 0) - errorString = 0; - else - errorString = strdup(s); + errorString.assign(s ? s : ""); latestErrorType = et; } const char * ConfigRetriever::getErrorString(){ - return errorString; + return errorString.c_str(); } void ConfigRetriever::setLocalConfigFileName(const char * localConfigFileName) { - if(_localConfigFileName != 0) - free(_localConfigFileName); - if(localConfigFileName != 0) - _localConfigFileName = strdup(localConfigFileName); - else - _localConfigFileName = 0; + _localConfigFileName.assign(localConfigFileName ? 
localConfigFileName : ""); } void ConfigRetriever::setConnectString(const char * connectString) { - if(m_connectString != 0) - free(m_connectString); - if (connectString != 0) { - m_connectString = strdup(connectString); - } else { - m_connectString = 0; - } -} - -/** - * @note Do not use! Use the one above if possible. /elathal - */ -void -ConfigRetriever::setDefaultConnectString(const char * defaultConnectString) { - if(m_defaultConnectString != 0) - free(m_defaultConnectString); - if (defaultConnectString != 0) { - m_defaultConnectString = strdup(defaultConnectString); - } else { - m_defaultConnectString = 0; - } + m_connectString.assign(connectString ? connectString : ""); } bool -ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, - int type){ +ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf){ + char buf[255]; ndb_mgm_configuration_iterator * it; it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf, CFG_SECTION_NODE); @@ -338,8 +252,8 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, } NdbAutoPtr ptr(it); - if(ndb_mgm_find(it, CFG_NODE_ID, getOwnNodeId()) != 0){ - snprintf(buf, 255, "Unable to find node with id: %d", getOwnNodeId()); + if(ndb_mgm_find(it, CFG_NODE_ID, _ownNodeId) != 0){ + snprintf(buf, 255, "Unable to find node with id: %d", _ownNodeId); setError(CR_ERROR, buf); return false; } @@ -396,11 +310,27 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, return false; } - if(_type != type){ + if(_type != m_node_type){ snprintf(buf, 255, "Supplied node type(%d) and config node type(%d) " - " don't match", type, _type); + " don't match", m_node_type, _type); setError(CR_ERROR, buf); return false; } + return true; } + +Uint32 +ConfigRetriever::allocNodeId(){ + unsigned nodeid= _ownNodeId; + + if(m_handle != 0){ + int res= ndb_mgm_alloc_nodeid(m_handle, m_version, &nodeid, m_node_type); + if(res != 0) { + setError(CR_ERROR, 
ndb_mgm_get_latest_error_desc(m_handle)); + return 0; + } + } + + return _ownNodeId= nodeid; +} diff --git a/ndb/src/common/mgmcommon/LocalConfig.cpp b/ndb/src/common/mgmcommon/LocalConfig.cpp index 9915cbdc642..46afc58b756 100644 --- a/ndb/src/common/mgmcommon/LocalConfig.cpp +++ b/ndb/src/common/mgmcommon/LocalConfig.cpp @@ -20,16 +20,13 @@ #include LocalConfig::LocalConfig(){ - ids = 0; size = 0; items = 0; error_line = 0; error_msg[0] = 0; _ownNodeId= 0; } bool -LocalConfig::init(bool onlyNodeId, - const char *connectString, - const char *fileName, - const char *defaultConnectString) { +LocalConfig::init(const char *connectString, + const char *fileName) { /** * Escalation: * 1. Check connectString @@ -41,8 +38,8 @@ LocalConfig::init(bool onlyNodeId, */ //1. Check connectString - if(connectString != 0) { - if(readConnectString(connectString, onlyNodeId)){ + if(connectString != 0 && connectString[0] != 0){ + if(readConnectString(connectString)){ return true; } return false; @@ -51,7 +48,7 @@ LocalConfig::init(bool onlyNodeId, //2. 
Check given filename if (fileName && strlen(fileName) > 0) { bool fopenError; - if(readFile(fileName, fopenError, onlyNodeId)){ + if(readFile(fileName, fopenError)){ return true; } return false; @@ -61,7 +58,7 @@ LocalConfig::init(bool onlyNodeId, char buf[255]; if(NdbEnv_GetEnv("NDB_CONNECTSTRING", buf, sizeof(buf)) && strlen(buf) != 0){ - if(readConnectString(buf, onlyNodeId)){ + if(readConnectString(buf)){ return true; } return false; @@ -72,7 +69,7 @@ LocalConfig::init(bool onlyNodeId, bool fopenError; char *buf= NdbConfig_NdbCfgName(1 /*true*/); NdbAutoPtr tmp_aptr(buf); - if(readFile(buf, fopenError, onlyNodeId)) + if(readFile(buf, fopenError)) return true; if (!fopenError) return false; @@ -83,24 +80,17 @@ LocalConfig::init(bool onlyNodeId, bool fopenError; char *buf= NdbConfig_NdbCfgName(0 /*false*/); NdbAutoPtr tmp_aptr(buf); - if(readFile(buf, fopenError, onlyNodeId)) + if(readFile(buf, fopenError)) return true; if (!fopenError) return false; } - //6. Check defaultConnectString - if(defaultConnectString != 0) { - if(readConnectString(defaultConnectString, onlyNodeId)) - return true; - return false; - } - //7. 
Check { char buf[256]; snprintf(buf, sizeof(buf), "host=localhost:%u", NDB_BASE_PORT); - if(readConnectString(buf, onlyNodeId)) + if(readConnectString(buf)) return true; } @@ -110,30 +100,8 @@ LocalConfig::init(bool onlyNodeId, } LocalConfig::~LocalConfig(){ - for(int i = 0; itype == MgmId_TCP) - free(ids[i]->data.tcp.remoteHost); - else if(ids[i]->type == MgmId_File) - free(ids[i]->data.file.filename); - delete ids[i]; - } - if(ids != 0) - delete[] ids; } -void LocalConfig::add(MgmtSrvrId * i){ - if(items == size){ - MgmtSrvrId ** tmp = new MgmtSrvrId * [size+10]; - if(ids != 0){ - memcpy(tmp, ids, items*sizeof(MgmtSrvrId *)); - delete []ids; - } - ids = tmp; - } - ids[items] = i; - items++; -} - void LocalConfig::setError(int lineNumber, const char * _msg) { error_line = lineNumber; strncpy(error_msg, _msg, sizeof(error_msg)); @@ -162,13 +130,13 @@ void LocalConfig::printUsage() const { <type = MgmId_TCP; - mgmtSrvrId->data.tcp.remoteHost = strdup(tempString); - mgmtSrvrId->data.tcp.port = port; - add(mgmtSrvrId); + MgmtSrvrId mgmtSrvrId; + mgmtSrvrId.type = MgmId_TCP; + mgmtSrvrId.name.assign(tempString); + mgmtSrvrId.port = port; + ids.push_back(mgmtSrvrId); return true; } } @@ -212,10 +180,10 @@ LocalConfig::parseFileName(const char * buf){ char tempString[1024]; for(int i = 0; fileNameTokens[i] != 0; i++) { if (sscanf(buf, fileNameTokens[i], tempString) == 1) { - MgmtSrvrId* mgmtSrvrId = new MgmtSrvrId(); - mgmtSrvrId->type = MgmId_File; - mgmtSrvrId->data.file.filename = strdup(tempString); - add(mgmtSrvrId); + MgmtSrvrId mgmtSrvrId; + mgmtSrvrId.type = MgmId_File; + mgmtSrvrId.name.assign(tempString); + ids.push_back(mgmtSrvrId); return true; } } @@ -223,7 +191,7 @@ LocalConfig::parseFileName(const char * buf){ } bool -LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line){ +LocalConfig::parseString(const char * connectString, char *line){ char * for_strtok; char * copy = strdup(connectString); NdbAutoPtr tmp_aptr(copy); @@ -231,8 
+199,7 @@ LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line bool b_nodeId = false; bool found_other = false; - for (char *tok = strtok_r(copy,";",&for_strtok); - tok != 0 && !(onlyNodeId && b_nodeId); + for (char *tok = strtok_r(copy,";",&for_strtok); tok != 0; tok = strtok_r(NULL, ";", &for_strtok)) { if (tok[0] == '#') continue; @@ -240,8 +207,6 @@ LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line if (!b_nodeId) // only one nodeid definition allowed if (b_nodeId = parseNodeId(tok)) continue; - if (onlyNodeId) - continue; if (found_other = parseHostName(tok)) continue; if (found_other = parseFileName(tok)) @@ -252,16 +217,17 @@ LocalConfig::parseString(const char * connectString, bool onlyNodeId, char *line return false; } - if (!onlyNodeId && !found_other) { + if (!found_other) { if (line) - snprintf(line, 150, "Missing host/file name extry in \"%s\"", connectString); + snprintf(line, 150, "Missing host/file name extry in \"%s\"", + connectString); return false; } return true; } -bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNodeId) +bool LocalConfig::readFile(const char * filename, bool &fopenError) { char line[150], line2[150]; @@ -292,7 +258,7 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNod strcat(theString, line); } - bool return_value = parseString(theString, onlyNodeId, line); + bool return_value = parseString(theString, line); if (!return_value) { snprintf(line2, 150, "Reading %s: %s", filename, line); @@ -305,9 +271,9 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError, bool onlyNod } bool -LocalConfig::readConnectString(const char * connectString, bool onlyNodeId){ +LocalConfig::readConnectString(const char * connectString){ char line[150], line2[150]; - bool return_value = parseString(connectString, onlyNodeId, line); + bool return_value = parseString(connectString, line); if (!return_value) { snprintf(line2, 
150, "Reading NDB_CONNECTSTRING \"%s\": %s", connectString, line); setError(0,line2); diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index f8e852b9d35..858af88d6de 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -69,9 +69,10 @@ NDB_MAIN(ndb_kernel){ } { // Do configuration - theConfig->setupConfiguration(); + signal(SIGPIPE, SIG_IGN); + theConfig->fetch_configuration(); } - + if (theConfig->getDaemonMode()) { // Become a daemon char *lockfile= NdbConfig_PidFileName(globalData.ownId); @@ -88,8 +89,6 @@ NDB_MAIN(ndb_kernel){ /** * Parent */ - theConfig->closeConfiguration(); - catchsigs(true); int status = 0; @@ -132,11 +131,13 @@ NDB_MAIN(ndb_kernel){ exit(0); } g_eventLogger.info("Ndb has terminated (pid %d) restarting", child); + theConfig->fetch_configuration(); } g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid()); + theConfig->setupConfiguration(); systemInfo(* theConfig, * theConfig->m_logLevel); - + // Load blocks globalEmulatorData.theSimBlockList->load(* theConfig); diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp index 550c6313058..03e4f07f2ff 100644 --- a/ndb/src/kernel/vm/Configuration.cpp +++ b/ndb/src/kernel/vm/Configuration.cpp @@ -35,6 +35,7 @@ #include #include "pc.hpp" #include +#include extern "C" { void ndbSetOwnVersion(); @@ -153,39 +154,82 @@ Configuration::closeConfiguration(){ } void -Configuration::setupConfiguration(){ +Configuration::fetch_configuration(){ /** * Fetch configuration from management server */ if (m_config_retriever) { delete m_config_retriever; } - m_config_retriever= new ConfigRetriever(); - ConfigRetriever &cr= *m_config_retriever; - cr.setConnectString(_connectString); - stopOnError(true); - ndb_mgm_configuration * p = cr.getConfig(NDB_VERSION, NODE_TYPE_DB); + m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_DB); + m_config_retriever->setConnectString(_connectString ? 
_connectString : ""); + if(m_config_retriever->init() == -1 || + m_config_retriever->do_connect() == -1){ + + const char * s = m_config_retriever->getErrorString(); + if(s == 0) + s = "No error given!"; + + /* Set stop on error to true otherwise NDB will + go into an restart loop... + */ + ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could connect to ndb_mgmd", s); + } + + ConfigRetriever &cr= *m_config_retriever; + + if((globalData.ownId = cr.allocNodeId()) == 0){ + for(Uint32 i = 0; i<3; i++){ + NdbSleep_SecSleep(3); + if(globalData.ownId = cr.allocNodeId()) + break; + } + } + + if(globalData.ownId == 0){ + ERROR_SET(fatal, ERR_INVALID_CONFIG, + "Unable to alloc node id", m_config_retriever->getErrorString()); + } + + ndb_mgm_configuration * p = cr.getConfig(); if(p == 0){ const char * s = cr.getErrorString(); if(s == 0) s = "No error given!"; - + /* Set stop on error to true otherwise NDB will go into an restart loop... - */ - + */ + ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could not fetch configuration" "/invalid configuration", s); } + if(m_clusterConfig) + free(m_clusterConfig); + + m_clusterConfig = p; + + ndb_mgm_configuration_iterator iter(* p, CFG_SECTION_NODE); + if (iter.find(CFG_NODE_ID, globalData.ownId)){ + ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched", "DB missing"); + } + + if(iter.get(CFG_DB_STOP_ON_ERROR, &_stopOnError)){ + ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched", + "StopOnError missing"); + } +} - Uint32 nodeId = globalData.ownId = cr.getOwnNodeId(); +void +Configuration::setupConfiguration(){ + ndb_mgm_configuration * p = m_clusterConfig; /** * Configure transporters */ { - int res = IPCConfig::configureTransporters(nodeId, + int res = IPCConfig::configureTransporters(globalData.ownId, * p, globalTransporterRegistry); if(res <= 0){ @@ -247,11 +291,6 @@ Configuration::setupConfiguration(){ } } - if(iter.get(CFG_DB_STOP_ON_ERROR, &_stopOnError)){ - ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid 
configuration fetched", - "StopOnError missing"); - } - if(iter.get(CFG_DB_STOP_ON_ERROR_INSERT, &m_restartOnErrorInsert)){ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched", "RestartOnErrorInsert missing"); @@ -268,7 +307,6 @@ Configuration::setupConfiguration(){ ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config); - m_clusterConfig = p; m_clusterConfigIter = ndb_mgm_create_configuration_iterator (p, CFG_SECTION_NODE); diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp index ec5e8b371b1..bd91f3fa74b 100644 --- a/ndb/src/kernel/vm/Configuration.hpp +++ b/ndb/src/kernel/vm/Configuration.hpp @@ -32,6 +32,7 @@ public: */ bool init(int argc, const char** argv); + void fetch_configuration(); void setupConfiguration(); void closeConfiguration(); diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index 1085c747c16..e78b0d41cf2 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -1438,11 +1438,7 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { const Properties *prop; prop = ndb_mgm_call(handle, reply, "get config", &args); - - if(prop == NULL) { - SET_ERROR(handle, EIO, "Unable to fetch config"); - return 0; - } + CHECK_REPLY(prop, 0); do { const char * buf; @@ -1537,17 +1533,14 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodei const Properties *prop; prop= ndb_mgm_call(handle, reply, "get nodeid", &args); - - if(prop == NULL) { - SET_ERROR(handle, EIO, "Unable to alloc nodeid"); - return -1; - } + CHECK_REPLY(prop, -1); int res= -1; do { const char * buf; if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){ - ndbout_c("ERROR Message: %s\n", buf); + setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__, + "Could not alloc node id: %s",buf); break; } if(!prop->get("nodeid", pnodeid) != 0){ @@ -1621,11 +1614,7 @@ ndb_mgm_set_int_parameter(NdbMgmHandle handle, const Properties *prop; 
prop= ndb_mgm_call(handle, reply, "set parameter", &args); - - if(prop == NULL) { - SET_ERROR(handle, EIO, "Unable set parameter"); - return -1; - } + CHECK_REPLY(prop, -1); int res= -1; do { diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp index 5aefd4609b1..e70b454a01f 100644 --- a/ndb/src/mgmclient/main.cpp +++ b/ndb/src/mgmclient/main.cpp @@ -47,10 +47,6 @@ handler(int sig){ int main(int argc, const char** argv){ int optind = 0; - char _default_connectstring_buf[256]; - snprintf(_default_connectstring_buf, sizeof(_default_connectstring_buf), - "host=localhost:%u", NDB_BASE_PORT); - const char *_default_connectstring= _default_connectstring_buf; const char *_host = 0; int _port = 0; int _help = 0; @@ -79,9 +75,9 @@ int main(int argc, const char** argv){ _port = atoi(argv[1]); } } else { - if(cfg.init(false, 0, 0, _default_connectstring) && cfg.items > 0 && cfg.ids[0]->type == MgmId_TCP){ - _host = cfg.ids[0]->data.tcp.remoteHost; - _port = cfg.ids[0]->data.tcp.port; + if(cfg.init(0, 0) && cfg.ids.size() > 0 && cfg.ids[0].type == MgmId_TCP){ + _host = cfg.ids[0].name.c_str(); + _port = cfg.ids[0].port; } else { cfg.printError(); cfg.printUsage(); diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 523883f7832..55384a2f91e 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -584,18 +584,11 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId, _ownNodeId= 0; NodeId tmp= nodeId; - if (getFreeNodeId(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0)){ - _ownNodeId= tmp; - if (nodeId != 0 && nodeId != tmp) { - ndbout << "Unable to obtain requested nodeid " << nodeId - << " nodeid " << tmp << " available\n"; - _ownNodeId= 0; // did not get nodeid requested - } - m_allocated_resources.reserve_node(_ownNodeId); - } else { - ndbout_c("Unable to retrieve own node id"); + if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0)){ + ndbout << "Unable to obtain requested nodeid " << nodeId; exit(-1); } + _ownNodeId = tmp; } @@ -2301,10 
+2294,19 @@ MgmtSrvr::getNodeType(NodeId nodeId) const return nodeTypes[nodeId]; } +#ifdef NDB_WIN32 +static NdbMutex & f_node_id_mutex = * NdbMutex_Create(); +#else +static NdbMutex f_node_id_mutex = NDB_MUTEX_INITIALIZER; +#endif + bool -MgmtSrvr::getFreeNodeId(NodeId * nodeId, enum ndb_mgm_node_type type, - struct sockaddr *client_addr, socklen_t *client_addr_len) const +MgmtSrvr::alloc_node_id(NodeId * nodeId, + enum ndb_mgm_node_type type, + struct sockaddr *client_addr, + socklen_t *client_addr_len) { + Guard g(&f_node_id_mutex); #if 0 ndbout << "MgmtSrvr::getFreeNodeId type=" << type << " *nodeid=" << *nodeId << endl; @@ -2365,6 +2367,7 @@ MgmtSrvr::getFreeNodeId(NodeId * nodeId, enum ndb_mgm_node_type type, } } *nodeId= tmp; + m_reserved_nodes.set(tmp); #if 0 ndbout << "MgmtSrvr::getFreeNodeId found type=" << type << " *nodeid=" << *nodeId << endl; @@ -2769,6 +2772,7 @@ MgmtSrvr::Allocated_resources::Allocated_resources(MgmtSrvr &m) MgmtSrvr::Allocated_resources::~Allocated_resources() { + Guard g(&f_node_id_mutex); m_mgmsrv.m_reserved_nodes.bitANDC(m_reserved_nodes); } @@ -2776,7 +2780,6 @@ void MgmtSrvr::Allocated_resources::reserve_node(NodeId id) { m_reserved_nodes.set(id); - m_mgmsrv.m_reserved_nodes.set(id); } int diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index f677cdbb2d0..661dcdfb784 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -78,6 +78,7 @@ public: // methods to reserve/allocate resources which // will be freed when running destructor void reserve_node(NodeId id); + bool is_reserved(NodeId nodeId) { return m_reserved_nodes.get(nodeId);} private: MgmtSrvr &m_mgmsrv; NodeBitmask m_reserved_nodes; @@ -465,8 +466,8 @@ public: * @return false if none found */ bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ; - bool getFreeNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type, - struct sockaddr *client_addr, socklen_t *client_addr_len) const ; + bool 
alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type, + struct sockaddr *client_addr, socklen_t *client_addr_len); /** * diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp index 10316bd2851..44c2aadd1e2 100644 --- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp +++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp @@ -288,16 +288,15 @@ MgmtSrvr::readConfig() { Config * MgmtSrvr::fetchConfig() { - ConfigRetriever cr; + ConfigRetriever cr(NDB_VERSION, NODE_TYPE_MGM); cr.setLocalConfigFileName(m_localNdbConfigFilename.c_str()); - struct ndb_mgm_configuration * tmp = cr.getConfig(NDB_VERSION, - NODE_TYPE_MGM); + struct ndb_mgm_configuration * tmp = cr.getConfig(); if(tmp != 0){ Config * conf = new Config(); conf->m_configValues = tmp; return conf; } - + return 0; } diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp index 99913face05..ec734fe24c5 100644 --- a/ndb/src/mgmsrv/Services.cpp +++ b/ndb/src/mgmsrv/Services.cpp @@ -401,34 +401,26 @@ MgmApiSession::get_nodeid(Parser_t::Context &, struct sockaddr addr; socklen_t addrlen= sizeof(addr); - int r; - if (r= getpeername(m_socket, &addr, &addrlen)) { + int r = getpeername(m_socket, &addr, &addrlen); + if (r != 0 ) { m_output->println(cmd); m_output->println("result: getpeername(%d) failed, err= %d", m_socket, r); m_output->println(""); return; } - NodeId free_id= 0; NodeId tmp= nodeid; - if (m_mgmsrv.getFreeNodeId(&tmp, (enum ndb_mgm_node_type)nodetype, &addr, &addrlen)) - free_id= tmp; + if(tmp == 0 || !m_allocated_resources->is_reserved(tmp)){ + if (!m_mgmsrv.alloc_node_id(&tmp, (enum ndb_mgm_node_type)nodetype, + &addr, &addrlen)){ + m_output->println(cmd); + m_output->println("result: no free nodeid %d for nodetype %d", + nodeid, nodetype); + m_output->println(""); + return; + } + } - if (nodeid != 0 && free_id != nodeid){ - m_output->println(cmd); - m_output->println("result: no free nodeid %d for nodetype %d", - nodeid, nodetype); - m_output->println(""); - return; - } - - if 
(free_id == 0){ - m_output->println(cmd); - m_output->println("result: no free nodeid for nodetype %d", nodetype); - m_output->println(""); - return; - } - #if 0 if (!compatible){ m_output->println(cmd); @@ -438,14 +430,13 @@ MgmApiSession::get_nodeid(Parser_t::Context &, return; } #endif - + m_output->println(cmd); - m_output->println("nodeid: %u", free_id); + m_output->println("nodeid: %u", tmp); m_output->println("result: Ok"); m_output->println(""); - - m_allocated_resources->reserve_node(free_id); - + m_allocated_resources->reserve_node(tmp); + return; } diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 0bbf042fbd6..9e39452891f 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -16,8 +16,6 @@ #include -#include - #include "MgmtSrvr.hpp" #include "EventLogger.hpp" #include @@ -229,6 +227,7 @@ NDB_MAIN(mgmsrv){ } } + signal(SIGPIPE, SIG_IGN); if(!glob.mgmObject->start()){ ndbout_c("Unable to start management server."); ndbout_c("Probably caused by illegal initial configuration file."); @@ -312,14 +311,13 @@ MgmGlobals::~MgmGlobals(){ static bool readLocalConfig(){ // Read local config file - ConfigRetriever cr; - cr.setLocalConfigFileName(glob.local_config_filename); - int nodeid = cr.init(true); + LocalConfig lc; + int nodeid = lc.init(glob.local_config_filename); if(nodeid == -1){ return false; } - glob.localNodeId = (NodeId)nodeid; + glob.localNodeId = nodeid; return true; } @@ -342,18 +340,7 @@ readGlobalConfig() { InitConfigFileParser parser; glob.cluster_config = parser.parseConfig(glob.config_filename); if(glob.cluster_config == 0){ - /** - * Try to get configuration from other MGM server - * Note: Only new format - */ - glob.cluster_config = new Config(); - - ConfigRetriever cr; - cr.setLocalConfigFileName(glob.local_config_filename); - glob.cluster_config->m_configValues = cr.getConfig(NDB_VERSION, - NODE_TYPE_MGM); - if (glob.cluster_config->m_configValues == NULL) - return false; + return false; } return true; } 
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index 6a25db560c9..d1e57e874ee 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -343,27 +343,39 @@ TransporterFacade* TransporterFacade::start_instance(const char * connectString){ // TransporterFacade used from API get config from mgmt srvr - s_config_retriever= new ConfigRetriever; + s_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_API); - ConfigRetriever &configRetriever= *s_config_retriever; - configRetriever.setConnectString(connectString); - ndb_mgm_configuration * props = configRetriever.getConfig(NDB_VERSION, - NODE_TYPE_API); - if (props == 0) { - ndbout << "Configuration error: "; - const char* erString = configRetriever.getErrorString(); - if (erString == 0) { - erString = "No error specified!"; - } - ndbout << erString << endl; - return 0; + s_config_retriever->setConnectString(connectString); + const char* error = 0; + do { + if(s_config_retriever->init() == -1) + break; + + if(s_config_retriever->do_connect() == -1) + break; + + const Uint32 nodeId = s_config_retriever->allocNodeId(); + if(nodeId == 0) + break; + + + ndb_mgm_configuration * props = s_config_retriever->getConfig(); + if(props == 0) + break; + + TransporterFacade * tf = start_instance(nodeId, props); + + free(props); + return tf; + } while(0); + + ndbout << "Configuration error: "; + const char* erString = s_config_retriever->getErrorString(); + if (erString == 0) { + erString = "No error specified!"; } - const int nodeId = configRetriever.getOwnNodeId(); - - TransporterFacade * tf = start_instance(nodeId, props); - - free(props); - return tf; + ndbout << erString << endl; + return 0; } TransporterFacade* From e3ed90a1e56de1e3baecaf40879f3579aadde457 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 9 Aug 2004 13:57:01 +0200 Subject: [PATCH 88/93] testBlobs take more than 5-minutes (but less than 10) --- 
ndb/test/run-test/daily-basic-tests.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index bca511a643a..0aa9c761b74 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -966,7 +966,7 @@ max-time: 1500 cmd: testRestartGci args: T6 -max-time: 300 +max-time: 600 cmd: testBlobs args: From e7d89abf33b2f251304527c350e53d9c6844d311 Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 9 Aug 2004 14:10:50 +0200 Subject: [PATCH 89/93] Forgot to update testBackup w.r.t. ConfigRetriever --- ndb/test/src/NdbBackup.cpp | 25 ++++++++++++++++--------- ndb/test/src/NdbRestarter.cpp | 12 ++++++------ 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index 46917cbcb13..6cb3db7d0d3 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -69,17 +69,24 @@ NdbBackup::getFileSystemPathForNode(int _node_id){ /** * Fetch configuration from management server */ - ConfigRetriever cr; + ConfigRetriever cr(0, NODE_TYPE_API); + ndb_mgm_configuration * p; - ndb_mgm_configuration * p = cr.getConfig(host.c_str(), port, 0, NODE_TYPE_API); - if(p == 0){ - const char * s = cr.getErrorString(); - if(s == 0) - s = "No error given!"; + BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port); + NdbMgmHandle handle = ndb_mgm_create_handle(); + if(handle == 0 || ndb_mgm_connect(handle, tmp.c_str()) != 0 && + (p = ndb_mgm_get_configuration(handle, 0)) == 0){ - ndbout << "Could not fetch configuration" << endl; - ndbout << s << endl; - return NULL; + const char * s = 0; + if(p == 0 && handle != 0){ + s = ndb_mgm_get_latest_error_msg(handle); + if(s == 0) + s = "No error given!"; + + ndbout << "Could not fetch configuration" << endl; + ndbout << s << endl; + return NULL; + } } } /** diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp
index 6d5abccf0e4..4d6d3ddc001 100644 --- a/ndb/test/src/NdbRestarter.cpp +++ b/ndb/test/src/NdbRestarter.cpp @@ -46,21 +46,21 @@ NdbRestarter::NdbRestarter(const char* _addr): return; } - if (lcfg.items == 0){ + if (lcfg.ids.size() == 0){ g_err << "NdbRestarter - No management servers configured in local config file" << endl; return; } - for (int i = 0; itype){ case MgmId_TCP: char buf[255]; - snprintf(buf, 255, "%s:%d", m->data.tcp.remoteHost, m->data.tcp.port); + snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port); addr.assign(buf); - host.assign(m->data.tcp.remoteHost); - port = m->data.tcp.port; + host.assign(m->name.c_str()); + port = m->port; return; break; case MgmId_File: From 8b32c7ef446ccc5eb80f0e2688fe658defbfe55e Mon Sep 17 00:00:00 2001 From: "joreland@mysql.com" <> Date: Mon, 9 Aug 2004 15:13:46 +0200 Subject: [PATCH 90/93] mysql-test ndb_index_unique --- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 1 + ndb/src/mgmsrv/main.cpp | 6 ++---- ndb/tools/waiter.cpp | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index a3ec91cce19..b3e6eb0dc6a 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -5004,6 +5004,7 @@ void Dbtc::execLQHKEYREF(Signal* signal) setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); if (isIndexOp) { jam(); + regApiPtr->lqhkeyreqrec--; // Compensate for extra during read tcKeyRef->connectPtr = indexOp; EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength); } else { diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 9e39452891f..94603ddbe77 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -312,12 +312,10 @@ static bool readLocalConfig(){ // Read local config file LocalConfig lc; - int nodeid = lc.init(glob.local_config_filename); - if(nodeid == -1){ + if(!lc.init(glob.local_config_filename)) return false; - } - glob.localNodeId = 
nodeid; + glob.localNodeId = lc._ownNodeId; return true; } diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp index f9d441ab352..b0ef8219fdf 100644 --- a/ndb/tools/waiter.cpp +++ b/ndb/tools/waiter.cpp @@ -22,7 +22,7 @@ #include #include #include -#include "../src/common/mgmcommon/LocalConfig.hpp" +#include "../include/mgmcommon/LocalConfig.hpp" #include @@ -67,13 +67,13 @@ int main(int argc, const char** argv){ return NDBT_ProgramExit(NDBT_FAILED); } - for (int i = 0; itype){ case MgmId_TCP: - snprintf(buf, 255, "%s:%d", m->data.tcp.remoteHost, m->data.tcp.port); + snprintf(buf, 255, "%s:%d", m->name.c_str(), m->port); _hostName = buf; break; case MgmId_File: From 976e0ce3a89ddadc6a30b5ec86e241561ded6868 Mon Sep 17 00:00:00 2001 From: "paul@kite-hub.kitebird.com" <> Date: Mon, 9 Aug 2004 10:34:36 -0500 Subject: [PATCH 91/93] mysqld.cc: minor option description change. --- sql/mysqld.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 98e8183d2d5..c81a888d2e9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4066,7 +4066,7 @@ Disable with --skip-bdb (will save memory).", "Set the default storage engine (table tyoe) for tables.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-table-type", OPT_STORAGE_ENGINE, - "(deprecated) Use default-storage-engine.", 0, 0, + "(deprecated) Use --default-storage-engine.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.", (gptr*) &default_tz_name, (gptr*) &default_tz_name, From 6989a499daf14a0236639b1dc7ba78b95f2a1e8f Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 10 Aug 2004 01:08:53 -0700 Subject: [PATCH 92/93] Comments in libmysql (prepared statements API) --- libmysql/libmysql.c | 189 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 169 insertions(+), 20 deletions(-) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 
a276b3d70e4..08916b88cc8 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -1994,7 +1994,7 @@ mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length) } /* - alloc_root will return valid address even in case param_count + alloc_root will return valid address even in case when param_count and field_count are zero. Thus we should never rely on stmt->bind or stmt->params when checking for existence of placeholders or result set. @@ -2091,12 +2091,6 @@ static void update_stmt_fields(MYSQL_STMT *stmt) mysql_stmt_result_metadata() stmt statement handle - RETURN - NULL statement contains no result set or out of memory. - In the latter case you can retreive error message - with mysql_stmt_error. - MYSQL_RES a result set with no rows - DESCRIPTION This function should be used after mysql_stmt_execute(). You can safely check that prepared statement has a result set by calling @@ -2110,6 +2104,12 @@ static void update_stmt_fields(MYSQL_STMT *stmt) mysql_fetch_field_direct, mysql_fetch_fields, mysql_field_seek. - free returned MYSQL_RES structure with mysql_free_result. - proceed to binding of output parameters. + + RETURN + NULL statement contains no result set or out of memory. + In the latter case you can retreive error message + with mysql_stmt_error. + MYSQL_RES a result set with no rows */ MYSQL_RES * STDCALL @@ -2194,11 +2194,11 @@ static void store_param_type(char **pos, MYSQL_BIND *param) param MySQL bind param DESCRIPTION - These funtions are invoked from mysql_stmt_execute by - MYSQL_BIND::store_param_func pointer. This pointer is set once per many - executions in mysql_stmt_bind_param. The caller must ensure that network - buffer have enough capacity to store parameter (MYSQL_BIND::buffer_length - contains needed number of bytes). + These funtions are invoked from mysql_stmt_execute() by + MYSQL_BIND::store_param_func pointer. This pointer is set once per + many executions in mysql_stmt_bind_param(). 
The caller must ensure + that network buffer have enough capacity to store parameter + (MYSQL_BIND::buffer_length contains needed number of bytes). */ static void store_param_tinyint(NET *net, MYSQL_BIND *param) @@ -2701,7 +2701,7 @@ int STDCALL mysql_stmt_execute(MYSQL_STMT *stmt) example a table used in the query was altered. Note, that now (4.1.3) we always send metadata in reply to COM_EXECUTE (even if it is not necessary), so either this or - previous always branch works. + previous branch always works. TODO: send metadata only when it's really necessary and add a warning 'Metadata changed' when it's sent twice. */ @@ -2776,19 +2776,171 @@ static my_bool int_is_null_false= 0; /* - Setup the input parameter data buffers from application + Set up input data buffers for a statement. SYNOPSIS mysql_stmt_bind_param() stmt statement handle The statement must be prepared with mysql_stmt_prepare(). bind Array of mysql_stmt_param_count() bind parameters. + This function doesn't check that size of this argument + is >= mysql_stmt_field_count(): it's user's responsibility. + + DESCRIPTION + Use this call after mysql_stmt_prepare() to bind user variables to + placeholders. + Each element of bind array stands for a placeholder. Placeholders + are counted from 0. For example statement + 'INSERT INTO t (a, b) VALUES (?, ?)' + contains two placeholders, and for such statement you should supply + bind array of two elements (MYSQL_BIND bind[2]). + + By properly initializing bind array you can bind virtually any + C language type to statement's placeholders: + First, it's strongly recommended to always zero-initialize entire + bind structure before setting it's members. This will both shorten + your application code and make it robust to future extensions of + MYSQL_BIND structure. + Then you need to assign typecode of your applicatoin buffer to + MYSQL_BIND::buffer_type. 
The following typecodes with their + correspondence to C language types are supported: + MYSQL_TYPE_TINY for 8-bit integer variables. Normally it's + 'signed char' and 'unsigned char'; + MYSQL_TYPE_SHORT for 16-bit signed and unsigned variables. This + is usually 'short' and 'unsigned short'; + MYSQL_TYPE_LONG for 32-bit signed and unsigned variables. It + corresponds to 'int' and 'unsigned int' on + vast majority of platforms. On IA-32 and some + other 32-bit systems you can also use 'long' + here; + MYSQL_TYPE_LONGLONG 64-bit signed or unsigned integer. Stands for + '[unsigned] long long' on most platforms; + MYSQL_TYPE_FLOAT 32-bit floating point type, 'float' on most + systems; + MYSQL_TYPE_DOUBLE 64-bit floating point type, 'double' on most + systems; + MYSQL_TYPE_TIME broken-down time stored in MYSQL_TIME + structure + MYSQL_TYPE_DATE date stored in MYSQL_TIME structure + MYSQL_TYPE_DATETIME datetime stored in MYSQL_TIME structure See + more on how to use these types for sending + dates and times below; + MYSQL_TYPE_STRING character string, assumed to be in + character-set-client. If character set of + client is not equal to character set of + column, value for this placeholder will be + converted to destination character set before + insert. + MYSQL_TYPE_BLOB sequence of bytes. This sequence is assumed to + be in binary character set (which is the same + as no particular character set), and is never + converted to any other character set. See also + notes about supplying string/blob length + below. + MYSQL_TYPE_NULL special typecode for binding nulls. + These C/C++ types are not supported yet by the API: long double, + bool. + + As you can see from the list above, it's responsibility of + application programmer to ensure that chosen typecode properly + corresponds to host language type. For example on all platforms + where we build MySQL packages (as of MySQL 4.1.4) int is a 32-bit + type. 
So for int you can always assume that proper typecode is + MYSQL_TYPE_LONG (however queer it sounds, the name is legacy of the + old MySQL API). In contrary sizeof(long) can be 4 or 8 8-bit bytes, + depending on platform. + + TODO: provide client typedefs for each integer and floating point + typecode, i. e. int8, uint8, float32, etc. + + Once typecode was set, it's necessary to assign MYSQL_BIND::buffer + to point to the buffer of given type. Finally, additional actions + may be taken for some types or use cases: + + Binding integer types. + For integer types you might also need to set MYSQL_BIND::is_unsigned + member. Set it to TRUE when binding unsigned char, unsigned short, + unsigned int, unsigned long, unsigned long long. + + Binding floating point types. + For floating point types you just need to set + MYSQL_BIND::buffer_type and MYSQL_BIND::buffer. The rest of the + members should be zero-initialized. + + Binding NULLs. + You might have a column always NULL, never NULL, or sometimes NULL. + For an always NULL column set MYSQL_BIND::buffer_type to + MYSQL_TYPE_NULL. The rest of the members just need to be + zero-initialized. For never NULL columns set MYSQL_BIND::is_null to + 0, or this has already been done if you zero-initialized the entire + structure. If you set MYSQL_TYPE::is_null to point to an + application buffer of type 'my_bool', then this buffer will be + checked on each execution: this way you can set the buffer to TRUE, + or any non-0 value for NULLs, and to FALSE or 0 for not NULL data. + + Binding text strings and sequences of bytes. + For strings, in addition to MYSQL_BIND::buffer_type and + MYSQL_BIND::buffer you need to set MYSQL_BIND::length or + MYSQL_BIND::buffer_length. + If 'length' is set, 'buffer_length' is ignored. 'buffer_length' + member should be used when size of string doesn't change between + executions. 
If you want to vary buffer length for each value, set + 'length' to point to an application buffer of type 'unsigned long' + and set this long to length of the string before each + mysql_stmt_execute(). + + Binding dates and times. + For binding dates and times prepared statements API provides clients + with MYSQL_TIME structure. A pointer to instance of this structure + should be assigned to MYSQL_BIND::buffer whenever MYSQL_TYPE_TIME, + MYSQL_TYPE_DATE, MYSQL_TYPE_DATETIME typecodes are used. When + typecode is MYSQL_TYPE_TIME, only members 'hour', 'minute', 'second' + and 'neg' (is time offset negative) are used. These members only + will be sent to the server. + MYSQL_TYPE_DATE implies use of 'year', 'month', 'day', 'neg'. + MYSQL_TYPE_DATETIME utilizes both parts of MYSQL_TIME structure. + You don't have to set MYSQL_TIME::time_type member: it's not used + when sending data to the server, typecode information is enough. + 'second_part' member can hold microsecond precision of time value, + but now it's only supported on protocol level: you can't store + microsecond in a column, or use in temporal calculations. However, + if you send a time value with microsecond part for 'SELECT ?', + statement, you'll get it back unchanged from the server. + + Data conversion. + If conversion from host language type to data representation, + corresponding to SQL type, is required it's done on the server. + Data truncation is possible when conversion is lossy. For example, + if you supply MYSQL_TYPE_DATETIME value out of valid SQL type + TIMESTAMP range, the same conversion will be applied as if this + value would have been sent as string in the old protocol. + TODO: document how the server will behave in case of truncation/data + loss. + + After variables were bound, you can repeatedly set/change their + values and mysql_stmt_execute() the statement. + + See also: mysql_stmt_send_long_data() for sending long text/blob + data in pieces, examples in tests/client_test.c. 
+ Next steps you might want to make: + - execute statement with mysql_stmt_execute(), + - reset statement using mysql_stmt_reset() or reprepare it with + another query using mysql_stmt_prepare() + - close statement with mysql_stmt_close(). + + IMPLEMENTATION + The function copies given bind array to internal storage of the + statement, and sets up typecode-specific handlers to perform + serialization of bound data. This means that although you don't need + to call this routine after each assignement to bind buffers, you + need to call eat each time you change parameter typecodes, or other + members of MYSQL_BIND array. + This is a pure local call. Data types of client buffers are sent + along with buffers' data at first execution of the statement. RETURN 0 success 1 error, can be retrieved with mysql_stmt_error. - Note, that this function doesn't check that size of MYSQL_BIND - array is >= mysql_stmt_field_count(), */ my_bool STDCALL mysql_stmt_bind_param(MYSQL_STMT *stmt, MYSQL_BIND *bind) @@ -2971,10 +3123,7 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, if (param->buffer_type < MYSQL_TYPE_TINY_BLOB || param->buffer_type > MYSQL_TYPE_STRING) { - /* - Long data handling should be used only for string/binary - types only - */ + /* Long data handling should be used only for string/binary types */ strmov(stmt->sqlstate, unknown_sqlstate); sprintf(stmt->last_error, ER(stmt->last_errno= CR_INVALID_BUFFER_USE), param->param_number); From 72348e7c1c330a9126871b02ac7d94b940b7219c Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 10 Aug 2004 01:16:19 -0700 Subject: [PATCH 93/93] Fixing typos in big comment (libmysql): you need to check in to find out another couple of typos --- libmysql/libmysql.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 08916b88cc8..3a1d0d4c9fc 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -2801,7 +2801,7 @@ static my_bool 
int_is_null_false= 0; bind structure before setting it's members. This will both shorten your application code and make it robust to future extensions of MYSQL_BIND structure. - Then you need to assign typecode of your applicatoin buffer to + Then you need to assign typecode of your application buffer to MYSQL_BIND::buffer_type. The following typecodes with their correspondence to C language types are supported: MYSQL_TYPE_TINY for 8-bit integer variables. Normally it's @@ -2932,8 +2932,8 @@ static my_bool int_is_null_false= 0; The function copies given bind array to internal storage of the statement, and sets up typecode-specific handlers to perform serialization of bound data. This means that although you don't need - to call this routine after each assignement to bind buffers, you - need to call eat each time you change parameter typecodes, or other + to call this routine after each assignment to bind buffers, you + need to call it each time you change parameter typecodes, or other members of MYSQL_BIND array. This is a pure local call. Data types of client buffers are sent along with buffers' data at first execution of the statement.