From 547b51124c580e0d5fbfd519a93e38f2364f729c Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 28 May 2007 15:59:02 +0200 Subject: [PATCH 01/38] bug#28717, make sure only the master updates activeStatus so that other nodes don't get confused after having received status from the master and then try to update it themselves ndb/src/kernel/blocks/ERROR_codes.txt: error 1001, delay node_failrep ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: error 1001, delay node_failrep ndb/test/ndbapi/testNodeRestart.cpp: testcase ndb/test/run-test/daily-basic-tests.txt: testcase --- ndb/src/kernel/blocks/ERROR_codes.txt | 7 +- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 8 +- ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 7 ++ ndb/test/ndbapi/testNodeRestart.cpp | 81 +++++++++++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 + 5 files changed, 105 insertions(+), 2 deletions(-) diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index 0bcc99a6334..bf54d583299 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -1,5 +1,5 @@ Next QMGR 1 -Next NDBCNTR 1000 +Next NDBCNTR 1002 Next NDBFS 2000 Next DBACC 3002 Next DBTUP 4014 @@ -487,3 +487,8 @@ Dbdict: 6003 Crash in participant @ CreateTabReq::Prepare 6004 Crash in participant @ CreateTabReq::Commit 6005 Crash in participant @ CreateTabReq::CreateDrop + +Ndbcntr: +-------- + +1001: Delay sending NODE_FAILREP (to own node), until error is cleared diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index f24a8e2c7d5..44e2293f318 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -4448,12 +4448,18 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr) jam(); const Uint32 nodeId = failedNodePtr.i; - if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){ + if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i)) + { /*----------------------------------------------------*/ /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */ /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */ /* THE NODE HAVE MISSED A LOCAL CHECKPOINT.
*/ /*----------------------------------------------------*/ + + /** + * Bug#28717, Only master should do this, as this status is copied + * to other nodes + */ switch (failedNodePtr.p->activeStatus) { case Sysfile::NS_Active: jam(); diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 26e8f246293..65d80669316 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -1375,6 +1375,13 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) { jamEntry(); + if (ERROR_INSERTED(1001)) + { + sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100, + signal->getLength()); + return; + } + const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0]; NdbNodeBitmask allFailed; allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes); diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 9adbfbd46a6..e5ced961b6f 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -1045,6 +1045,84 @@ int runBug25554(NDBT_Context* ctx, NDBT_Step* step){ } +int +runBug28717(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + Ndb* pNdb = GETNDB(step); + NdbRestarter res; + + if (res.getNumDbNodes() < 4) + { + return NDBT_OK; + } + + int master = res.getMasterNodeId(); + int node0 = res.getRandomNodeOtherNodeGroup(master, rand()); + int node1 = res.getRandomNodeSameNodeGroup(node0, rand()); + + ndbout_c("master: %d node0: %d node1: %d", master, node0, node1); + + if (res.restartOneDbNode(node0, false, true, true)) + { + return NDBT_FAILED; + } + + { + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 }; + NdbLogEventHandle handle = + ndb_mgm_create_logevent_handle(res.handle, filter); + + + int dump[] = { DumpStateOrd::DihStartLcpImmediately }; + struct ndb_logevent event; + + for (Uint32 i = 0; i<3; i++) + { + res.dumpStateOneNode(master, dump, 1); + while(ndb_logevent_get_next(handle, &event, 0) >= 0 && + event.type != NDB_LE_LocalCheckpointStarted); + while(ndb_logevent_get_next(handle, &event, 0) >= 0 && + event.type != NDB_LE_LocalCheckpointCompleted); + } + } + + if (res.waitNodesNoStart(&node0, 1)) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + + if (res.dumpStateOneNode(node0, val2, 2)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node0, 5010)) + return NDBT_FAILED; + + if (res.insertErrorInNode(node1, 1001)) + return NDBT_FAILED; + + if (res.startNodes(&node0, 1)) + return NDBT_FAILED; + + NdbSleep_SecSleep(3); + + if (res.insertErrorInNode(node1, 0)) + return NDBT_FAILED; + + if (res.waitNodesNoStart(&node0, 1)) + return NDBT_FAILED; + + if (res.startNodes(&node0, 1)) + return NDBT_FAILED; + + if (res.waitClusterStarted()) + return NDBT_FAILED; + + return NDBT_OK; +} + NDBT_TESTSUITE(testNodeRestart); TESTCASE("NoLoad", "Test that one node at a time can be stopped and then restarted "\ @@ -1366,6 +1444,9 @@ TESTCASE("Bug25364", ""){ TESTCASE("Bug25554", ""){ INITIALIZER(runBug25554); } +TESTCASE("Bug28717", ""){ + INITIALIZER(runBug28717); +} NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index d2c91279d18..5a3947ec1e9 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -492,6 +492,10 @@ max-time: 
1500 cmd: testDict args: -n CreateAndDrop +max-time: 1000 +cmd: testNodeRestart +args: -n Bug28717 T1 + max-time: 1500 cmd: testDict args: -n CreateAndDropAtRandom -l 200 T1 From f4c75aefb2f064dbca40e83e96f29dcf1fb0b9c1 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 28 May 2007 16:18:04 +0200 Subject: [PATCH 02/38] make memeber public --- ndb/test/include/NdbRestarter.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ndb/test/include/NdbRestarter.hpp b/ndb/test/include/NdbRestarter.hpp index 2f21c41b9c4..63de32ac038 100644 --- a/ndb/test/include/NdbRestarter.hpp +++ b/ndb/test/include/NdbRestarter.hpp @@ -65,6 +65,8 @@ public: int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber); int getRandomNotMasterNodeId(int randomNumber); + NdbMgmHandle handle; + protected: int waitClusterState(ndb_mgm_node_status _status, @@ -87,7 +89,6 @@ protected: bool connected; BaseString addr; - NdbMgmHandle handle; ndb_mgm_configuration * m_config; protected: ndb_mgm_configuration * getConfig(); From 07524c35f8cc6962b8be558cf8bb1731e3cbd349 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 28 May 2007 16:31:31 +0200 Subject: [PATCH 03/38] print user and system time at watchdog check --- ndb/src/kernel/vm/WatchDog.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ndb/src/kernel/vm/WatchDog.cpp b/ndb/src/kernel/vm/WatchDog.cpp index d1abb709b1e..2e24a5eaa6c 100644 --- a/ndb/src/kernel/vm/WatchDog.cpp +++ b/ndb/src/kernel/vm/WatchDog.cpp @@ -16,6 +16,7 @@ #include #include +#include #include "WatchDog.hpp" #include "GlobalData.hpp" @@ -129,6 +130,13 @@ WatchDog::run(){ break; }//switch g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); + { + struct tms my_tms; + times(&my_tms); + g_eventLogger.info("User time: %llu System time: %llu", + (Uint64)my_tms.tms_utime, + (Uint64)my_tms.tms_stime); + } if(alerts == 3){ shutdownSystem(last_stuck_action); } From bfc82a63637aea40e1e671bff97a0fb13f9fefe3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 29 May 2007 07:16:26 +0200 Subject: [PATCH 04/38] ndb - fix typo --- ndb/test/run-test/daily-basic-tests.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index fffe1ac9046..1ade56f7579 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -608,7 +608,7 @@ args: -n Bug_11133 T1 max-time: 1000 cmd: testNdbApi -args: -n BugBug28443 +args: -n Bug28443 #max-time: 500 #cmd: testInterpreter From ff0479e367d1734ace67f27ea28e0e1778315f33 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 29 May 2007 11:55:12 +0200 Subject: [PATCH 05/38] Bug #26783 replication status unknown after cluster or mysqld failure --- mysql-test/t/disabled.def | 1 - sql/ha_ndbcluster.cc | 2 -- 2 files changed, 3 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index e2a0b30c592..e283ca9458f 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -39,5 +39,4 @@ synchronization : Bug#24529 Test 'synchronization' fails on Mac pushb #rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly -rpl_ndb_stm_innodb : Bug#26783 ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 5ec125718c0..52861e3e627 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4248,8 +4248,6 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, extern 
MASTER_INFO *active_mi; static int ndbcluster_update_apply_status(THD *thd, int do_update) { - return 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; NDBDICT *dict= ndb->getDictionary(); From a14059db518e6519a2bc1579e5b0f7e04a89c7c0 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 29 May 2007 23:39:57 +0200 Subject: [PATCH 06/38] shorten some files for tar to work storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp: Rename: storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp -> storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp: Rename: storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp -> storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp: Rename: storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp -> storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp: Rename: storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp -> storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp: Rename: storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp -> storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp --- storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile | 6 +++--- .../mgmapi_logevent/{mgmapi_logevent.cpp => main.cpp} | 0 storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile | 6 +++--- .../mgmapi_logevent2/{mgmapi_logevent2.cpp => main.cpp} | 0 storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile | 6 +++--- .../ndbapi_simple_dual/{ndbapi_simple_dual.cpp => main.cpp} | 0 storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile | 6 +++--- .../{ndbapi_simple_index.cpp => main.cpp} | 0 storage/ndb/src/kernel/blocks/dblqh/Makefile.am | 2 +- .../redoLogReader/{redoLogFileReader.cpp => reader.cpp} | 0 10 files changed, 13 insertions(+), 13 deletions(-) rename storage/ndb/ndbapi-examples/mgmapi_logevent/{mgmapi_logevent.cpp => main.cpp} (100%) rename storage/ndb/ndbapi-examples/mgmapi_logevent2/{mgmapi_logevent2.cpp => main.cpp} (100%) rename storage/ndb/ndbapi-examples/ndbapi_simple_dual/{ndbapi_simple_dual.cpp => main.cpp} (100%) rename storage/ndb/ndbapi-examples/ndbapi_simple_index/{ndbapi_simple_index.cpp => main.cpp} (100%) rename storage/ndb/src/kernel/blocks/dblqh/redoLogReader/{redoLogFileReader.cpp => reader.cpp} (100%) diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile index c9b4507c4a7..b67150b71fa 100644 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile +++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile @@ -1,6 +1,6 @@ TARGET = mgmapi_logevent -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp rename to 
storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile index 95b43b11f6b..fd9499c7a68 100644 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile +++ b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile @@ -1,6 +1,6 @@ TARGET = mgmapi_logevent2 -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp rename to storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile index 7f0ca52fcc3..9757df3ceab 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile +++ b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile @@ -1,6 +1,6 @@ TARGET = ndbapi_simple_dual -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp rename to storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile index c38975381f5..975563b9508 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile +++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile @@ -1,6 +1,6 @@ TARGET = ndbapi_simple_index -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o +SRCS = main.cpp +OBJS = main.o CXX = g++ CFLAGS = -c -Wall -fno-rtti -fno-exceptions CXXFLAGS = @@ -17,7 +17,7 @@ SYS_LIB = $(TARGET): $(OBJS) $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) -$(TARGET).o: $(SRCS) +$(OBJS): $(SRCS) $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS) clean: diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp similarity index 100% rename from storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp rename to storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am index c7c477a512c..b545096dc83 100644 --- 
a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am +++ b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am @@ -16,7 +16,7 @@ EXTRA_PROGRAMS = ndbd_redo_log_reader ndbd_redo_log_reader_SOURCES = redoLogReader/records.cpp \ - redoLogReader/redoLogFileReader.cpp + redoLogReader/reader.cpp include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp similarity index 100% rename from storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp rename to storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp From afac7ead7f3bf5e4a3335af0da7b7abb274c7c02 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 30 May 2007 09:00:50 +0200 Subject: [PATCH 07/38] Bug #28749 MaxNoOfOpenFiles offset by 1 --- ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index 353330929e5..55b0a8c4d39 100644 --- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -569,7 +569,7 @@ AsyncFile* Ndbfs::createAsyncFile(){ // Check limit of open files - if (theFiles.size()+1 == m_maxFiles) { + if (theFiles.size() == m_maxFiles) { // Print info about all open files for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; From 40462a078f9b640e80d5649f0f682bfcc64c5a08 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 30 May 2007 12:29:19 +0200 Subject: [PATCH 08/38] Bug #28770 file already opened error when corrupt schema file - make sure we close the first file, before opening the next --- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index b125f8d988d..699b5cb735b 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -401,6 +401,9 @@ void Dbdict::execFSCLOSECONF(Signal* signal) case FsConnectRecord::OPEN_READ_SCHEMA2: openSchemaFile(signal, 1, fsPtr.i, false, false); break; + case FsConnectRecord::OPEN_READ_TAB_FILE2: + openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); + break; default: jamLine((fsPtr.p->fsState & 0xFFF)); ndbrequire(false); @@ -780,8 +783,11 @@ void Dbdict::readTableConf(Signal* signal, void Dbdict::readTableRef(Signal* signal, FsConnectRecordPtr fsPtr) { + /** + * First close corrupt file + */ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2; - openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); + closeFile(signal, fsPtr.p->filePtr, fsPtr.i); return; }//Dbdict::readTableRef() From 91e5084ef13fa870ec332ce1e74dd92f0228fdee Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 30 May 2007 17:25:22 +0200 Subject: [PATCH 09/38] Bug #28443 - correction of merge error --- ndb/src/common/transporter/Packer.cpp | 5 +++ .../common/transporter/TCP_Transporter.hpp | 4 ++ .../transporter/TransporterRegistry.cpp | 41 ++++++++++--------- ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 17 ++++++++ ndb/test/ndbapi/testNdbApi.cpp | 34 +++++++++++++++ ndb/test/run-test/daily-basic-tests.txt | 4 ++ 6 files changed, 86 insertions(+), 19 deletions(-) diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp index 66c00b0af89..d471167b0e7 100644 --- 
a/ndb/src/common/transporter/Packer.cpp +++ b/ndb/src/common/transporter/Packer.cpp @@ -20,7 +20,12 @@ #include #include +#ifdef ERROR_INSERT +Uint32 MAX_RECEIVED_SIGNALS = 1024; +#else #define MAX_RECEIVED_SIGNALS 1024 +#endif + Uint32 TransporterRegistry::unpack(Uint32 * readPtr, Uint32 sizeOfData, diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp index 8cba7a01532..fdb64939d5a 100644 --- a/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/ndb/src/common/transporter/TCP_Transporter.hpp @@ -101,6 +101,10 @@ private: virtual void updateReceiveDataPtr(Uint32 bytesRead); virtual Uint32 get_free_buffer() const; + + inline bool hasReceiveData () const { + return receiveBuffer.sizeOfData > 0; + } protected: /** * Setup client/server and perform connect/accept diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index bd3136f023c..7a05dcb30c2 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -841,6 +841,7 @@ TransporterRegistry::poll_OSE(Uint32 timeOutMillis) Uint32 TransporterRegistry::poll_TCP(Uint32 timeOutMillis) { + bool hasdata = false; if (false && nTCPTransporters == 0) { tcpReadSelectReply = 0; @@ -885,6 +886,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) // Put the connected transporters in the socket read-set FD_SET(socket, &tcpReadset); } + hasdata |= t->hasReceiveData(); } // The highest socket value plus one @@ -901,7 +903,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) } #endif - return tcpReadSelectReply; + return tcpReadSelectReply || hasdata; } #endif @@ -937,26 +939,27 @@ TransporterRegistry::performReceive() #endif #ifdef NDB_TCP_TRANSPORTER - if(tcpReadSelectReply > 0) + for (int i=0; igetRemoteNodeId(); - const NDB_SOCKET_TYPE socket = t->getSocket(); - if(is_connected(nodeId)){ - if(t->isConnected() && FD_ISSET(socket, &tcpReadset)) + checkJobBuffer(); + TCP_Transporter *t = theTCPTransporters[i]; + const NodeId nodeId = t->getRemoteNodeId(); + const NDB_SOCKET_TYPE socket = t->getSocket(); + if(is_connected(nodeId)){ + if(t->isConnected()) + { + if (FD_ISSET(socket, &tcpReadset)) { - const int receiveSize = t->doReceive(); - if(receiveSize > 0) - { - Uint32 * ptr; - Uint32 sz = t->getReceiveData(&ptr); - transporter_recv_from(callbackObj, nodeId); - Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); - t->updateReceiveDataPtr(szUsed); - } + t->doReceive(); + } + + if (t->hasReceiveData()) + { + Uint32 * ptr; + Uint32 sz = t->getReceiveData(&ptr); + transporter_recv_from(callbackObj, nodeId); + Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); + t->updateReceiveDataPtr(szUsed); } } } diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index a9d9c991ca3..8c3148862d4 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -136,6 +136,7 @@ Cmvmi::~Cmvmi() #ifdef ERROR_INSERT NodeBitmask c_error_9000_nodes_mask; +extern Uint32 MAX_RECEIVED_SIGNALS; #endif void Cmvmi::execNDB_TAMPER(Signal* signal) @@ -165,6 +166,22 @@ void Cmvmi::execNDB_TAMPER(Signal* signal) kill(getpid(), SIGABRT); } #endif + +#ifdef ERROR_INSERT + if (signal->theData[0] == 9003) + { + if (MAX_RECEIVED_SIGNALS < 1024) + { + MAX_RECEIVED_SIGNALS = 1024; + } + else + { + MAX_RECEIVED_SIGNALS = rand() % 128; + } + ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS); + CLEAR_ERROR_INSERT_VALUE; 
+ } +#endif }//execNDB_TAMPER() void Cmvmi::execSET_LOGLEVELORD(Signal* signal) diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index aee668039fe..ad16c472229 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -1306,6 +1306,36 @@ int runTestExecuteAsynch(NDBT_Context* ctx, NDBT_Step* step){ template class Vector; +int +runBug28443(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + int records = ctx->getNumRecords(); + + NdbRestarter restarter; + + restarter.insertErrorInAllNodes(9003); + + for (Uint32 i = 0; igetNumLoops(); i++) + { + HugoTransactions hugoTrans(*ctx->getTab()); + if (hugoTrans.loadTable(GETNDB(step), records, 2048) != 0) + { + result = NDBT_FAILED; + goto done; + } + if (runClearTable(ctx, step) != 0) + { + result = NDBT_FAILED; + goto done; + } + } + +done: + restarter.insertErrorInAllNodes(9003); + + return result; +} NDBT_TESTSUITE(testNdbApi); TESTCASE("MaxNdb", @@ -1392,6 +1422,10 @@ TESTCASE("Scan_4006", INITIALIZER(runScan_4006); FINALIZER(runClearTable); } +TESTCASE("Bug28443", + ""){ + INITIALIZER(runBug28443); +} TESTCASE("ExecuteAsynch", "Check that executeAsync() works (BUG#27495)\n"){ INITIALIZER(runTestExecuteAsynch); diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 5a3947ec1e9..f4a685299d6 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -617,6 +617,10 @@ max-time: 500 cmd: testNdbApi args: -n ExecuteAsynch T1 +max-time: 1000 +cmd: testNdbApi +args: -n BugBug28443 + #max-time: 500 #cmd: testInterpreter #args: T1 From 1a166bc4c98282acc24e4cf708511d9f412644ce Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 3 Jun 2007 19:30:37 +0200 Subject: [PATCH 10/38] Bug#20612. storage/ndb/src/kernel/blocks/pgman.cpp: a) in one case pl_queue.remove(ptr) was not followed by state &= ~ Page_entry::ONQUEUE. b) when collecting initial hot entries have to remove from queue if somehow got there. b) is easy to get with large buffer cache (256M). a) or b) is probably cause of bug#20612. 
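Illustrative sketch (simplified stand-in types only, not the real Pgman or Page_entry classes) of the invariant both fixes restore: an entry's ONQUEUE flag must be cleared in the same step that removes it from the queue, and an entry being promoted to hot must first be unlinked from the queue if it is unexpectedly still on it.

  #include <cstdint>
  #include <list>

  struct Entry {
    enum State : uint32_t { ONSTACK = 0x1, ONQUEUE = 0x2, HOT = 0x4 };
    uint32_t state = 0;
  };

  // Take an entry off the (simplified) queue and keep the flag in sync.
  static void remove_from_queue(std::list<Entry*>& queue, Entry* e)
  {
    queue.remove(e);                        // drop list membership
    e->state &= ~uint32_t(Entry::ONQUEUE);  // and clear the flag with it
  }

  // Promote an entry to hot; if it somehow sits on the queue, unlink it first.
  static void make_hot(std::list<Entry*>& queue, Entry* e)
  {
    if (e->state & Entry::ONQUEUE)
      remove_from_queue(queue, e);
    e->state |= Entry::ONSTACK | Entry::HOT;
  }

Skipping either step leaves the state flags and the actual list membership out of sync, which is the failure mode described above.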
--- storage/ndb/src/kernel/blocks/pgman.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 72333856cf1..78bc70427a9 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -669,6 +669,7 @@ Pgman::lirs_reference(Ptr ptr) jam(); move_cleanup_ptr(ptr); pl_queue.remove(ptr); + state &= ~ Page_entry::ONQUEUE; } if (state & Page_entry::BOUND) { @@ -699,6 +700,12 @@ Pgman::lirs_reference(Ptr ptr) pl_stack.add(ptr); state |= Page_entry::ONSTACK; state |= Page_entry::HOT; + // it could be on queue already + if (state & Page_entry::ONQUEUE) { + jam(); + pl_queue.remove(ptr); + state &= ~Page_entry::ONQUEUE; + } } set_page_state(ptr, state); From c8e0d0dbd54652c8eb3fd652228ee9ef86dc038a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2007 10:27:10 +0200 Subject: [PATCH 11/38] ndb - make size of redo log files (fragment log files) configurable using new config variable FragmentLogFileSize (4M - 1G) mysql-test/ndb/ndb_config_1_node.ini: change log file size (test) mysql-test/ndb/ndb_config_2_node.ini: change log file size (test) storage/ndb/include/mgmapi/mgmapi_config_parameters.h: add new confif parameter storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp: make logfile size configurable storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp: make logfile size configurable storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: make logfile size configurable storage/ndb/src/mgmsrv/ConfigInfo.cpp: add new config variable storage/ndb/src/ndbapi/ndberror.c: update error message --- mysql-test/ndb/ndb_config_1_node.ini | 3 +- mysql-test/ndb/ndb_config_2_node.ini | 3 +- .../include/mgmapi/mgmapi_config_parameters.h | 1 + storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 13 +- .../ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 3 + .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 282 +++++++++++------- storage/ndb/src/mgmsrv/ConfigInfo.cpp | 12 + storage/ndb/src/ndbapi/ndberror.c | 2 +- 8 files changed, 195 insertions(+), 124 deletions(-) diff --git a/mysql-test/ndb/ndb_config_1_node.ini b/mysql-test/ndb/ndb_config_1_node.ini index 39e758493c8..24f6c904737 100644 --- a/mysql-test/ndb/ndb_config_1_node.ini +++ b/mysql-test/ndb/ndb_config_1_node.ini @@ -10,7 +10,8 @@ DataDir= CHOOSE_FILESYSTEM MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 3 +NoOfFragmentLogFiles= 8 +FragmentLogFileSize= 6M DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory # diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index 99f31150d8c..302998bc79e 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -10,7 +10,8 @@ DataDir= CHOOSE_FILESYSTEM MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 3 +NoOfFragmentLogFiles= 4 +FragmentLogFileSize=12M DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory # the following parametes just function as a small regression # test that the parameter exists diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index 119958d0ce0..ed34a372db6 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -64,6 +64,7 @@ #define CFG_DB_FILESYSTEM_PATH 125 #define 
CFG_DB_NO_REDOLOG_FILES 126 +#define CFG_DB_REDOLOG_FILE_SIZE 140 #define CFG_DB_LCP_DISC_PAGES_TUP 127 #define CFG_DB_LCP_DISC_PAGES_TUP_SR 128 diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 0f88933f617..18fc7417623 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -71,7 +71,6 @@ class Dbtup; /* CONSTANTS OF THE LOG PAGES */ /* ------------------------------------------------------------------------- */ #define ZPAGE_HEADER_SIZE 32 -#define ZNO_MBYTES_IN_FILE 16 #define ZPAGE_SIZE 8192 #define ZPAGES_IN_MBYTE 32 #define ZTWOLOG_NO_PAGES_IN_MBYTE 5 @@ -142,7 +141,7 @@ class Dbtup; /* IN THE MBYTE. */ /* ------------------------------------------------------------------------- */ #define ZFD_HEADER_SIZE 3 -#define ZFD_PART_SIZE 48 +#define ZFD_MBYTE_SIZE 3 #define ZLOG_HEAD_SIZE 8 #define ZNEXT_LOG_SIZE 2 #define ZABORT_LOG_SIZE 3 @@ -169,7 +168,6 @@ class Dbtup; #define ZPOS_LOG_TYPE 0 #define ZPOS_NO_FD 1 #define ZPOS_FILE_NO 2 -#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40 /* ------------------------------------------------------------------------- */ /* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */ /* LOG RECORD. */ @@ -1436,17 +1434,17 @@ public: * header of each log file. That information is used during * system restart to find the tail of the log. */ - UintR logLastPrepRef[16]; + UintR *logLastPrepRef; /** * The max global checkpoint completed before the mbyte in the * log file was started. One variable per mbyte. */ - UintR logMaxGciCompleted[16]; + UintR *logMaxGciCompleted; /** * The max global checkpoint started before the mbyte in the log * file was started. One variable per mbyte. */ - UintR logMaxGciStarted[16]; + UintR *logMaxGciStarted; /** * This variable contains the file name as needed by the file * system when opening the file. 
@@ -2162,6 +2160,7 @@ private: void execSTART_RECREF(Signal* signal); void execGCP_SAVEREQ(Signal* signal); + void execFSOPENREF(Signal* signal); void execFSOPENCONF(Signal* signal); void execFSCLOSECONF(Signal* signal); void execFSWRITECONF(Signal* signal); @@ -2671,6 +2670,8 @@ private: LogPartRecord *logPartRecord; LogPartRecordPtr logPartPtr; UintR clogPartFileSize; + Uint32 clogFileSize; // In MBYTE + Uint32 cmaxLogFilesInPageZero; // // Configurable LogFileRecord *logFileRecord; diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index 8aaf86de73a..05ea2047fc0 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -60,6 +60,8 @@ void Dblqh::initData() cLqhTimeOutCheckCount = 0; cbookedAccOps = 0; m_backup_ptr = RNIL; + clogFileSize = 16; + cmaxLogFilesInPageZero = 40; }//Dblqh::initData() void Dblqh::initRecords() @@ -260,6 +262,7 @@ Dblqh::Dblqh(Block_context& ctx): addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ); addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF); addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ); + addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF, true); addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF); addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF); addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF); diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 33696ebba27..4a7d38c293c 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -62,6 +62,7 @@ #include #include #include +#include // Use DEBUG to print messages that should be // seen only when we debug the product @@ -1020,9 +1021,37 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal) ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp)); c_fragment_pool.setSize(tmp); + if (!ndb_mgm_get_int_parameter(p, CFG_DB_REDOLOG_FILE_SIZE, + &clogFileSize)) + { + // convert to mbyte + clogFileSize = (clogFileSize + 1024*1024 - 1) / (1024 * 1024); + ndbrequire(clogFileSize >= 4 && clogFileSize <= 1024); + } + + cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) / + (ZFD_MBYTE_SIZE * clogFileSize); + + /** + * "Old" cmaxLogFilesInPageZero was 40 + * Each FD need 3 words per mb, require that they can fit into 1 page + * (atleast 1 FD) + * Is also checked in ConfigInfo.cpp (max FragmentLogFileSize = 1Gb) + * 1Gb = 1024Mb => 3(ZFD_MBYTE_SIZE) * 1024 < 8192 (ZPAGE_SIZE) + */ + if (cmaxLogFilesInPageZero > 40) + { + jam(); + cmaxLogFilesInPageZero = 40; + } + else + { + ndbrequire(cmaxLogFilesInPageZero); + } + initRecords(); initialiseRecordsLab(signal, 0, ref, senderData); - + return; }//Dblqh::execSIZEALT_REP() @@ -11750,9 +11779,9 @@ void Dblqh::sendStartLcp(Signal* signal) Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr, const LogPartRecordPtr &sltLogPartPtr) { - Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte; - Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte; - Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE; + Uint32 hf = sltCurrLogFilePtr.p->fileNo*clogFileSize+sltCurrLogFilePtr.p->currentMbyte; + Uint32 tf = sltLogPartPtr.p->logTailFileNo*clogFileSize+sltLogPartPtr.p->logTailMbyte; + Uint32 sz = sltLogPartPtr.p->noLogFiles*clogFileSize; if (tf > hf) hf += sz; return sz-(hf-tf); } 
@@ -11810,7 +11839,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) /* ------------------------------------------------------------------------- */ SLT_LOOP: for (tsltIndex = tsltStartMbyte; - tsltIndex <= ZNO_MBYTES_IN_FILE - 1; + tsltIndex <= clogFileSize - 1; tsltIndex++) { if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) { /* ------------------------------------------------------------------------- */ @@ -11826,7 +11855,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) /* ------------------------------------------------------------------------- */ /*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */ /* ------------------------------------------------------------------------- */ - tsltMbyte = ZNO_MBYTES_IN_FILE - 1; + tsltMbyte = clogFileSize - 1; sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile; ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord); }//if @@ -11864,7 +11893,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo; UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte; - arrGuard(tsltMbyte, 16); + arrGuard(tsltMbyte, clogFileSize); sltLogPartPtr.p->logTailFileNo = sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16; /* ------------------------------------------------------------------------- */ @@ -12364,6 +12393,26 @@ void Dblqh::execFSOPENCONF(Signal* signal) }//switch }//Dblqh::execFSOPENCONF() +void +Dblqh::execFSOPENREF(Signal* signal) +{ + jamEntry(); + FsRef* ref = (FsRef*)signal->getDataPtr(); + Uint32 err = ref->errorCode; + if (err == FsRef::fsErrInvalidFileSize) + { + char buf[256]; + BaseString::snprintf(buf, sizeof(buf), + "Invalid file size for redo logfile, " + " size only changable with --initial"); + progError(__LINE__, + NDBD_EXIT_INVALID_CONFIG, + buf); + return; + } + + SimulatedBlock::execFSOPENREF(signal); +} /* ************>> */ /* FSREADCONF > */ @@ -13009,7 +13058,7 @@ void Dblqh::openFileInitLab(Signal* signal) { logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT; seizeLogpage(signal); - writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1, + writeSinglePage(signal, (clogFileSize * ZPAGES_IN_MBYTE) - 1, ZPAGE_SIZE - 1, __LINE__); lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END; return; @@ -13072,7 +13121,7 @@ void Dblqh::writeInitMbyteLab(Signal* signal) { releaseLfo(signal); logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1; - if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) { + if (logFilePtr.p->currentMbyte == clogFileSize) { jam(); releaseLogpage(signal); logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT; @@ -13192,7 +13241,7 @@ void Dblqh::initLogfile(Signal* signal, Uint32 fileNo) logFilePtr.p->lastPageWritten = 0; logFilePtr.p->logPageZero = RNIL; logFilePtr.p->currentMbyte = 0; - for (tilIndex = 0; tilIndex <= 15; tilIndex++) { + for (tilIndex = 0; tilIndex < clogFileSize; tilIndex++) { logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1; logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1; logFilePtr.p->logLastPrepRef[tilIndex] = 0; @@ -13243,8 +13292,12 @@ void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr) signal->theData[3] = olfLogFilePtr.p->fileName[1]; signal->theData[4] = olfLogFilePtr.p->fileName[2]; signal->theData[5] = olfLogFilePtr.p->fileName[3]; - signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; req->auto_sync_size = 
MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); + Uint64 sz = clogFileSize; + sz *= 1024; sz *= 1024; + req->file_size_hi = sz >> 32; + req->file_size_lo = sz & 0xFFFFFFFF; sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//Dblqh::openFileRw() @@ -13299,8 +13352,12 @@ void Dblqh::openNextLogfile(Signal* signal) signal->theData[3] = onlLogFilePtr.p->fileName[1]; signal->theData[4] = onlLogFilePtr.p->fileName[2]; signal->theData[5] = onlLogFilePtr.p->fileName[3]; - signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); + Uint64 sz = clogFileSize; + sz *= 1024; sz *= 1024; + req->file_size_hi = sz >> 32; + req->file_size_lo = sz & 0xFFFFFFFF; sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//if }//Dblqh::openNextLogfile() @@ -13431,7 +13488,7 @@ void Dblqh::writeFileDescriptor(Signal* signal) /* -------------------------------------------------- */ /* START BY WRITING TO LOG FILE RECORD */ /* -------------------------------------------------- */ - arrGuard(logFilePtr.p->currentMbyte, 16); + arrGuard(logFilePtr.p->currentMbyte, clogFileSize); logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = logPartPtr.p->logPartNewestCompletedGCI; logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci; @@ -13457,10 +13514,7 @@ void Dblqh::writeFileDescriptor(Signal* signal) /* ------------------------------------------------------------------------- */ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType) { - LogFileRecordPtr wmoLogFilePtr; UintR twmoNoLogDescriptors; - UintR twmoLoop; - UintR twmoIndex; /* -------------------------------------------------- */ /* WRITE HEADER INFORMATION IN THE NEW FILE. 
*/ @@ -13468,52 +13522,44 @@ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType) logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE; logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = logFilePtr.p->fileNo; - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { jam(); - twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO; + twmoNoLogDescriptors = cmaxLogFilesInPageZero; } else { jam(); twmoNoLogDescriptors = logPartPtr.p->noLogFiles; }//if logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] = twmoNoLogDescriptors; - wmoLogFilePtr.i = logFilePtr.i; - twmoLoop = 0; -WMO_LOOP: - jam(); - if (twmoLoop < twmoNoLogDescriptors) { - jam(); - ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord); - for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) { + + { + Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; + LogFileRecordPtr filePtr = logFilePtr; + for (Uint32 fd = 0; fd < twmoNoLogDescriptors; fd++) + { jam(); - arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] = - wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + - twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] = - wmoLogFilePtr.p->logMaxGciStarted[twmoIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + - twmoIndex, ZPAGE_SIZE); - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] = - wmoLogFilePtr.p->logLastPrepRef[twmoIndex]; - }//for - wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile; - twmoLoop = twmoLoop + 1; - goto WMO_LOOP; - }//if - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (ZFD_PART_SIZE * twmoNoLogDescriptors); - arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE); - logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] = - ZNEXT_LOG_RECORD_TYPE; + ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); + for (Uint32 mb = 0; mb < clogFileSize; mb ++) + { + jam(); + Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; + Uint32 pos1 = pos0 + clogFileSize; + Uint32 pos2 = pos1 + clogFileSize; + arrGuard(pos0, ZPAGE_SIZE); + arrGuard(pos1, ZPAGE_SIZE); + arrGuard(pos2, ZPAGE_SIZE); + logPagePtr.p->logPageWord[pos0] = filePtr.p->logMaxGciCompleted[mb]; + logPagePtr.p->logPageWord[pos1] = filePtr.p->logMaxGciStarted[mb]; + logPagePtr.p->logPageWord[pos2] = filePtr.p->logLastPrepRef[mb]; + } + filePtr.i = filePtr.p->prevLogFile; + } + pos += (twmoNoLogDescriptors * ZFD_MBYTE_SIZE * clogFileSize); + arrGuard(pos, ZPAGE_SIZE); + logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = pos; + logPagePtr.p->logPageWord[pos] = ZNEXT_LOG_RECORD_TYPE; + } + /* ------------------------------------------------------- */ /* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */ /* LOG FILE. 
THIS HAS SPECIAL SIGNIFANCE TO FIND */ @@ -13658,9 +13704,9 @@ void Dblqh::openSrLastFileLab(Signal* signal) void Dblqh::readSrLastFileLab(Signal* signal) { logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP]; - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { jam(); - initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO); + initGciInLogFileRec(signal, cmaxLogFilesInPageZero); } else { jam(); initGciInLogFileRec(signal, logPartPtr.p->noLogFiles); @@ -13685,7 +13731,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1; }//if }//if - arrGuard(logFilePtr.p->currentMbyte, 16); + arrGuard(logFilePtr.p->currentMbyte, clogFileSize); logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED]; logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = @@ -13693,7 +13739,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] = logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF]; releaseLogpage(signal); - if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { jam(); logFilePtr.p->currentMbyte++; readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte); @@ -13707,21 +13753,21 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) * ---------------------------------------------------------------------- */ if (logPartPtr.p->lastMbyte == ZNIL) { jam(); - logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1; + logPartPtr.p->lastMbyte = clogFileSize - 1; }//if }//if logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; closeFile(signal, logFilePtr, __LINE__); - if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { Uint32 fileNo; - if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { jam(); - fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO; + fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; } else { jam(); fileNo = (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - ZMAX_LOG_FILES_IN_PAGE_ZERO; + cmaxLogFilesInPageZero; }//if if (fileNo == 0) { jam(); @@ -13731,11 +13777,11 @@ void Dblqh::readSrLastMbyteLab(Signal* signal) * -------------------------------------------------------------------- */ fileNo = 1; logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1); + logPartPtr.p->noLogFiles - (cmaxLogFilesInPageZero - 1); } else { jam(); logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO; + logPartPtr.p->noLogFiles - cmaxLogFilesInPageZero; }//if LogFileRecordPtr locLogFilePtr; findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); @@ -13760,9 +13806,9 @@ void Dblqh::openSrNextFileLab(Signal* signal) void Dblqh::readSrNextFileLab(Signal* signal) { - if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { jam(); - initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO); + initGciInLogFileRec(signal, cmaxLogFilesInPageZero); } else { jam(); initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles); @@ -13770,16 +13816,16 @@ void Dblqh::readSrNextFileLab(Signal* signal) releaseLogpage(signal); logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; closeFile(signal, logFilePtr, __LINE__); - if 
(logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { Uint32 fileNo; - if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) { + if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { jam(); - fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO; + fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; } else { jam(); fileNo = (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - ZMAX_LOG_FILES_IN_PAGE_ZERO; + cmaxLogFilesInPageZero; }//if if (fileNo == 0) { jam(); @@ -13788,11 +13834,11 @@ void Dblqh::readSrNextFileLab(Signal* signal) * -------------------------------------------------------------------- */ fileNo = 1; logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1); + logPartPtr.p->srRemainingFiles - (cmaxLogFilesInPageZero - 1); } else { jam(); logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO; + logPartPtr.p->srRemainingFiles - cmaxLogFilesInPageZero; }//if LogFileRecordPtr locLogFilePtr; findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); @@ -14663,7 +14709,7 @@ void Dblqh::srLogLimits(Signal* signal) * EXECUTED. * ----------------------------------------------------------------------- */ while(true) { - ndbrequire(tmbyte < 16); + ndbrequire(tmbyte < clogFileSize); if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) { if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) { jam(); @@ -14704,7 +14750,7 @@ void Dblqh::srLogLimits(Signal* signal) if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) { if (tmbyte == 0) { jam(); - tmbyte = ZNO_MBYTES_IN_FILE - 1; + tmbyte = clogFileSize - 1; logFilePtr.i = logFilePtr.p->prevLogFile; ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); } else { @@ -15098,7 +15144,7 @@ void Dblqh::execSr(Signal* signal) logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD]; logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (noFdDescriptors * ZFD_PART_SIZE); + (noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize); } break; /* ========================================================================= */ @@ -15138,11 +15184,11 @@ void Dblqh::execSr(Signal* signal) /*---------------------------------------------------------------------------*/ /* START EXECUTION OF A NEW MBYTE IN THE LOG. */ /*---------------------------------------------------------------------------*/ - if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { jam(); logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE; } else { - ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)); + ndbrequire(logFilePtr.p->currentMbyte == (clogFileSize - 1)); jam(); /*---------------------------------------------------------------------------*/ /* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */ @@ -15339,7 +15385,7 @@ void Dblqh::invalidateLogAfterLastGCI(Signal* signal) { jam(); releaseLfo(signal); releaseLogpage(signal); - if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) { + if (logPartPtr.p->invalidatePageNo < (clogFileSize * ZPAGES_IN_MBYTE - 1)) { // We continue in this file. 
logPartPtr.p->invalidatePageNo++; } else { @@ -16680,6 +16726,22 @@ void Dblqh::initialiseLogFile(Signal* signal) ptrAss(logFilePtr, logFileRecord); logFilePtr.p->nextLogFile = logFilePtr.i + 1; logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE; + + logFilePtr.p->logLastPrepRef = new Uint32[clogFileSize]; + logFilePtr.p->logMaxGciCompleted = new Uint32[clogFileSize]; + logFilePtr.p->logMaxGciStarted = new Uint32[clogFileSize]; + + if (logFilePtr.p->logLastPrepRef == 0 || + logFilePtr.p->logMaxGciCompleted == 0 || + logFilePtr.p->logMaxGciStarted == 0) + { + char buf[256]; + BaseString::snprintf(buf, sizeof(buf), + "Failed to alloc mbyte(%u) arrays for logfile %u", + clogFileSize, logFilePtr.i); + progError(__LINE__, NDBD_EXIT_MEMALLOC, buf); + } + }//for logFilePtr.i = clogFileFileSize - 1; ptrAss(logFilePtr, logFileRecord); @@ -17008,41 +17070,31 @@ void Dblqh::initFragrec(Signal* signal, * ========================================================================= */ void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors) { - LogFileRecordPtr iglLogFilePtr; - UintR tiglLoop; - UintR tiglIndex; - - tiglLoop = 0; - iglLogFilePtr.i = logFilePtr.i; - iglLogFilePtr.p = logFilePtr.p; -IGL_LOOP: - for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) { - arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] = - logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logMaxGciStarted[tiglIndex] = - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - ZNO_MBYTES_IN_FILE) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) + - tiglIndex, ZPAGE_SIZE); - iglLogFilePtr.p->logLastPrepRef[tiglIndex] = - logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (2 * ZNO_MBYTES_IN_FILE)) + - (tiglLoop * ZFD_PART_SIZE)) + tiglIndex]; - }//for - tiglLoop = tiglLoop + 1; - if (tiglLoop < noFdDescriptors) { + LogFileRecordPtr filePtr = logFilePtr; + Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; + for (Uint32 fd = 0; fd < noFdDescriptors; fd++) + { jam(); - iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile; - ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord); - goto IGL_LOOP; - }//if + for (Uint32 mb = 0; mb < clogFileSize; mb++) + { + jam(); + Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; + Uint32 pos1 = pos0 + clogFileSize; + Uint32 pos2 = pos1 + clogFileSize; + arrGuard(pos0, ZPAGE_SIZE); + arrGuard(pos1, ZPAGE_SIZE); + arrGuard(pos2, ZPAGE_SIZE); + filePtr.p->logMaxGciCompleted[mb] = logPagePtr.p->logPageWord[pos0]; + filePtr.p->logMaxGciStarted[mb] = logPagePtr.p->logPageWord[pos1]; + filePtr.p->logLastPrepRef[mb] = logPagePtr.p->logPageWord[pos2]; + } + if (fd + 1 < noFdDescriptors) + { + jam(); + filePtr.i = filePtr.p->prevLogFile; + ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); + } + } }//Dblqh::initGciInLogFileRec() /* ========================================================================== @@ -18295,7 +18347,7 @@ void Dblqh::writeNextLog(Signal* signal) ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE); logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] 
= ZNEXT_MBYTE_TYPE; - if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { jam(); /* -------------------------------------------------- */ /* CALCULATE THE NEW REMAINING WORDS WHEN */ @@ -18384,7 +18436,7 @@ void Dblqh::writeNextLog(Signal* signal) systemError(signal, __LINE__); }//if }//if - if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) { + if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { jam(); twnlNextMbyte = 0; if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) { diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index c10dacbee28..92ca24e8e0a 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -871,6 +871,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "3", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_REDOLOG_FILE_SIZE, + "FragmentLogFileSize", + DB_TOKEN, + "Size of each Redo log file", + ConfigInfo::CI_USED, + false, + ConfigInfo::CI_INT, + "16M", + "4M", + "1G" }, + { CFG_DB_MAX_OPEN_FILES, "MaxNoOfOpenFiles", diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index fc0d53b6a6e..0587fac7e8a 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -179,7 +179,7 @@ ErrorBundle ErrorCodes[] = { { 873, DMEC, TR, "Out of attrinfo records for scan in tuple manager" }, { 899, DMEC, TR, "Rowid already allocated" }, { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" }, - { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" }, + { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (increase FragmentLogFileSize)" }, { 1222, DMEC, TR, "Out of transaction markers in LQH" }, { 4021, DMEC, TR, "Out of Send Buffer space in NDB API" }, { 4022, DMEC, TR, "Out of Send Buffer space in NDB API" }, From f8057c4b92f6c4a00efd8035fcc1b359628e670f Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2007 10:32:32 +0200 Subject: [PATCH 12/38] ndb - update dl145a config for autotest storage/ndb/test/run-test/conf-dl145a.cnf: update dl145a config for autotest --- storage/ndb/test/run-test/conf-dl145a.cnf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf index ea344f1a62a..5f61bee755d 100644 --- a/storage/ndb/test/run-test/conf-dl145a.cnf +++ b/storage/ndb/test/run-test/conf-dl145a.cnf @@ -21,3 +21,6 @@ BackupMemory = 64M MaxNoOfConcurrentScans = 100 MaxNoOfSavedMessages= 1000 SendBufferMemory = 2M +NoOfFragmentLogFiles = 4 +FragmentLogFileSize = 64M + From 253c2808b39576e0f26c2095b705123ef497005c Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 4 Jun 2007 11:58:25 +0200 Subject: [PATCH 13/38] ndb - bug#28726 make sure to remove LCP files aswell if specifying --initial storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: Add removal of LCP/X directories --- .../src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 69673796fee..6fc88c5061f 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -2761,16 +2761,34 @@ void 
Ndbcntr::execSTART_ORD(Signal* signal){ c_missra.execSTART_ORD(signal); } +#define CLEAR_DX 13 +#define CLEAR_LCP 3 + void -Ndbcntr::clearFilesystem(Signal* signal){ +Ndbcntr::clearFilesystem(Signal* signal) +{ + const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX; + FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); req->userReference = reference(); req->userPointer = 0; req->directory = 1; req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 3); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any... - FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount); + + if (lcp == 0) + { + FsOpenReq::setVersion(req->fileNumber, 3); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any... + FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount); + } + else + { + FsOpenReq::setVersion(req->fileNumber, 5); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); + FsOpenReq::v5_setLcpNo(req->fileNumber, c_fsRemoveCount - CLEAR_DX); + FsOpenReq::v5_setTableId(req->fileNumber, 0); + FsOpenReq::v5_setFragmentId(req->fileNumber, 0); + } sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, FsRemoveReq::SignalLength, JBA); c_fsRemoveCount++; @@ -2779,12 +2797,12 @@ Ndbcntr::clearFilesystem(Signal* signal){ void Ndbcntr::execFSREMOVECONF(Signal* signal){ jamEntry(); - if(c_fsRemoveCount == 13){ + if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){ jam(); sendSttorry(signal); } else { jam(); - ndbrequire(c_fsRemoveCount < 13); + ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP); clearFilesystem(signal); }//if } From 1182b801d435d500c92ce4439b2531521ea6df33 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 5 Jun 2007 17:06:33 +0200 Subject: [PATCH 14/38] Bug #28899 not possible to set separate watchdog timeout at startup storage/ndb/include/mgmapi/mgmapi_config_parameters.h: add new configuration parameter TimeBetweenWatchDogCheckInitial storage/ndb/include/portlib/NdbTick.h: enable timing code storage/ndb/src/common/portlib/NdbTick.c: enable timing code storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: read watchdog timeout to set it after malloc storage/ndb/src/kernel/vm/Configuration.cpp: read initial watchdog timeout and set it in the beginning storage/ndb/src/kernel/vm/Configuration.hpp: read initial watchdog timeout and set it in the beginning storage/ndb/src/kernel/vm/SimulatedBlock.cpp: introduce new state for "action" malloc of memory storage/ndb/src/kernel/vm/SimulatedBlock.hpp: introduce new state for "action" malloc of memory storage/ndb/src/kernel/vm/WatchDog.cpp: rewrite watchdog to check every 100ms for being stuch, but keep shutdown after 3 * interval for "action" == 9 (malloc) keep old behavior and only output every interval storage/ndb/src/mgmsrv/ConfigInfo.cpp: add new configuration parameter TimeBetweenWatchDogCheckInitial --- .../include/mgmapi/mgmapi_config_parameters.h | 2 + storage/ndb/include/portlib/NdbTick.h | 4 - storage/ndb/src/common/portlib/NdbTick.c | 4 +- .../src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 8 + storage/ndb/src/kernel/vm/Configuration.cpp | 12 +- storage/ndb/src/kernel/vm/Configuration.hpp | 1 + storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 18 +- storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 3 +- storage/ndb/src/kernel/vm/WatchDog.cpp | 156 +++++++++++------- storage/ndb/src/mgmsrv/ConfigInfo.cpp | 12 ++ 10 files changed, 149 insertions(+), 71 deletions(-) diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index 119958d0ce0..45cfd5fd7bf 
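// The watchdog change described above can be condensed into a few lines. The
// standalone sketch below only illustrates the policy from the commit message
// (poll every 100 ms, report a stuck thread, treat the memory-allocation phase
// as special so it is reported at most once per configured interval, and shut
// down only after roughly three intervals without progress). All names here
// (g_place, report, shutdown_system, watchdog_loop) are invented stand-ins,
// not the actual WatchDog.cpp symbols.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static const unsigned kAllocatingMemory = 9;   // "action" code used while allocating
static std::atomic<unsigned> g_place(0);       // bumped by the watched thread

static void report(unsigned place, long ms)
{ std::printf("kernel thread stuck in place %u for %ld ms\n", place, ms); }

static void shutdown_system(unsigned place)
{ std::printf("shutting down, last place %u\n", place); }

void watchdog_loop(unsigned interval_ms, const std::atomic<bool>& stop)
{
  using clock = std::chrono::steady_clock;
  clock::time_point last_progress = clock::now();
  unsigned last_place = 0;
  unsigned next_report_ms = interval_ms;

  while (!stop.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    const unsigned place = g_place.exchange(0);     // non-zero means progress
    const clock::time_point now = clock::now();
    if (place != 0) {
      last_place = place;
      last_progress = now;
      next_report_ms = interval_ms;
      continue;
    }
    const long stuck_ms = (long)std::chrono::duration_cast<
        std::chrono::milliseconds>(now - last_progress).count();
    bool warn = true;
    if (last_place == kAllocatingMemory) {          // large mallocs may be slow:
      if ((unsigned)stuck_ms < next_report_ms)      // report once per interval
        warn = false;
      else
        next_report_ms += interval_ms;
    }
    if (warn) {
      report(last_place, stuck_ms);
      if (stuck_ms > 3L * interval_ms)              // escalate after ~3 intervals
        shutdown_system(last_place);
    }
  }
}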
100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -81,6 +81,8 @@ #define CFG_DB_BACKUP_WRITE_SIZE 136 #define CFG_DB_BACKUP_MAX_WRITE_SIZE 139 +#define CFG_DB_WATCHDOG_INTERVAL_INITIAL 141 + #define CFG_LOG_DESTINATION 147 #define CFG_DB_DISCLESS 148 diff --git a/storage/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h index 59f580de38e..70c36fdfd1e 100644 --- a/storage/ndb/include/portlib/NdbTick.h +++ b/storage/ndb/include/portlib/NdbTick.h @@ -37,9 +37,6 @@ NDB_TICKS NdbTick_CurrentMillisecond(void); */ int NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros); - /*#define TIME_MEASUREMENT*/ -#ifdef TIME_MEASUREMENT - struct MicroSecondTimer { NDB_TICKS seconds; NDB_TICKS micro_seconds; @@ -54,7 +51,6 @@ struct MicroSecondTimer { NDB_TICKS NdbTick_getMicrosPassed(struct MicroSecondTimer start, struct MicroSecondTimer stop); int NdbTick_getMicroTimer(struct MicroSecondTimer* time_now); -#endif #ifdef __cplusplus } diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c index eff6b28b7eb..f69c42c0ca0 100644 --- a/storage/ndb/src/common/portlib/NdbTick.c +++ b/storage/ndb/src/common/portlib/NdbTick.c @@ -15,7 +15,7 @@ #include -#include "NdbTick.h" +#include #define NANOSEC_PER_SEC 1000000000 #define MICROSEC_PER_SEC 1000000 @@ -71,7 +71,6 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ } #endif -#ifdef TIME_MEASUREMENT int NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer) { @@ -102,4 +101,3 @@ NdbTick_getMicrosPassed(struct MicroSecondTimer start, } return ret_value; } -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 69673796fee..fd383de6f59 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -277,6 +277,14 @@ void Ndbcntr::execSTTOR(Signal* signal) break; case ZSTART_PHASE_1: jam(); + { + Uint32 db_watchdog_interval = 0; + const ndb_mgm_configuration_iterator * p = + m_ctx.m_config.getOwnConfigIterator(); + ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval); + ndbrequire(db_watchdog_interval); + update_watch_dog_timer(db_watchdog_interval); + } startPhase1Lab(signal); break; case ZSTART_PHASE_2: diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index e0b485eda59..fbda9873fd8 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -443,6 +443,11 @@ Configuration::setupConfiguration(){ "TimeBetweenWatchDogCheck missing"); } + if(iter.get(CFG_DB_WATCHDOG_INTERVAL_INITIAL, &_timeBetweenWatchDogCheckInitial)){ + ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, "Invalid configuration fetched", + "TimeBetweenWatchDogCheckInitial missing"); + } + /** * Get paths */ @@ -462,9 +467,12 @@ Configuration::setupConfiguration(){ * Create the watch dog thread */ { - Uint32 t = _timeBetweenWatchDogCheck; + if (_timeBetweenWatchDogCheckInitial < _timeBetweenWatchDogCheck) + _timeBetweenWatchDogCheckInitial = _timeBetweenWatchDogCheck; + + Uint32 t = _timeBetweenWatchDogCheckInitial; t = globalEmulatorData.theWatchDog ->setCheckInterval(t); - _timeBetweenWatchDogCheck = t; + _timeBetweenWatchDogCheckInitial = t; } ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config); diff --git 
a/storage/ndb/src/kernel/vm/Configuration.hpp b/storage/ndb/src/kernel/vm/Configuration.hpp index 934261e40af..918a889a171 100644 --- a/storage/ndb/src/kernel/vm/Configuration.hpp +++ b/storage/ndb/src/kernel/vm/Configuration.hpp @@ -84,6 +84,7 @@ private: Uint32 _maxErrorLogs; Uint32 _lockPagesInMainMemory; Uint32 _timeBetweenWatchDogCheck; + Uint32 _timeBetweenWatchDogCheckInitial; ndb_mgm_configuration * m_ownConfig; ndb_mgm_configuration * m_clusterConfig; diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 3125fc33258..1ba7368c352 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -662,7 +663,7 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U void * p = NULL; size_t size = n*s; Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s); - refresh_watch_dog(); + refresh_watch_dog(9); if (real_size > 0){ #ifdef VM_TRACE_MEM ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes", @@ -696,12 +697,12 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U char * ptr = (char*)p; const Uint32 chunk = 128 * 1024; while(size > chunk){ - refresh_watch_dog(); + refresh_watch_dog(9); memset(ptr, 0, chunk); ptr += chunk; size -= chunk; } - refresh_watch_dog(); + refresh_watch_dog(9); memset(ptr, 0, size); } } @@ -720,9 +721,16 @@ SimulatedBlock::deallocRecord(void ** ptr, } void -SimulatedBlock::refresh_watch_dog() +SimulatedBlock::refresh_watch_dog(Uint32 place) { - globalData.incrementWatchDogCounter(1); + globalData.incrementWatchDogCounter(place); +} + +void +SimulatedBlock::update_watch_dog_timer(Uint32 interval) +{ + extern EmulatorData globalEmulatorData; + globalEmulatorData.theWatchDog->setCheckInterval(interval); } void diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index 37a8dde5956..01fb11e05e8 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -334,7 +334,8 @@ protected: * Refresh Watch Dog in initialising code * */ - void refresh_watch_dog(); + void refresh_watch_dog(Uint32 place = 1); + void update_watch_dog_timer(Uint32 interval); /** * Prog error diff --git a/storage/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp index 2e24a5eaa6c..a7f5e8f5c2b 100644 --- a/storage/ndb/src/kernel/vm/WatchDog.cpp +++ b/storage/ndb/src/kernel/vm/WatchDog.cpp @@ -25,6 +25,8 @@ #include #include +#include + extern EventLogger g_eventLogger; extern "C" @@ -72,73 +74,115 @@ WatchDog::doStop(){ } } +const char *get_action(Uint32 IPValue) +{ + const char *action; + switch (IPValue) { + case 1: + action = "Job Handling"; + break; + case 2: + action = "Scanning Timers"; + break; + case 3: + action = "External I/O"; + break; + case 4: + action = "Print Job Buffers at crash"; + break; + case 5: + action = "Checking connections"; + break; + case 6: + action = "Performing Send"; + break; + case 7: + action = "Polling for Receive"; + break; + case 8: + action = "Performing Receive"; + break; + case 9: + action = "Allocating memory"; + break; + default: + action = "Unknown place"; + break; + }//switch + return action; +} + void -WatchDog::run(){ - unsigned int anIPValue; - unsigned int alerts = 0; +WatchDog::run() +{ + unsigned int anIPValue, sleep_time; unsigned int oldIPValue = 0; - + unsigned int theIntervalCheck = 
theInterval; + struct MicroSecondTimer start_time, last_time, now; + NdbTick_getMicroTimer(&start_time); + last_time = start_time; + // WatchDog for the single threaded NDB - while(!theStop){ - Uint32 tmp = theInterval / 500; - tmp= (tmp ? tmp : 1); - - while(!theStop && tmp > 0){ - NdbSleep_MilliSleep(500); - tmp--; - } - + while (!theStop) + { + sleep_time= 100; + + NdbSleep_MilliSleep(sleep_time); if(theStop) break; + NdbTick_getMicroTimer(&now); + if (NdbTick_getMicrosPassed(last_time, now)/1000 > sleep_time*2) + { + struct tms my_tms; + times(&my_tms); + g_eventLogger.info("Watchdog: User time: %llu System time: %llu", + (Uint64)my_tms.tms_utime, + (Uint64)my_tms.tms_stime); + g_eventLogger.warning("Watchdog: Warning overslept %u ms, expected %u ms.", + NdbTick_getMicrosPassed(last_time, now)/1000, + sleep_time); + } + last_time = now; + // Verify that the IP thread is not stuck in a loop anIPValue = *theIPValue; - if(anIPValue != 0) { + if (anIPValue != 0) + { oldIPValue = anIPValue; globalData.incrementWatchDogCounter(0); - alerts = 0; - } else { - const char *last_stuck_action; - alerts++; - switch (oldIPValue) { - case 1: - last_stuck_action = "Job Handling"; - break; - case 2: - last_stuck_action = "Scanning Timers"; - break; - case 3: - last_stuck_action = "External I/O"; - break; - case 4: - last_stuck_action = "Print Job Buffers at crash"; - break; - case 5: - last_stuck_action = "Checking connections"; - break; - case 6: - last_stuck_action = "Performing Send"; - break; - case 7: - last_stuck_action = "Polling for Receive"; - break; - case 8: - last_stuck_action = "Performing Receive"; - break; - default: - last_stuck_action = "Unknown place"; - break; - }//switch - g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); + NdbTick_getMicroTimer(&start_time); + theIntervalCheck = theInterval; + } + else + { + int warn = 1; + Uint32 elapsed = NdbTick_getMicrosPassed(start_time, now)/1000; + /* + oldIPValue == 9 indicates malloc going on, this can take some time + so only warn if we pass the watchdog interval + */ + if (oldIPValue == 9) + if (elapsed < theIntervalCheck) + warn = 0; + else + theIntervalCheck += theInterval; + + if (warn) { - struct tms my_tms; - times(&my_tms); - g_eventLogger.info("User time: %llu System time: %llu", - (Uint64)my_tms.tms_utime, - (Uint64)my_tms.tms_stime); - } - if(alerts == 3){ - shutdownSystem(last_stuck_action); + const char *last_stuck_action = get_action(oldIPValue); + g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action); + { + struct tms my_tms; + times(&my_tms); + g_eventLogger.info("Watchdog: User time: %llu System time: %llu", + (Uint64)my_tms.tms_utime, + (Uint64)my_tms.tms_stime); + } + if (elapsed > 3 * theInterval) + { + shutdownSystem(last_stuck_action); + } } } } diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index c10dacbee28..3e76071a0db 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -571,6 +571,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "70", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_WATCHDOG_INTERVAL_INITIAL, + "TimeBetweenWatchDogCheckInitial", + DB_TOKEN, + "Time between execution checks inside a database node in the early start phases when memory is allocated", + ConfigInfo::CI_USED, + true, + ConfigInfo::CI_INT, + "6000", + "70", + STR_VALUE(MAX_INT_RNIL) }, + { CFG_DB_STOP_ON_ERROR, "StopOnError", From 35b2f212ba5f5cd98b9ee4bd8332012cf3bd3438 Mon Sep 17 00:00:00 2001 From: 
unknown Date: Tue, 5 Jun 2007 17:29:50 +0200 Subject: [PATCH 15/38] Bug #28751 Lots of memory locked in memory causes high kswapd - add odirect option for lcp+backup+redo log to lower CPU/kswapd usage - writing odirect removes need for kernel write buffers avoiding kswapd to kick in mysql-test/ndb/ndb_config_2_node.ini: run mysql-test-run using ODirect storage/ndb/include/mgmapi/mgmapi_config_parameters.h: add new config parameter to choose ODirect storage/ndb/include/ndb_global.h.in: specify alignment needed for odirect storage/ndb/src/kernel/blocks/backup/Backup.cpp: read odirect config param open LCP and Backup datafiles with odirect if specified insert empty padding record if odirect is used allocate buffers aligned to be able to use odirect storage/ndb/src/kernel/blocks/backup/Backup.hpp: odirect and padding options storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp: add empty_record in file format storage/ndb/src/kernel/blocks/backup/BackupInit.cpp: read odirect config and allocate aligned storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp: correct debug printouts storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp: read odirect config param and align buffers storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp: read odirect config param and align buffers storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: read config params and open redo log files with odirect if set storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp: aligned writing for odirect correct odirect open options with test+fallback if odirect fails storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp: align + odirect check storage/ndb/src/kernel/blocks/restore.cpp: restor block to ignore new lcp padding empty_record storage/ndb/src/kernel/vm/SimulatedBlock.cpp: alligend log buffer allocation for odirect storage/ndb/src/kernel/vm/SimulatedBlock.hpp: alligend log buffer allocation for odirect storage/ndb/src/mgmsrv/ConfigInfo.cpp: new config param for odirect, default false storage/ndb/tools/restore/Restore.cpp: ndb_restore to skip empty_record alignment padding in backup file --- mysql-test/ndb/ndb_config_2_node.ini | 1 + .../include/mgmapi/mgmapi_config_parameters.h | 2 + storage/ndb/include/ndb_global.h.in | 2 + .../ndb/src/kernel/blocks/backup/Backup.cpp | 49 +++++- .../ndb/src/kernel/blocks/backup/Backup.hpp | 3 +- .../src/kernel/blocks/backup/BackupFormat.hpp | 10 +- .../src/kernel/blocks/backup/BackupInit.cpp | 7 +- .../ndb/src/kernel/blocks/backup/FsBuffer.hpp | 24 +-- storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 5 +- .../ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 14 +- .../ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 14 +- .../ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 151 +++++++++++++----- .../ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 4 + storage/ndb/src/kernel/blocks/restore.cpp | 3 + storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 25 ++- storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 1 + storage/ndb/src/mgmsrv/ConfigInfo.cpp | 12 ++ storage/ndb/tools/restore/Restore.cpp | 29 +++- 18 files changed, 279 insertions(+), 77 deletions(-) diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini index 99f31150d8c..0badf3145c3 100644 --- a/mysql-test/ndb/ndb_config_2_node.ini +++ b/mysql-test/ndb/ndb_config_2_node.ini @@ -12,6 +12,7 @@ MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes TimeBetweenGlobalCheckpoints= 500 NoOfFragmentLogFiles= 3 DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory +ODirect= 1 # the following parametes just function as a small regression # test that the parameter exists 
InitialNoOfOpenFiles= 27 diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index 45cfd5fd7bf..661e24b53cc 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -115,6 +115,8 @@ #define CFG_DB_MEMREPORT_FREQUENCY 166 +#define CFG_DB_O_DIRECT 168 + #define CFG_DB_SGA 198 /* super pool mem */ #define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */ diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index 60d32f62ee3..c3ea909ba2e 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -146,4 +146,6 @@ extern "C" { #define MAX(x,y) (((x)>(y))?(x):(y)) #endif +#define NDB_O_DIRECT_WRITE_ALIGNMENT 512 + #endif diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 57082eaccc8..645eb590ae3 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -2761,6 +2761,8 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr) c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); filePtr.p->m_flags |= BackupFile::BF_OPENING; + if (c_defaults.m_o_direct) + req->fileFlags |= FsOpenReq::OM_DIRECT; req->userPointer = filePtr.i; FsOpenReq::setVersion(req->fileNumber, 2); FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); @@ -3735,12 +3737,31 @@ Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo) } bool -Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo) +Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record) { Uint32 * tmp; const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2; + Uint32 sz = footSz + 1; - if(dataBuffer.getWritePtr(&tmp, footSz + 1)) { + if (fill_record) + { + Uint32 * new_tmp; + if (!dataBuffer.getWritePtr(&tmp, sz)) + return false; + new_tmp = tmp + sz; + + if ((UintPtr)new_tmp & (sizeof(Page32)-1)) + { + /* padding is needed to get full write */ + new_tmp += 2 /* to fit empty header minimum 2 words*/; + new_tmp = (Uint32 *)(((UintPtr)new_tmp + sizeof(Page32)-1) & + ~(UintPtr)(sizeof(Page32)-1)); + /* new write sz */ + sz = new_tmp - tmp; + } + } + + if(dataBuffer.getWritePtr(&tmp, sz)) { jam(); * tmp = 0; // Finish record stream tmp++; @@ -3752,7 +3773,17 @@ Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo) foot->FragmentNo = htonl(fragNo); foot->NoOfRecords = htonl(noOfRecords); foot->Checksum = htonl(0); - dataBuffer.updateWritePtr(footSz + 1); + + if (sz != footSz + 1) + { + tmp += footSz; + memset(tmp, 0, (sz - footSz - 1) * 4); + *tmp = htonl(BackupFormat::EMPTY_ENTRY); + tmp++; + *tmp = htonl(sz - footSz - 1); + } + + dataBuffer.updateWritePtr(sz); return true; }//if return false; @@ -3854,8 +3885,13 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) return; }//if + BackupRecordPtr ptr LINT_SET_PTR; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + OperationRecord & op = filePtr.p->operation; - if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo)) { + if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo, + c_defaults.m_o_direct)) + { jam(); signal->theData[0] = BackupContinueB::BUFFER_FULL_FRAG_COMPLETE; signal->theData[1] = filePtr.i; @@ -3865,9 +3901,6 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - 
BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - if (ptr.p->is_lcp()) { ptr.p->slaveState.setState(STOPPING); @@ -4905,6 +4938,8 @@ Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr) FsOpenReq::OM_CREATE | FsOpenReq::OM_APPEND | FsOpenReq::OM_AUTOSYNC; + if (c_defaults.m_o_direct) + req->fileFlags |= FsOpenReq::OM_DIRECT; FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF); req->auto_sync_size = c_defaults.m_disk_synch_size; diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp index 32f2e14ac92..3fd9b2967fd 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp @@ -240,7 +240,7 @@ public: * Once per fragment */ bool newFragment(Uint32 tableId, Uint32 fragNo); - bool fragComplete(Uint32 tableId, Uint32 fragNo); + bool fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record); /** * Once per scan frag (next) req/conf @@ -534,6 +534,7 @@ public: Uint32 m_disk_write_speed; Uint32 m_disk_synch_size; Uint32 m_diskless; + Uint32 m_o_direct; }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp index ace9dfe5c79..20f8f6650be 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -32,7 +32,8 @@ struct BackupFormat { TABLE_LIST = 4, TABLE_DESCRIPTION = 5, GCP_ENTRY = 6, - FRAGMENT_INFO = 7 + FRAGMENT_INFO = 7, + EMPTY_ENTRY = 8 }; struct FileHeader { @@ -93,6 +94,13 @@ struct BackupFormat { Uint32 NoOfRecords; Uint32 Checksum; }; + + /* optional padding for O_DIRECT */ + struct EmptyEntry { + Uint32 SectionType; + Uint32 SectionLength; + /* not used data */ + }; }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp index 4faa02e494f..2cd2a8a2bee 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -148,10 +148,13 @@ Backup::execREAD_CONFIG_REQ(Signal* signal) c_defaults.m_disk_write_speed = 10 * (1024 * 1024); c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024); c_defaults.m_disk_synch_size = 4 * (1024 * 1024); - + c_defaults.m_o_direct = true; + Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_defaults.m_diskless)); + ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, + &c_defaults.m_o_direct); ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR, &c_defaults.m_disk_write_speed_sr); ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED, @@ -204,7 +207,7 @@ Backup::execREAD_CONFIG_REQ(Signal* signal) / sizeof(Page32); // We need to allocate an additional of 2 pages. 1 page because of a bug in // ArrayPool and another one for DICTTAINFO. 
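// The EMPTY_ENTRY section above exists so that, with ODirect enabled, the last
// write of a fragment still covers a whole number of aligned pages. Below is a
// simplified standalone version of that padding step; kPageWords, kEmptyEntry
// and pad_to_page_boundary are illustrative stand-ins for Page32,
// BackupFormat::EMPTY_ENTRY and the fragComplete() code, and the htonl
// byte-order conversion used by the real file format is omitted.

#include <cstdint>
#include <vector>

static const size_t   kPageWords  = 8192;  // words per page (stand-in value)
static const uint32_t kEmptyEntry = 8;     // stand-in for EMPTY_ENTRY

void pad_to_page_boundary(std::vector<uint32_t>& words)
{
  const size_t used = words.size() % kPageWords;
  if (used == 0)
    return;                                // already ends on a page boundary
  size_t pad = kPageWords - used;
  if (pad < 2)                             // need room for type + length words
    pad += kPageWords;
  const size_t start = words.size();
  words.resize(start + pad, 0);            // zero-fill the unused words
  words[start]     = kEmptyEntry;          // SectionType
  words[start + 1] = (uint32_t)pad;        // SectionLength, header included
}

// A reader that sees kEmptyEntry simply skips SectionLength words, which is
// what the restore block and ndb_restore changes later in this series do.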
- c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2); + c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true); { // Init all tables SLList tables(c_tablePool); diff --git a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp index d26f36ccf40..bb0bbd6d770 100644 --- a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp +++ b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp @@ -270,8 +270,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ * ptr = &Tp[Tr]; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d", - Tr, Tw, Ts, Tm, sz1, * sz)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d", + Tr, Tmw, Ts, Tm, sz1, * sz)); return true; } @@ -279,8 +279,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ if(!m_eof){ * _eof = false; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> false", - Tr, Tw, Ts, Tm, sz1)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> false", + Tr, Tmw, Ts, Tm, sz1)); return false; } @@ -289,8 +289,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ * _eof = true; * ptr = &Tp[Tr]; - DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d eof", - Tr, Tw, Ts, Tm, sz1, * sz)); + DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d eof", + Tr, Tmw, Ts, Tm, sz1, * sz)); return false; } @@ -316,13 +316,13 @@ FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){ if(sz1 > sz){ // Note at least 1 word of slack * ptr = &Tp[Tw]; - DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> true", - sz, Tr, Tw, Ts, sz1)); + DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> true", + sz, Tw, sz1)); return true; } - DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> false", - sz, Tr, Tw, Ts, sz1)); + DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> false", + sz, Tw, sz1)); return false; } @@ -339,11 +339,15 @@ FsBuffer::updateWritePtr(Uint32 sz){ m_free -= sz; if(Tnew < Ts){ m_writeIndex = Tnew; + DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", + sz, m_writeIndex)); return; } memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2); m_writeIndex = Tnew - Ts; + DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", + sz, m_writeIndex)); } inline diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 0f88933f617..21a887c23de 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -115,9 +115,6 @@ class Dbtup; /* ------------------------------------------------------------------------- */ /* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. 
*/ /* ------------------------------------------------------------------------- */ -#define ZOPEN_READ 0 -#define ZOPEN_WRITE 1 -#define ZOPEN_READ_WRITE 2 #define ZVAR_NO_LOG_PAGE_WORD 1 #define ZLIST_OF_PAIRS 0 #define ZLIST_OF_PAIRS_SYNCH 16 @@ -2686,6 +2683,7 @@ private: UintR clfoFileSize; LogPageRecord *logPageRecord; + void *logPageRecordUnaligned; LogPageRecordPtr logPagePtr; UintR cfirstfreeLogPage; UintR clogPageFileSize; @@ -2889,6 +2887,7 @@ private: UintR ctransidHash[1024]; Uint32 c_diskless; + Uint32 c_o_direct; Uint32 c_error_insert_table_id; public: diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index 8aaf86de73a..f597519d8f4 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -49,6 +49,7 @@ void Dblqh::initData() logFileRecord = 0; logFileOperationRecord = 0; logPageRecord = 0; + logPageRecordUnaligned= 0; pageRefRecord = 0; tablerec = 0; tcConnectionrec = 0; @@ -105,10 +106,13 @@ void Dblqh::initRecords() sizeof(LogFileOperationRecord), clfoFileSize); - logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord", - sizeof(LogPageRecord), - clogPageFileSize, - false); + logPageRecord = + (LogPageRecord*)allocRecordAligned("LogPageRecord", + sizeof(LogPageRecord), + clogPageFileSize, + &logPageRecordUnaligned, + NDB_O_DIRECT_WRITE_ALIGNMENT, + false); pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord", sizeof(PageRefRecord), @@ -378,7 +382,7 @@ Dblqh::~Dblqh() sizeof(LogFileOperationRecord), clfoFileSize); - deallocRecord((void**)&logPageRecord, + deallocRecord((void**)&logPageRecordUnaligned, "LogPageRecord", sizeof(LogPageRecord), clogPageFileSize); diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 33696ebba27..644ff58cae5 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -1015,6 +1015,8 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal) cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless)); + c_o_direct = true; + ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_o_direct); Uint32 tmp= 0; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp)); @@ -13243,7 +13245,9 @@ void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr) signal->theData[3] = olfLogFilePtr.p->fileName[1]; signal->theData[4] = olfLogFilePtr.p->fileName[2]; signal->theData[5] = olfLogFilePtr.p->fileName[3]; - signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//Dblqh::openFileRw() @@ -13263,7 +13267,9 @@ void Dblqh::openLogfileInit(Signal* signal) signal->theData[3] = logFilePtr.p->fileName[1]; signal->theData[4] = logFilePtr.p->fileName[2]; signal->theData[5] = logFilePtr.p->fileName[3]; - signal->theData[6] = 0x302 | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE | FsOpenReq::OM_AUTOSYNC; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); sendSignal(NDBFS_REF, 
GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//Dblqh::openLogfileInit() @@ -13299,7 +13305,9 @@ void Dblqh::openNextLogfile(Signal* signal) signal->theData[3] = onlLogFilePtr.p->fileName[1]; signal->theData[4] = onlLogFilePtr.p->fileName[2]; signal->theData[5] = onlLogFilePtr.p->fileName[3]; - signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC; + signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC; + if (c_o_direct) + signal->theData[6] |= FsOpenReq::OM_DIRECT; req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); }//if diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index 5f93ee31bc7..cf18bf34040 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -163,7 +163,12 @@ AsyncFile::run() theStartFlag = true; // Create write buffer for bigger writes theWriteBufferSize = WRITEBUFFERSIZE; - theWriteBuffer = (char *) ndbd_malloc(theWriteBufferSize); + theWriteBufferUnaligned = (char *) ndbd_malloc(theWriteBufferSize + + NDB_O_DIRECT_WRITE_ALIGNMENT-1); + theWriteBuffer = (char *) + (((UintPtr)theWriteBufferUnaligned + NDB_O_DIRECT_WRITE_ALIGNMENT - 1) & + ~(UintPtr)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1)); + NdbMutex_Unlock(theStartMutexPtr); NdbCondition_Signal(theStartConditionPtr); @@ -247,6 +252,78 @@ AsyncFile::run() static char g_odirect_readbuf[2*GLOBAL_PAGE_SIZE -1]; #endif +int +AsyncFile::check_odirect_write(Uint32 flags, int& new_flags, int mode) +{ + assert(new_flags & (O_CREAT | O_TRUNC)); +#ifdef O_DIRECT + int ret; + char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); + while (((ret = ::write(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && + (errno == EINTR)); + if (ret == -1) + { + new_flags &= ~O_DIRECT; + ndbout_c("%s Failed to write using O_DIRECT, disabling", + theFileName.c_str()); + } + + close(theFd); + theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + return errno; +#endif + + return 0; +} + +int +AsyncFile::check_odirect_read(Uint32 flags, int &new_flags, int mode) +{ +#ifdef O_DIRECT + int ret; + char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); + while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && + (errno == EINTR)); + if (ret == -1) + { + ndbout_c("%s Failed to read using O_DIRECT, disabling", + theFileName.c_str()); + goto reopen; + } + + if(lseek(theFd, 0, SEEK_SET) != 0) + { + return errno; + } + + if ((flags & FsOpenReq::OM_CHECK_SIZE) == 0) + { + struct stat buf; + if ((fstat(theFd, &buf) == -1)) + { + return errno; + } + else if ((buf.st_size % GLOBAL_PAGE_SIZE) != 0) + { + ndbout_c("%s filesize not a multiple of %d, disabling O_DIRECT", + theFileName.c_str(), GLOBAL_PAGE_SIZE); + goto reopen; + } + } + + return 0; + +reopen: + close(theFd); + new_flags &= ~O_DIRECT; + theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + return errno; +#endif + return 0; +} + void AsyncFile::openReq(Request* request) { m_auto_sync_freq = 0; @@ -312,7 +389,7 @@ void AsyncFile::openReq(Request* request) } #else Uint32 flags = request->par.open.flags; - Uint32 new_flags = 0; + int new_flags = 0; // Convert file open flags from Solaris to Liux if (flags & FsOpenReq::OM_CREATE) @@ -343,10 +420,6 @@ void AsyncFile::openReq(Request* request) { new_flags |= O_DIRECT; } -#elif 
defined O_SYNC - { - flags |= FsOpenReq::OM_SYNC; - } #endif if ((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT)) @@ -355,15 +428,19 @@ void AsyncFile::openReq(Request* request) new_flags |= O_SYNC; #endif } - + + const char * rw = ""; switch(flags & 0x3){ case FsOpenReq::OM_READONLY: + rw = "r"; new_flags |= O_RDONLY; break; case FsOpenReq::OM_WRITEONLY: + rw = "w"; new_flags |= O_WRONLY; break; case FsOpenReq::OM_READWRITE: + rw = "rw"; new_flags |= O_RDWR; break; default: @@ -404,11 +481,6 @@ no_odirect: if (new_flags & O_DIRECT) { new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; -#ifdef O_SYNC - if (! (flags & FsOpenReq::OM_INIT)) - new_flags |= O_SYNC; -#endif goto no_odirect; } #endif @@ -421,11 +493,6 @@ no_odirect: else if (new_flags & O_DIRECT) { new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; -#ifdef O_SYNC - if (! (flags & FsOpenReq::OM_INIT)) - new_flags |= O_SYNC; -#endif goto no_odirect; } #endif @@ -512,7 +579,6 @@ no_odirect: { ndbout_c("error on first write(%d), disable O_DIRECT", err); new_flags &= ~O_DIRECT; - flags |= FsOpenReq::OM_SYNC; close(theFd); theFd = ::open(theFileName.c_str(), new_flags, mode); if (theFd != -1) @@ -532,26 +598,32 @@ no_odirect: else if (flags & FsOpenReq::OM_DIRECT) { #ifdef O_DIRECT - do { - int ret; - char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); - while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && (errno == EINTR)); - if (ret == -1) - { - ndbout_c("%s Failed to read using O_DIRECT, disabling", theFileName.c_str()); - flags |= FsOpenReq::OM_SYNC; - flags |= FsOpenReq::OM_INIT; - break; - } - if(lseek(theFd, 0, SEEK_SET) != 0) - { - request->error = errno; - return; - } - } while (0); + if (flags & (FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE)) + { + request->error = check_odirect_write(flags, new_flags, mode); + } + else + { + request->error = check_odirect_read(flags, new_flags, mode); + } + + if (request->error) + return; #endif } - +#ifdef VM_TRACE + if (flags & FsOpenReq::OM_DIRECT) + { +#ifdef O_DIRECT + ndbout_c("%s %s O_DIRECT: %d", + theFileName.c_str(), rw, + !!(new_flags & O_DIRECT)); +#else + ndbout_c("%s %s O_DIRECT: 0", + theFileName.c_str(), rw); +#endif + } +#endif if ((flags & FsOpenReq::OM_SYNC) && (flags & FsOpenReq::OM_INIT)) { #ifdef O_SYNC @@ -562,6 +634,10 @@ no_odirect: new_flags &= ~(O_CREAT | O_TRUNC); new_flags |= O_SYNC; theFd = ::open(theFileName.c_str(), new_flags, mode); + if (theFd == -1) + { + request->error = errno; + } #endif } #endif @@ -1079,7 +1155,8 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){ void AsyncFile::endReq() { // Thread is ended with return - if (theWriteBuffer) ndbd_free(theWriteBuffer, theWriteBufferSize); + if (theWriteBufferUnaligned) + ndbd_free(theWriteBufferUnaligned, theWriteBufferSize); } diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp index e8f2deb016c..64567dd2bb8 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp @@ -232,9 +232,13 @@ private: bool theStartFlag; int theWriteBufferSize; char* theWriteBuffer; + void* theWriteBufferUnaligned; size_t m_write_wo_sync; // Writes wo/ sync size_t m_auto_sync_freq; // Auto sync freq in bytes + + int check_odirect_read(Uint32 flags, int&new_flags, int mode); + int check_odirect_write(Uint32 flags, int&new_flags, int mode); public: SimulatedBlock& m_fs; Ptr m_page_ptr; diff --git 
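// The check_odirect_write()/check_odirect_read() logic above boils down to:
// open with O_DIRECT, try one correctly aligned transfer, and if that fails
// reopen the same file without the flag. A compressed standalone sketch of the
// probe for the create/truncate case follows; the function name, the 4096-byte
// probe size and the assumption that base_flags contains O_RDWR|O_CREAT|O_TRUNC
// are illustrative choices, not the actual AsyncFile code.

#include <cerrno>
#include <cstdlib>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

static const size_t kProbeSize = 4096;     // aligned size; device limits differ

int open_with_odirect_probe(const char* path, int base_flags, mode_t mode)
{
  int flags = base_flags;
#ifdef O_DIRECT
  flags |= O_DIRECT;
#endif
  int fd = ::open(path, flags, mode);
#ifdef O_DIRECT
  if (fd == -1 && (flags & O_DIRECT))
  {
    flags &= ~O_DIRECT;                    // the open itself was refused
    fd = ::open(path, flags, mode);
  }
  if (fd != -1 && (flags & O_DIRECT))
  {
    void* buf = 0;
    if (posix_memalign(&buf, kProbeSize, kProbeSize) == 0)
    {
      ssize_t r;
      do {
        r = ::write(fd, buf, kProbeSize);  // aligned buffer, aligned length
      } while (r == -1 && errno == EINTR);
      free(buf);
      if (r == -1)
      {
        ::close(fd);                       // probe failed: fall back silently
        flags &= ~O_DIRECT;
        fd = ::open(path, flags, mode);
      }
      else
      {
        ::ftruncate(fd, 0);                // discard the probe block
        ::lseek(fd, 0, SEEK_SET);
      }
    }
  }
#endif
  return fd;
}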
a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index 51644ef0712..2c204b912b1 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -559,6 +559,9 @@ Restore::restore_next(Signal* signal, FilePtr file_ptr) case BackupFormat::GCP_ENTRY: parse_gcp_entry(signal, file_ptr, data, len); break; + case BackupFormat::EMPTY_ENTRY: + // skip + break; case 0x4e444242: // 'NDBB' if (check_file_version(signal, ntohl(* (data+2))) == 0) { diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 1ba7368c352..7ad1d486a02 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -39,6 +39,9 @@ #include #include +#include +extern EventLogger g_eventLogger; + #define ljamEntry() jamEntryLine(30000 + __LINE__) #define ljam() jamLine(30000 + __LINE__) @@ -656,13 +659,19 @@ SimulatedBlock::getBatSize(Uint16 blockNo){ return sb->theBATSize; } +void* SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId) +{ + return allocRecordAligned(type, s, n, 0, 0, clear, paramId); +} + void* -SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId) +SimulatedBlock::allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align, bool clear, Uint32 paramId) { void * p = NULL; - size_t size = n*s; - Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s); + Uint32 over_alloc = unaligned_buffer ? (align - 1) : 0; + size_t size = n*s + over_alloc; + Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s) + over_alloc; refresh_watch_dog(9); if (real_size > 0){ #ifdef VM_TRACE_MEM @@ -705,6 +714,16 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U refresh_watch_dog(9); memset(ptr, 0, size); } + if (unaligned_buffer) + { + *unaligned_buffer = p; + p = (void *)(((UintPtr)p + over_alloc) & ~(UintPtr)(over_alloc)); +#ifdef VM_TRACE + g_eventLogger.info("'%s' (%u) %llu %llu, alignment correction %u bytes", + type, align, (Uint64)p, (Uint64)p+n*s, + (Uint32)((UintPtr)p - (UintPtr)*unaligned_buffer)); +#endif + } } return p; } diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index 01fb11e05e8..86e26986f93 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -378,6 +378,7 @@ protected: * */ void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0); + void* allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align = NDB_O_DIRECT_WRITE_ALIGNMENT, bool clear = true, Uint32 paramId = 0); /** * Deallocate record diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index 3e76071a0db..d96942bbfb7 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1313,6 +1313,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "0", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_O_DIRECT, + "ODirect", + DB_TOKEN, + "Use O_DIRECT file write/read when possible", + ConfigInfo::CI_USED, + true, + ConfigInfo::CI_BOOL, + "false", + "false", + "true"}, + /*************************************************************************** * API ***************************************************************************/ diff --git a/storage/ndb/tools/restore/Restore.cpp 
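// allocRecordAligned() and the theWriteBufferUnaligned member above use the
// usual over-allocate-and-round-up trick so ordinary heap memory can be handed
// to O_DIRECT writes. A minimal standalone version; the names are illustrative
// and, as in the real code, the caller must keep the unaligned pointer around
// and free() that one, not the aligned pointer.

#include <cstdint>
#include <cstdlib>

void* alloc_record_aligned(size_t s, size_t n, size_t align, void** unaligned_out)
{
  const size_t bytes = n * s + (align - 1);       // slack for the worst case
  void* raw = std::malloc(bytes);
  *unaligned_out = raw;
  if (raw == 0)
    return 0;
  uintptr_t p = (uintptr_t)raw;
  p = (p + align - 1) & ~(uintptr_t)(align - 1);  // round up; align is a power of two
  return (void*)p;
}

// With NDB_O_DIRECT_WRITE_ALIGNMENT (512 in ndb_global.h.in above) as align,
// at most 511 bytes per allocation are lost to the alignment correction.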
b/storage/ndb/tools/restore/Restore.cpp index 3d466384782..15e442a4f35 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -867,13 +867,32 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId) debug << "RestoreDataIterator::getNextFragment" << endl; - if (buffer_read(&Header, sizeof(Header), 1) != 1){ + while (1) + { + /* read first part of header */ + if (buffer_read(&Header, 8, 1) != 1) + { + ret = 0; + return false; + } // if + + /* skip if EMPTY_ENTRY */ + Header.SectionType = ntohl(Header.SectionType); + Header.SectionLength = ntohl(Header.SectionLength); + if (Header.SectionType == BackupFormat::EMPTY_ENTRY) + { + void *tmp; + buffer_get_ptr(&tmp, Header.SectionLength*4-8, 1); + continue; + } + break; + } + /* read rest of header */ + if (buffer_read(((char*)&Header)+8, sizeof(Header)-8, 1) != 1) + { ret = 0; return false; - } // if - - Header.SectionType = ntohl(Header.SectionType); - Header.SectionLength = ntohl(Header.SectionLength); + } Header.TableId = ntohl(Header.TableId); Header.FragmentNo = ntohl(Header.FragmentNo); Header.ChecksumType = ntohl(Header.ChecksumType); From 9e692d86f83c672aef54218a80bd277524491bba Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 8 Jun 2007 12:27:52 +0200 Subject: [PATCH 16/38] ndb - bug#28724 for blobs, op flag to not set error on trans (fix, recommit) storage/ndb/include/ndbapi/NdbOperation.hpp: add NdbOperation option m_noErrorPropagation. If AO_IgnoreError and it are set then operation error does not set error code on transaction. Private, and used by NdbBlob.cpp only. storage/ndb/src/ndbapi/NdbBlob.cpp: add NdbOperation option m_noErrorPropagation. If AO_IgnoreError and it are set then operation error does not set error code on transaction. Private, and used by NdbBlob.cpp only. storage/ndb/src/ndbapi/NdbOperation.cpp: add NdbOperation option m_noErrorPropagation. If AO_IgnoreError and it are set then operation error does not set error code on transaction. Private, and used by NdbBlob.cpp only. --- storage/ndb/include/ndbapi/NdbOperation.hpp | 7 +++++++ storage/ndb/src/ndbapi/NdbBlob.cpp | 3 +++ storage/ndb/src/ndbapi/NdbOperation.cpp | 8 ++++++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp index 0fa2cac0a32..06111941df4 100644 --- a/storage/ndb/include/ndbapi/NdbOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbOperation.hpp @@ -1042,6 +1042,13 @@ protected: */ Int8 m_abortOption; + /* + * For blob impl, option to not propagate error to trans level. + * Could be AO_IgnoreError variant if we want it public. + * Ignored unless AO_IgnoreError is also set. 
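// The m_noErrorPropagation flag described above lets the blob code ignore an
// expected per-operation failure without dirtying the transaction's error
// status. A toy model of that rule; Txn, Op and the member names are invented,
// only the condition mirrors the setErrorCode() change in this patch.

struct Txn
{
  int error;
  Txn() : error(0) {}
};

struct Op
{
  Txn* txn;
  int  error;
  bool abortOptionIgnoreError;   // AO_IgnoreError was requested
  bool noErrorPropagation;       // blob-internal: keep the failure local

  explicit Op(Txn* t)
    : txn(t), error(0), abortOptionIgnoreError(false), noErrorPropagation(false) {}

  void setErrorCode(int code)
  {
    error = code;                                         // always kept on the op
    if (!(abortOptionIgnoreError && noErrorPropagation))  // only propagated when
      txn->error = code;                                  // not suppressed
  }
};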
+ */ + Int8 m_noErrorPropagation; + friend struct Ndb_free_list_t; }; diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index 25dcafdef53..24d648b0241 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -1261,6 +1261,7 @@ NdbBlob::deletePartsUnknown(Uint32 part) DBUG_RETURN(-1); } tOp->m_abortOption= NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; n++; } DBUG_PRINT("info", ("bat=%u", bat)); @@ -1597,6 +1598,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) } if (isWriteOp()) { tOp->m_abortOption = NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; } theHeadInlineReadOp = tOp; // execute immediately @@ -1643,6 +1645,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) } if (isWriteOp()) { tOp->m_abortOption = NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; } theHeadInlineReadOp = tOp; // execute immediately diff --git a/storage/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp index 903372ddb9d..50531292e40 100644 --- a/storage/ndb/src/ndbapi/NdbOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbOperation.cpp @@ -76,7 +76,8 @@ NdbOperation::NdbOperation(Ndb* aNdb, NdbOperation::Type aType) : m_keyInfoGSN(GSN_KEYINFO), m_attrInfoGSN(GSN_ATTRINFO), theBlobList(NULL), - m_abortOption(-1) + m_abortOption(-1), + m_noErrorPropagation(false) { theReceiver.init(NdbReceiver::NDB_OPERATION, this); theError.code = 0; @@ -101,7 +102,8 @@ NdbOperation::setErrorCode(int anErrorCode) theError.code = anErrorCode; theNdbCon->theErrorLine = theErrorLine; theNdbCon->theErrorOperation = this; - theNdbCon->setOperationErrorCode(anErrorCode); + if (!(m_abortOption == AO_IgnoreError && m_noErrorPropagation)) + theNdbCon->setOperationErrorCode(anErrorCode); } /****************************************************************************** @@ -116,6 +118,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode) theError.code = anErrorCode; theNdbCon->theErrorLine = theErrorLine; theNdbCon->theErrorOperation = this; + // ignore m_noErrorPropagation theNdbCon->setOperationErrorCodeAbort(anErrorCode); } @@ -161,6 +164,7 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){ theMagicNumber = 0xABCDEF01; theBlobList = NULL; m_abortOption = -1; + m_noErrorPropagation = false; m_no_disk_flag = 1; tSignal = theNdb->getSignal(); From 2821723e0fc9738b33c188a8d3729586935186e1 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2007 17:19:20 +0200 Subject: [PATCH 17/38] internal interface to ndb (to be used by e.g. 
ndb_restore) storage/ndb/src/ndbapi/ndb_internal.hpp: New BitKeeper file ``storage/ndb/src/ndbapi/ndb_internal.hpp'' --- storage/ndb/include/ndbapi/Ndb.hpp | 1 + .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 3 ++- storage/ndb/src/ndbapi/ndb_internal.hpp | 27 +++++++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 storage/ndb/src/ndbapi/ndb_internal.hpp diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp index 5f96408ea30..e677616f43b 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -1055,6 +1055,7 @@ class Ndb friend class NdbDictInterface; friend class NdbBlob; friend class NdbImpl; + friend class Ndb_internal; #endif public: diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 00acfe62ad9..bfedf30d201 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -41,6 +41,7 @@ #include #include "NdbEventOperationImpl.hpp" #include +#include "ndb_internal.hpp" #include extern EventLogger g_eventLogger; @@ -2838,7 +2839,7 @@ send_report: data[5]= apply_gci >> 32; data[6]= latest_gci & ~(Uint32)0; data[7]= latest_gci >> 32; - m_ndb->theImpl->send_event_report(data,8); + Ndb_internal().send_event_report(m_ndb, data,8); #ifdef VM_TRACE assert(m_total_alloc >= m_free_data_sz); #endif diff --git a/storage/ndb/src/ndbapi/ndb_internal.hpp b/storage/ndb/src/ndbapi/ndb_internal.hpp new file mode 100644 index 00000000000..488946dec83 --- /dev/null +++ b/storage/ndb/src/ndbapi/ndb_internal.hpp @@ -0,0 +1,27 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
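// The friend declaration added to Ndb.hpp above is the whole mechanism: the
// class keeps its implementation private and grants one small helper access,
// so internal tools (the event buffer here, ndb_restore later) can reach a
// single entry point without widening the public API. A generic sketch of the
// pattern with invented names (Widget, WidgetInternal, sendReport):

class WidgetInternal;            // forward declaration for the friend grant

class Widget
{
public:
  Widget() : m_reports(0) {}
private:
  friend class WidgetInternal;   // only this helper may call sendReport()
  int sendReport(int value) { m_reports += value; return 0; }
  int m_reports;
};

class WidgetInternal
{
public:
  static int sendReport(Widget& w, int value) { return w.sendReport(value); }
};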
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#include + +class Ndb_internal +{ +private: + friend class NdbEventBuffer; + Ndb_internal() {} + virtual ~Ndb_internal() {} + int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length) + { return ndb->theImpl->send_event_report(data, length); } +}; From 5e047ec91bfe6a644523643e1fd664cd2458fd3e Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2007 17:28:52 +0200 Subject: [PATCH 18/38] change include file --- storage/ndb/src/ndbapi/ndb_internal.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/ndb/src/ndbapi/ndb_internal.hpp b/storage/ndb/src/ndbapi/ndb_internal.hpp index 488946dec83..2ed7a7ecc8c 100644 --- a/storage/ndb/src/ndbapi/ndb_internal.hpp +++ b/storage/ndb/src/ndbapi/ndb_internal.hpp @@ -13,8 +13,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include -#include +#include "NdbImpl.hpp" class Ndb_internal { From ab0df1eb16cf1838407e68bcfa45c84468b0c61a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 11 Jun 2007 17:50:39 +0200 Subject: [PATCH 19/38] make function static --- storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 2 +- storage/ndb/src/ndbapi/ndb_internal.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index bfedf30d201..a82983fca8c 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -2839,7 +2839,7 @@ send_report: data[5]= apply_gci >> 32; data[6]= latest_gci & ~(Uint32)0; data[7]= latest_gci >> 32; - Ndb_internal().send_event_report(m_ndb, data,8); + Ndb_internal::send_event_report(m_ndb, data,8); #ifdef VM_TRACE assert(m_total_alloc >= m_free_data_sz); #endif diff --git a/storage/ndb/src/ndbapi/ndb_internal.hpp b/storage/ndb/src/ndbapi/ndb_internal.hpp index 2ed7a7ecc8c..f5f37f95a04 100644 --- a/storage/ndb/src/ndbapi/ndb_internal.hpp +++ b/storage/ndb/src/ndbapi/ndb_internal.hpp @@ -21,6 +21,6 @@ private: friend class NdbEventBuffer; Ndb_internal() {} virtual ~Ndb_internal() {} - int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length) + static int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length) { return ndb->theImpl->send_event_report(data, length); } }; From 1f90b253d4ff5d80f369c167aff8f8e775d75de0 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2007 09:13:42 +0200 Subject: [PATCH 20/38] ndb - bug#29044 Improve buddy high order allocation Make removeCommonArea O(1) instead of O(N) Add limit to left/right search storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp: Add info to buddy module test about 1) loops being made in buddy 2) how much was allocated storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp: 1) make removeCommonArea o(1) - as list is (after fix) double linked anyway 2) set page_state = ZFREE_COMMON insertCommonArea and ~ZFREE_COMMON in removeCommonArea 3) add max loops in search left/right 4) add more debug info --- .../src/kernel/blocks/dbtup/DbtupDebug.cpp | 47 +++++++++++--- .../src/kernel/blocks/dbtup/DbtupPagMan.cpp | 61 +++++++++++++------ 2 files changed, 83 insertions(+), 25 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp 
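// As the commit message above says, the gain from making the free list double
// linked is that removeCommonArea() no longer walks cfreepageList from the head
// to find the predecessor of the page being removed. A toy doubly linked list
// showing the O(1) unlink; Node and the function names are illustrative, not
// the page records used in DbtupPagMan.cpp.

#include <cstddef>

struct Node
{
  Node* prev;
  Node* next;
};

void insert_front(Node*& head, Node* n)
{
  n->prev = NULL;
  n->next = head;
  if (head != NULL)
    head->prev = n;
  head = n;
}

void remove_node(Node*& head, Node* n)   // O(1): no search for the predecessor
{
  if (n->prev != NULL)
    n->prev->next = n->next;
  else
    head = n->next;                      // n was the head of the list
  if (n->next != NULL)
    n->next->prev = n->prev;
  n->prev = n->next = NULL;
}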
b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp index ecee7e867f8..9b60d5d47ed 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp @@ -76,6 +76,10 @@ Dbtup::reportMemoryUsage(Signal* signal, int incDec){ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB); } +#ifdef VM_TRACE +extern Uint32 fc_left, fc_right, fc_remove; +#endif + void Dbtup::execDUMP_STATE_ORD(Signal* signal) { @@ -157,12 +161,20 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal) return; }//if #endif -#if defined VM_TRACE && 0 - if (type == 1211){ - ndbout_c("Startar modul test av Page Manager"); +#if defined VM_TRACE + if (type == 1211 || type == 1212 || type == 1213){ + Uint32 seed = time(0); + if (signal->getLength() > 1) + seed = signal->theData[1]; + ndbout_c("Startar modul test av Page Manager (seed: 0x%x)", seed); + srand(seed); Vector chunks; const Uint32 LOOPS = 1000; + Uint32 sum_req = 0; + Uint32 sum_conf = 0; + Uint32 sum_loop = 0; + Uint32 max_loop = 0; for(Uint32 i = 0; i> 3) + (sum_conf >> 4); + } switch(c){ case 0:{ // Release const int ch = rand() % chunks.size(); @@ -192,23 +211,33 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal) case 2: { // Seize(n) - fail alloc += free; // Fall through + sum_req += free; + goto doalloc; } case 1: { // Seize(n) (success) - + sum_req += alloc; + doalloc: Chunk chunk; allocConsPages(alloc, chunk.pageCount, chunk.pageId); ndbrequire(chunk.pageCount <= alloc); if(chunk.pageCount != 0){ chunks.push_back(chunk); if(chunk.pageCount != alloc) { - ndbout_c(" Tried to allocate %d - only allocated %d - free: %d", - alloc, chunk.pageCount, free); + if (type == 1211) + ndbout_c(" Tried to allocate %d - only allocated %d - free: %d", + alloc, chunk.pageCount, free); } } else { ndbout_c(" Failed to alloc %d pages with %d pages free", alloc, free); } + sum_conf += chunk.pageCount; + Uint32 tot = fc_left + fc_right + fc_remove; + sum_loop += tot; + if (tot > max_loop) + max_loop = tot; + for(Uint32 i = 0; i 0) { + Uint32 loop = 0; + while (allocPageRef > 0 && + ++loop < 16) + { ljam(); pageLastPtr.i = allocPageRef - 1; c_page_pool.getPtr(pageLastPtr); @@ -258,6 +268,9 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef, remainAllocate -= listSize; }//if }//if +#ifdef VM_TRACE + fc_left++; +#endif }//while }//Dbtup::findFreeLeftNeighbours() @@ -271,7 +284,10 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef, ljam(); return; }//if - while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) { + Uint32 loop = 0; + while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize() && + ++loop < 16) + { ljam(); pageFirstPtr.i = allocPageRef + noPagesAllocated; c_page_pool.getPtr(pageFirstPtr); @@ -298,24 +314,37 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef, remainAllocate -= listSize; }//if }//if +#ifdef VM_TRACE + fc_right++; +#endif }//while }//Dbtup::findFreeRightNeighbours() void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList) { cnoOfAllocatedPages -= (1 << insList); - PagePtr pageLastPtr, pageInsPtr; + PagePtr pageLastPtr, pageInsPtr, pageHeadPtr; + pageHeadPtr.i = cfreepageList[insList]; c_page_pool.getPtr(pageInsPtr, insPageRef); ndbrequire(insList < 16); pageLastPtr.i = (pageInsPtr.i + (1 << insList)) - 1; - pageInsPtr.p->next_cluster_page = cfreepageList[insList]; + pageInsPtr.p->page_state = ZFREE_COMMON; + pageInsPtr.p->next_cluster_page = pageHeadPtr.i; pageInsPtr.p->prev_cluster_page = RNIL; pageInsPtr.p->last_cluster_page = pageLastPtr.i; cfreepageList[insList] = 
pageInsPtr.i; + if (pageHeadPtr.i != RNIL) + { + jam(); + c_page_pool.getPtr(pageHeadPtr); + pageHeadPtr.p->prev_cluster_page = pageInsPtr.i; + } + c_page_pool.getPtr(pageLastPtr); + pageLastPtr.p->page_state = ZFREE_COMMON; pageLastPtr.p->first_cluster_page = pageInsPtr.i; pageLastPtr.p->next_page = RNIL; }//Dbtup::insertCommonArea() @@ -323,12 +352,13 @@ void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList) void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) { cnoOfAllocatedPages += (1 << list); - PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, pageSearchPtr, remPagePtr; + PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, remPagePtr; c_page_pool.getPtr(remPagePtr, remPageRef); ndbrequire(list < 16); if (cfreepageList[list] == remPagePtr.i) { ljam(); + ndbassert(remPagePtr.p->prev_cluster_page == RNIL); cfreepageList[list] = remPagePtr.p->next_cluster_page; pageNextPtr.i = cfreepageList[list]; if (pageNextPtr.i != RNIL) { @@ -337,30 +367,25 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) pageNextPtr.p->prev_cluster_page = RNIL; }//if } else { - pageSearchPtr.i = cfreepageList[list]; - while (true) { - ljam(); - c_page_pool.getPtr(pageSearchPtr); - pagePrevPtr = pageSearchPtr; - pageSearchPtr.i = pageSearchPtr.p->next_cluster_page; - if (pageSearchPtr.i == remPagePtr.i) { - ljam(); - break; - }//if - }//while + pagePrevPtr.i = remPagePtr.p->prev_cluster_page; pageNextPtr.i = remPagePtr.p->next_cluster_page; + c_page_pool.getPtr(pagePrevPtr); pagePrevPtr.p->next_cluster_page = pageNextPtr.i; - if (pageNextPtr.i != RNIL) { + if (pageNextPtr.i != RNIL) + { ljam(); c_page_pool.getPtr(pageNextPtr); pageNextPtr.p->prev_cluster_page = pagePrevPtr.i; - }//if + } }//if remPagePtr.p->next_cluster_page= RNIL; remPagePtr.p->last_cluster_page= RNIL; remPagePtr.p->prev_cluster_page= RNIL; + remPagePtr.p->page_state = ~ZFREE_COMMON; pageLastPtr.i = (remPagePtr.i + (1 << list)) - 1; c_page_pool.getPtr(pageLastPtr); pageLastPtr.p->first_cluster_page= RNIL; + pageLastPtr.p->page_state = ~ZFREE_COMMON; + }//Dbtup::removeCommonArea() From 98e08c79b029cc435f1a6fb61f822c709a9fedaa Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2007 10:06:20 +0200 Subject: [PATCH 21/38] extend backup dump to give more info --- .../ndb/src/kernel/blocks/backup/Backup.cpp | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 645eb590ae3..6ad81df20be 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -448,6 +448,41 @@ Backup::execDUMP_STATE_ORD(Signal* signal) filePtr.p->m_flags); } } + + ndbout_c("m_curr_disk_write_speed: %u m_words_written_this_period: %u m_overflow_disk_write: %u", + m_curr_disk_write_speed, m_words_written_this_period, m_overflow_disk_write); + ndbout_c("m_reset_delay_used: %u m_reset_disk_speed_time: %llu", + m_reset_delay_used, (Uint64)m_reset_disk_speed_time); + for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) + { + ndbout_c("BackupRecord %u: BackupId: %u MasterRef: %x ClientRef: %x", + ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef); + ndbout_c(" State: %u", ptr.p->slaveState.getState()); + ndbout_c(" noOfByte: %llu noOfRecords: %llu", + ptr.p->noOfBytes, ptr.p->noOfRecords); + ndbout_c(" noOfLogBytes: %llu noOfLogRecords: %llu", + ptr.p->noOfLogBytes, ptr.p->noOfLogRecords); + ndbout_c(" errorCode: %u", ptr.p->errorCode); + BackupFilePtr filePtr; + 
for(ptr.p->files.first(filePtr); filePtr.i != RNIL; + ptr.p->files.next(filePtr)) + { + ndbout_c(" file %u: type: %u flags: H'%x tableId: %u fragmentId: %u", + filePtr.i, filePtr.p->fileType, filePtr.p->m_flags, + filePtr.p->tableId, filePtr.p->fragmentNo); + } + if (ptr.p->slaveState.getState() == SCANNING && ptr.p->dataFilePtr != RNIL) + { + c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); + OperationRecord & op = filePtr.p->operation; + Uint32 *tmp = NULL; + Uint32 sz = 0; + bool eof = FALSE; + bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof); + ndbout_c("ready: %s eof: %s", ready ? "TRUE" : "FALSE", eof ? "TRUE" : "FALSE"); + } + } + return; } if(signal->theData[0] == 24){ /** From 805d32bd0f4ad0faef62db9c09c76dcc50c66464 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Jun 2007 10:35:21 +0200 Subject: [PATCH 22/38] Bug#29044 - memory buddy allocator "unoptimal" memory handling - add config param to have better behavior with large tables --- .../ndb/include/mgmapi/mgmapi_config_parameters.h | 2 ++ storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 1 + storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 6 ++++++ storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 5 +++++ storage/ndb/src/mgmsrv/ConfigInfo.cpp | 12 ++++++++++++ 5 files changed, 26 insertions(+) diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index d0bd8be16a3..ac2cbf060fd 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -118,6 +118,8 @@ #define CFG_DB_O_DIRECT 168 +#define CFG_DB_MAX_ALLOCATE 169 + #define CFG_DB_SGA 198 /* super pool mem */ #define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */ diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index d59d5cd79f2..7845305da6c 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -2618,6 +2618,7 @@ private: ArrayPool c_page_pool; Uint32 cnoOfAllocatedPages; + Uint32 m_max_allocate_pages; Tablerec *tablerec; Uint32 cnoOfTablerec; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index f4fd80a482a..3a8e996d435 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -308,6 +308,12 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal) Uint32 noOfTriggers= 0; Uint32 tmp= 0; + + if (ndb_mgm_get_int_parameter(p, CFG_DB_MAX_ALLOCATE, &tmp)) + tmp = 32 * 1024 * 1024; + m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; + + tmp = 0; ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp)); initPageRangeSize(tmp); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec)); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 8493e0561cc..ed8f63ce3ad 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -434,6 +434,11 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) // We will grow by 18.75% plus two more additional pages to grow // a little bit quicker in the beginning. 
/* -----------------------------------------------------------------*/ + + if (noAllocPages > m_max_allocate_pages) + { + noAllocPages = m_max_allocate_pages; + } Uint32 allocated = allocFragPages(regFragPtr, noAllocPages); regFragPtr->noOfPagesToGrow += allocated; }//Dbtup::allocMoreFragPages() diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index 56aacda214d..229824c49bf 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1313,6 +1313,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "0", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_MAX_ALLOCATE, + "MaxAllocate", + DB_TOKEN, + "Maximum size of allocation to use when allocating memory for tables", + ConfigInfo::CI_USED, + false, + ConfigInfo::CI_INT, + "32M", + "1M", + "1G" }, + { CFG_DB_MEMREPORT_FREQUENCY, "MemReportFrequency", From e42de77219131385fa958224399c0c56ca08d8f4 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 22:54:00 +1000 Subject: [PATCH 23/38] [PATCH] BUG#29063 TESTCASE mgmapi: connect timeout set incorrectly Add test to testMgm for Connect timeout. add to autotest. Index: ndb-work/storage/ndb/test/ndbapi/testMgm.cpp =================================================================== storage/ndb/test/ndbapi/testMgm.cpp: BUG#29063 TESTCASE mgmapi: connect timeout set incorrectly storage/ndb/test/run-test/daily-basic-tests.txt: BUG#29063 TESTCASE mgmapi: connect timeout set incorrectly --- storage/ndb/test/ndbapi/testMgm.cpp | 75 +++++++++++++++++++ .../ndb/test/run-test/daily-basic-tests.txt | 4 + 2 files changed, 79 insertions(+) diff --git a/storage/ndb/test/ndbapi/testMgm.cpp b/storage/ndb/test/ndbapi/testMgm.cpp index cc074087bdb..e43972c8c29 100644 --- a/storage/ndb/test/ndbapi/testMgm.cpp +++ b/storage/ndb/test/ndbapi/testMgm.cpp @@ -212,6 +212,76 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step) } } +int runTestApiConnectTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_FAILED; + int cc= 0; + int mgmd_nodeid= 0; + ndb_mgm_reply reply; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + ndbout << "TEST connect timeout" << endl; + + ndb_mgm_set_timeout(h, 3000); + + struct timeval tstart, tend; + int secs; + timerclear(&tstart); + timerclear(&tend); + gettimeofday(&tstart,NULL); + + ndb_mgm_connect(h,0,0,0); + + gettimeofday(&tend,NULL); + + secs= tend.tv_sec - tstart.tv_sec; + ndbout << "Took about: " << secs <<" seconds"<getRemoteMgm(); @@ -727,6 +797,11 @@ TESTCASE("ApiSessionFailure", "Test failures in MGMAPI session"){ INITIALIZER(runTestApiSession); +} +TESTCASE("ApiConnectTimeout", + "Connect timeout tests for MGMAPI"){ + INITIALIZER(runTestApiConnectTimeout); + } TESTCASE("ApiTimeoutBasic", "Basic timeout tests for MGMAPI"){ diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index 6ce2da47670..0e1cdfc647e 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -898,6 +898,10 @@ max-time: 120 cmd: testMgm args: -n ApiSessionFailure T1 +max-time: 15 +cmd: testMgm +args: -n ApiConnectTimeout T1 + max-time: 120 cmd: testMgm args: -n ApiTimeoutBasic T1 From 495a9490bd98a888c7fbcf704cce09421625b27e Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 22:54:14 +1000 Subject: [PATCH 24/38] [PATCH] BUG#29063 mgmapi: connect timeout set incorrectly correctly divide timeout by 1000 to 
convert to seconds for SocketClient Index: ndb-work/storage/ndb/src/mgmapi/mgmapi.cpp =================================================================== storage/ndb/src/mgmapi/mgmapi.cpp: BUG#29063 mgmapi: connect timeout set incorrectly --- storage/ndb/src/mgmapi/mgmapi.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index e7dc1d1d503..5f975da8c73 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -524,7 +524,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET; Uint32 i; SocketClient s(0, 0); - s.set_connect_timeout(handle->timeout); + s.set_connect_timeout((handle->timeout+999)/1000); if (!s.init()) { fprintf(handle->errstream, From 18c6c75a35f15b4611360d9e77e4d9a2f4b248b7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:33:37 +1000 Subject: [PATCH 25/38] [PATCH] Disable mysql_upgrade test (Bug#28560) Index: ndb-work/mysql-test/t/disabled.def =================================================================== mysql-test/t/disabled.def: Disable mysql_upgrade test (Bug#28560) --- mysql-test/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index e283ca9458f..90fd997e615 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -40,3 +40,4 @@ synchronization : Bug#24529 Test 'synchronization' fails on Mac pushb #rpl_ndb_dd_advance : Bug#25913 rpl_ndb_dd_advance fails randomly ndb_partition_error2 : HF is not sure if the test can work as internded on all the platforms +mysql_upgrade : Bug#28560 test links to /usr/local/mysql/lib libraries, causes non-determinism and failures on ABI breakage From 1f2ce0eb4828f07db69e5fa06f3070dc130fc4f2 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:33:51 +1000 Subject: [PATCH 26/38] [PATCH] Add tests for ndb variables (related to BUG#26675) This is somewhat related to BUG#26675 (ndb_connectstring not reported in show global variables) Index: ndb-work/mysql-test/r/ndb_basic.result =================================================================== mysql-test/r/ndb_basic.result: Add tests for ndb variables (related to BUG#26675) mysql-test/t/ndb_basic.test: Add tests for ndb variables (related to BUG#26675) --- mysql-test/r/ndb_basic.result | 21 +++++++++++++++++++++ mysql-test/t/ndb_basic.test | 8 ++++++++ 2 files changed, 29 insertions(+) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index c84c7fffd66..0f28e6ac497 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -1,5 +1,26 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; drop database if exists mysqltest; +SHOW GLOBAL STATUS LIKE 'ndb%'; +Variable_name Value +Ndb_cluster_node_id # +Ndb_config_from_host # +Ndb_config_from_port # +Ndb_number_of_data_nodes # +SHOW GLOBAL VARIABLES LIKE 'ndb%'; +Variable_name Value +ndb_autoincrement_prefetch_sz # +ndb_cache_check_time # +ndb_connectstring # +ndb_extra_logging # +ndb_force_send # +ndb_index_stat_cache_entries # +ndb_index_stat_enable # +ndb_index_stat_update_freq # +ndb_report_thresh_binlog_epoch_slip # +ndb_report_thresh_binlog_mem_usage # +ndb_use_copying_alter_table # +ndb_use_exact_count # +ndb_use_transactions # CREATE TABLE t1 ( pk1 INT NOT NULL PRIMARY KEY, attr1 INT NOT NULL, diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 870c7435d3e..6668ca86a94 100644 --- 
a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -22,6 +22,14 @@ drop database if exists mysqltest; # table handler is working # +# +# Show status and variables +# +--replace_column 2 # +SHOW GLOBAL STATUS LIKE 'ndb%'; +--replace_column 2 # +SHOW GLOBAL VARIABLES LIKE 'ndb%'; + # # Create a normal table with primary key # From 21819c2afa0e85595a01b2c3850c7bc60b451ca7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:34:09 +1000 Subject: [PATCH 27/38] [PATCH] BUG#29073 Store history for ndb_mgm Index: ndb-work/storage/ndb/src/mgmclient/main.cpp =================================================================== storage/ndb/src/mgmclient/main.cpp: BUG#29073 Store history for ndb_mgm --- storage/ndb/src/mgmclient/main.cpp | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp index 44408362f09..429ccb27b2e 100644 --- a/storage/ndb/src/mgmclient/main.cpp +++ b/storage/ndb/src/mgmclient/main.cpp @@ -155,10 +155,31 @@ int main(int argc, char** argv){ signal(SIGPIPE, handler); com = new Ndb_mgmclient(opt_connect_str,1); int ret= 0; + BaseString histfile; if (!opt_execute_str) { + char *histfile_env= getenv("NDB_MGM_HISTFILE"); + if (histfile_env) + histfile.assign(histfile_env,strlen(histfile_env)); + else if(getenv("HOME")) + { + histfile.assign(getenv("HOME"),strlen(getenv("HOME"))); + histfile.append("/.ndb_mgm_history"); + } + if (histfile.length()) + read_history(histfile.c_str()); + ndbout << "-- NDB Cluster -- Management Client --" << endl; while(read_and_execute(_try_reconnect)); + + if (histfile.length()) + { + BaseString histfile_tmp; + histfile_tmp.assign(histfile); + histfile_tmp.append(".TMP"); + if(!write_history(histfile_tmp.c_str())) + my_rename(histfile_tmp.c_str(), histfile.c_str(), MYF(MY_WME)); + } } else { From 85525c4a544f80ee9c8721aae944e7b815baa194 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:34:22 +1000 Subject: [PATCH 28/38] [PATCH] Enable test for (Closed) bug 16445 Bug was updated on May 30th by Tomas to say that hasn't been seen in PB since global dict cache rewrite. This test should probably be enabled then. Index: ndb-work/mysql-test/t/ndb_basic.test =================================================================== mysql-test/r/ndb_basic.result: Enable test for (Closed) bug 16445 mysql-test/t/ndb_basic.test: Enable test for (Closed) bug 16445 --- mysql-test/r/ndb_basic.result | 7 +++++++ mysql-test/t/ndb_basic.test | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 0f28e6ac497..4eddaeb1227 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -1,5 +1,12 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; drop database if exists mysqltest; +CREATE TABLE t1 ( +pk1 INT NOT NULL PRIMARY KEY, +attr1 INT NOT NULL, +attr2 INT, +attr3 VARCHAR(10) +) ENGINE=ndbcluster; +drop table t1; SHOW GLOBAL STATUS LIKE 'ndb%'; Variable_name Value Ndb_cluster_node_id # diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 6668ca86a94..90839ce6cab 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -6,16 +6,16 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7; drop database if exists mysqltest; --enable_warnings -## workaround for bug#16445 -## remove to reproduce bug and run tests from ndb start -## and with ndb_autodiscover disabled. 
Fails on Linux 50 % of the times -#CREATE TABLE t1 ( -# pk1 INT NOT NULL PRIMARY KEY, -# attr1 INT NOT NULL, -# attr2 INT, -# attr3 VARCHAR(10) -#) ENGINE=ndbcluster; -#drop table t1; +# workaround for bug#16445 +# remove to reproduce bug and run tests from ndb start +# and with ndb_autodiscover disabled. Fails on Linux 50 % of the times +CREATE TABLE t1 ( + pk1 INT NOT NULL PRIMARY KEY, + attr1 INT NOT NULL, + attr2 INT, + attr3 VARCHAR(10) +) ENGINE=ndbcluster; +drop table t1; # # Basic test to show that the NDB From 42044a87abdc3212f87eacfbd57bfb9ad21a76c7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:34:36 +1000 Subject: [PATCH 29/38] [PATCH] BUG#29074 preserve file timestamps in ndb_error_reporter Index: ndb-work/storage/ndb/tools/ndb_error_reporter =================================================================== storage/ndb/tools/ndb_error_reporter: BUG#29074 preserve file timestamps in ndb_error_reporter --- storage/ndb/tools/ndb_error_reporter | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/ndb/tools/ndb_error_reporter b/storage/ndb/tools/ndb_error_reporter index 2b5aadb6171..7ad7a2f478a 100644 --- a/storage/ndb/tools/ndb_error_reporter +++ b/storage/ndb/tools/ndb_error_reporter @@ -62,13 +62,13 @@ foreach my $node (@nodes) (($config_get_fs)?" with filesystem":""). "\n\n"; my $recurse= ($config_get_fs)?'-r ':''; - system 'scp '.$recurse.$config_username.config($node,'host'). + system 'scp -p '.$recurse.$config_username.config($node,'host'). ':'.config($node,'datadir')."/ndb_".$node."* ". "$reportdir/\n"; } print "\n\n Copying configuration file...\n\n\t$config_file\n\n"; -system "cp $config_file $reportdir/"; +system "cp -p $config_file $reportdir/"; my $r = system 'bzip2 2>&1 > /dev/null < /dev/null'; my $outfile; From f344a35fee7b78011ed63986cf5c40166a1a8992 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:52:47 +1000 Subject: [PATCH 30/38] [PATCH] Add MAINTAINERS file for NDB Index: ndb-merge/storage/ndb/MAINTAINERS =================================================================== storage/ndb/MAINTAINERS: Add MAINTAINERS file for NDB --- storage/ndb/MAINTAINERS | 157 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 storage/ndb/MAINTAINERS diff --git a/storage/ndb/MAINTAINERS b/storage/ndb/MAINTAINERS new file mode 100644 index 00000000000..76318687dde --- /dev/null +++ b/storage/ndb/MAINTAINERS @@ -0,0 +1,157 @@ +MySQL Cluster MAINTAINERS +------------------------- + +This is a list of knowledgable people in parts of the NDB code. + +In changing that area of code, you probably want to talk to the +people who know a lot about it to look over the patch. + +When sending patches and queries, always CC the mailing list. + +If no list specified, assume internals@lists.mysql.com + +P: Person +M: Mail +L: Mailing list +W: Web page with status/info +C: Comment +SRC: Source directory (relative to this directory) +T: SCM tree type and location +S: Status, one of: + + Supported: Somebody is paid to maintain this. + Maintained: Not their primary job, but maintained. + Orphan: No current obvious maintainer. + Obsolete: Replaced by something else. 
+ +------------------------------------------------------------- + +Binlog Injector +SRC: ha_ndbcluster_binlog.cc +C: see also row based replication +P: Stewart Smith +M: stewart@mysql.com +C: Original author +P: Tomas Ulin +M: tomas@mysql.com +C: Lots of updates +P: Martin Skold +M: martin@mysql.com +C: Metadata ops +S: Supported + +BLOBs +SRC: ha_ndbcluster.cc +SRC: src/ndbapi/NdbBlob* +P: Pekka +M: pekka@mysql.com +S: Supported + +cpcd/cpcc +SRC: src/cw/cpcd +SRC: src/cw/cpcc +C: Maintained only as part of autotest +P: Jonas Orland +M: jonas@mysql.com +S: Maintained + +cpcc-win32 +SRC: src/cw/cpcc-win32 +S: Obsolete + +Handler +SRC: ha_ndbcluster.cc +P: Martin Skold +M: martin@mysql.com +S: Supported + +Management Server +SRC: src/mgmsrv/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +Management Client +SRC: src/mgmclient/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +Management API +SRC: src/mgmapi/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +NDB API Examples +SRC: ndbapi-examples/ +P: Tomas Ulin +M: tomas@mysql.com +C: Originally by Lars +P: Lars Thalmann +M: lars@mysql.com +S: Maintained + +tsman +C: Disk Data (Table Space MANager) +SRC: src/kernel/blocks/tsman.cpp +SRC: src/kernel/blocks/tsman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +lgman +C: Disk Data (LoG MANager) +SRC: src/kernel/blocks/lgman.cpp +SRC: src/kernel/blocks/lgman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +pgman +C: Disk Data (PaGe MANager) +SRC: src/kernel/blocks/lgman.cpp +SRC: src/kernel/blocks/lgman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +SUMA +C: SUbscription MAnager +C: Used for replication +SRC: src/kernel/blocks/suma/ +P: Tomas Ulin +P: tomas@mysql.com +P: Jonas Oreland +P: jonas@mysql.com +S: Supported + +TRIX +C: TRiggers and IndeXs (but only online Index build) +SRC: src/kernel/blocks/trix +P: Martin Skold +P: mskold@mysql.com +S: Supported + +QMGR +C: Cluster (with a Q) ManaGeR +C: Heartbeats etc +SRC: src/kernel/blocks/qmgr +S: Supported + +NDBFS +C: NDB FileSystem +C: File System abstraction +SRC: src/kernel/blocks/ndbfs +S: Supported + +TRIX +C: TRiggers and IndeXs (but only online Index build) +SRC: src/kernel/blocks/trix +S: Supported + +TRIX +C: TRiggers and IndeXs (but only online Index build) +SRC: src/kernel/blocks/trix +S: Supported + From 29987e6e29990e24a90237bf360e9c1097f2a1e9 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 13 Jun 2007 23:53:01 +1000 Subject: [PATCH 31/38] [PATCH] add knielsen as MAINTAINER of NDBAPI NdbRecord examples Index: ndb-merge/storage/ndb/MAINTAINERS =================================================================== storage/ndb/MAINTAINERS: add knielsen as MAINTAINER of NDBAPI NdbRecord examples --- storage/ndb/MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/storage/ndb/MAINTAINERS b/storage/ndb/MAINTAINERS index 76318687dde..d1547d48234 100644 --- a/storage/ndb/MAINTAINERS +++ b/storage/ndb/MAINTAINERS @@ -92,6 +92,12 @@ P: Lars Thalmann M: lars@mysql.com S: Maintained +NDB API NdbRecord Examples +SRC: ndbapi-examples/ +P: Kristian Nielsen +M: knielsen@mysql.com +S: Maintained + tsman C: Disk Data (Table Space MANager) SRC: src/kernel/blocks/tsman.cpp From cb9817cc239ac7a42f4a12ef72379bdf0d18a934 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 11:59:25 +1000 Subject: [PATCH 32/38] fix build of mgm client with history - caught by pb. 
storage/ndb/src/mgmclient/main.cpp: fix build issues only caught by pb (mgm client history related) --- storage/ndb/src/mgmclient/main.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp index 429ccb27b2e..7513064d273 100644 --- a/storage/ndb/src/mgmclient/main.cpp +++ b/storage/ndb/src/mgmclient/main.cpp @@ -23,6 +23,8 @@ extern "C" { #elif !defined(__NETWARE__) #include extern "C" int add_history(const char *command); /* From readline directory */ +extern "C" int read_history(const char *command); +extern "C" int write_history(const char *command); #define HAVE_READLINE #endif } @@ -158,6 +160,7 @@ int main(int argc, char** argv){ BaseString histfile; if (!opt_execute_str) { +#ifdef HAVE_READLINE char *histfile_env= getenv("NDB_MGM_HISTFILE"); if (histfile_env) histfile.assign(histfile_env,strlen(histfile_env)); @@ -168,10 +171,12 @@ int main(int argc, char** argv){ } if (histfile.length()) read_history(histfile.c_str()); +#endif ndbout << "-- NDB Cluster -- Management Client --" << endl; while(read_and_execute(_try_reconnect)); +#ifdef HAVE_READLINE if (histfile.length()) { BaseString histfile_tmp; @@ -180,6 +185,7 @@ int main(int argc, char** argv){ if(!write_history(histfile_tmp.c_str())) my_rename(histfile_tmp.c_str(), histfile.c_str(), MYF(MY_WME)); } +#endif } else { From 0d5c6b702a516c8cff37b9020d918bc4be50f184 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 11:26:54 +0200 Subject: [PATCH 33/38] move all error codes to ndberror.c - step 1 mgmtsrvr error codes storage/ndb/src/mgmsrv/ndb_mgmd_error.h: New BitKeeper file ``storage/ndb/src/mgmsrv/ndb_mgmd_error.h'' --- storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 76 +------------------------ storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 39 ------------- storage/ndb/src/mgmsrv/ndb_mgmd_error.h | 33 +++++++++++ storage/ndb/src/ndbapi/ndberror.c | 30 ++++++++++ 4 files changed, 64 insertions(+), 114 deletions(-) create mode 100644 storage/ndb/src/mgmsrv/ndb_mgmd_error.h diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index f84c79b704f..af708664a69 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -18,6 +18,7 @@ #include "MgmtSrvr.hpp" #include "MgmtErrorReporter.hpp" +#include "ndb_mgmd_error.h" #include #include @@ -239,13 +240,6 @@ MgmtSrvr::stopEventLog() // Nothing yet } -class ErrorItem -{ -public: - int _errorCode; - const char * _errorText; -}; - bool MgmtSrvr::setEventLogFilter(int severity, int enable) { @@ -268,62 +262,6 @@ MgmtSrvr::isEventLogFilterEnabled(int severity) return g_eventLogger.isEnable((Logger::LoggerLevel)severity); } -static ErrorItem errorTable[] = -{ - {MgmtSrvr::NO_CONTACT_WITH_PROCESS, "No contact with the process (dead ?)."}, - {MgmtSrvr::PROCESS_NOT_CONFIGURED, "The process is not configured."}, - {MgmtSrvr::WRONG_PROCESS_TYPE, - "The process has wrong type. Expected a DB process."}, - {MgmtSrvr::COULD_NOT_ALLOCATE_MEMORY, "Could not allocate memory."}, - {MgmtSrvr::SEND_OR_RECEIVE_FAILED, "Send to process or receive failed."}, - {MgmtSrvr::INVALID_LEVEL, "Invalid level. Should be between 1 and 30."}, - {MgmtSrvr::INVALID_ERROR_NUMBER, "Invalid error number. 
Should be >= 0."}, - {MgmtSrvr::INVALID_TRACE_NUMBER, "Invalid trace number."}, - {MgmtSrvr::NOT_IMPLEMENTED, "Not implemented."}, - {MgmtSrvr::INVALID_BLOCK_NAME, "Invalid block name"}, - - {MgmtSrvr::CONFIG_PARAM_NOT_EXIST, - "The configuration parameter does not exist for the process type."}, - {MgmtSrvr::CONFIG_PARAM_NOT_UPDATEABLE, - "The configuration parameter is not possible to update."}, - {MgmtSrvr::VALUE_WRONG_FORMAT_INT_EXPECTED, - "Incorrect value. Expected integer."}, - {MgmtSrvr::VALUE_TOO_LOW, "Value is too low."}, - {MgmtSrvr::VALUE_TOO_HIGH, "Value is too high."}, - {MgmtSrvr::VALUE_WRONG_FORMAT_BOOL_EXPECTED, - "Incorrect value. Expected TRUE or FALSE."}, - - {MgmtSrvr::CONFIG_FILE_OPEN_WRITE_ERROR, - "Could not open configuration file for writing."}, - {MgmtSrvr::CONFIG_FILE_OPEN_READ_ERROR, - "Could not open configuration file for reading."}, - {MgmtSrvr::CONFIG_FILE_WRITE_ERROR, - "Write error when writing configuration file."}, - {MgmtSrvr::CONFIG_FILE_READ_ERROR, - "Read error when reading configuration file."}, - {MgmtSrvr::CONFIG_FILE_CLOSE_ERROR, "Could not close configuration file."}, - - {MgmtSrvr::CONFIG_CHANGE_REFUSED_BY_RECEIVER, - "The change was refused by the receiving process."}, - {MgmtSrvr::COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM, - "The change could not be synced against physical medium."}, - {MgmtSrvr::CONFIG_FILE_CHECKSUM_ERROR, - "The config file is corrupt. Checksum error."}, - {MgmtSrvr::NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE, - "It is not possible to send an update of a configuration variable " - "to this kind of process."}, - {MgmtSrvr::NODE_SHUTDOWN_IN_PROGESS, "Node shutdown in progress" }, - {MgmtSrvr::SYSTEM_SHUTDOWN_IN_PROGRESS, "System shutdown in progress" }, - {MgmtSrvr::NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, - "Node shutdown would cause system crash" }, - {MgmtSrvr::UNSUPPORTED_NODE_SHUTDOWN, - "Unsupported multi node shutdown. Abort option required." }, - {MgmtSrvr::NODE_NOT_API_NODE, "The specified node is not an API node." 
}, - {MgmtSrvr::OPERATION_NOT_ALLOWED_START_STOP, - "Operation not allowed while nodes are starting or stopping."}, - {MgmtSrvr::NO_CONTACT_WITH_DB_NODES, "No contact with database nodes" } -}; - int MgmtSrvr::translateStopRef(Uint32 errCode) { switch(errCode){ @@ -343,8 +281,6 @@ int MgmtSrvr::translateStopRef(Uint32 errCode) return 4999; } -static int noOfErrorCodes = sizeof(errorTable) / sizeof(ErrorItem); - int MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const { @@ -1969,18 +1905,8 @@ MgmtSrvr::dumpState(int nodeId, const Uint32 args[], Uint32 no) const char* MgmtSrvr::getErrorText(int errorCode, char *buf, int buf_sz) { - - for (int i = 0; i < noOfErrorCodes; ++i) { - if (errorCode == errorTable[i]._errorCode) { - BaseString::snprintf(buf, buf_sz, errorTable[i]._errorText); - buf[buf_sz-1]= 0; - return buf; - } - } - ndb_error_string(errorCode, buf, buf_sz); buf[buf_sz-1]= 0; - return buf; } diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp index a54b7866091..90287554ef8 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -148,45 +148,6 @@ public: */ bool isEventLogFilterEnabled(int severity); - STATIC_CONST( NO_CONTACT_WITH_PROCESS = 5000 ); - STATIC_CONST( PROCESS_NOT_CONFIGURED = 5001 ); - STATIC_CONST( WRONG_PROCESS_TYPE = 5002 ); - STATIC_CONST( COULD_NOT_ALLOCATE_MEMORY = 5003 ); - STATIC_CONST( SEND_OR_RECEIVE_FAILED = 5005 ); - STATIC_CONST( INVALID_LEVEL = 5006 ); - STATIC_CONST( INVALID_ERROR_NUMBER = 5007 ); - STATIC_CONST( INVALID_TRACE_NUMBER = 5008 ); - STATIC_CONST( NOT_IMPLEMENTED = 5009 ); - STATIC_CONST( INVALID_BLOCK_NAME = 5010 ); - - STATIC_CONST( CONFIG_PARAM_NOT_EXIST = 5011 ); - STATIC_CONST( CONFIG_PARAM_NOT_UPDATEABLE = 5012 ); - STATIC_CONST( VALUE_WRONG_FORMAT_INT_EXPECTED = 5013 ); - STATIC_CONST( VALUE_TOO_LOW = 5014 ); - STATIC_CONST( VALUE_TOO_HIGH = 5015 ); - STATIC_CONST( VALUE_WRONG_FORMAT_BOOL_EXPECTED = 5016 ); - - STATIC_CONST( CONFIG_FILE_OPEN_WRITE_ERROR = 5017 ); - STATIC_CONST( CONFIG_FILE_OPEN_READ_ERROR = 5018 ); - STATIC_CONST( CONFIG_FILE_WRITE_ERROR = 5019 ); - STATIC_CONST( CONFIG_FILE_READ_ERROR = 5020 ); - STATIC_CONST( CONFIG_FILE_CLOSE_ERROR = 5021 ); - - STATIC_CONST( CONFIG_CHANGE_REFUSED_BY_RECEIVER = 5022 ); - STATIC_CONST( COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM = 5023 ); - STATIC_CONST( CONFIG_FILE_CHECKSUM_ERROR = 5024 ); - STATIC_CONST( NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE = 5025 ); - - STATIC_CONST( NODE_SHUTDOWN_IN_PROGESS = 5026 ); - STATIC_CONST( SYSTEM_SHUTDOWN_IN_PROGRESS = 5027 ); - STATIC_CONST( NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH = 5028 ); - - STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 ); - STATIC_CONST( UNSUPPORTED_NODE_SHUTDOWN = 5031 ); - - STATIC_CONST( NODE_NOT_API_NODE = 5062 ); - STATIC_CONST( OPERATION_NOT_ALLOWED_START_STOP = 5063 ); - /** * This enum specifies the different signal loggig modes possible to set * with the setSignalLoggingMode method. diff --git a/storage/ndb/src/mgmsrv/ndb_mgmd_error.h b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h new file mode 100644 index 00000000000..2438f15c808 --- /dev/null +++ b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDB_MGMD_ERROR_H +#define NDB_MGMD_ERROR_H + +#define NO_CONTACT_WITH_PROCESS 5000 +#define WRONG_PROCESS_TYPE 5002 +#define SEND_OR_RECEIVE_FAILED 5005 +#define INVALID_ERROR_NUMBER 5007 +#define INVALID_TRACE_NUMBER 5008 +#define INVALID_BLOCK_NAME 5010 +#define NODE_SHUTDOWN_IN_PROGESS 5026 +#define SYSTEM_SHUTDOWN_IN_PROGRESS 5027 +#define NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH 5028 +#define NO_CONTACT_WITH_DB_NODES 5030 +#define UNSUPPORTED_NODE_SHUTDOWN 5031 +#define NODE_NOT_API_NODE 5062 +#define OPERATION_NOT_ALLOWED_START_STOP 5063 + +#endif diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index b10859c3180..914acd17c08 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -19,6 +19,9 @@ #include #include +#include "../mgmsrv/ndb_mgmd_error.h" + + typedef struct ErrorBundle { int code; int mysql_code; @@ -619,6 +622,33 @@ ErrorBundle ErrorCodes[] = { { 4273, DMEC, IE, "No blob table in dict cache" }, { 4274, DMEC, IE, "Corrupted main table PK in blob operation" }, { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" }, + + { NO_CONTACT_WITH_PROCESS, DMEC, AE, + "No contact with the process (dead ?)."}, + { WRONG_PROCESS_TYPE, DMEC, AE, + "The process has wrong type. Expected a DB process."}, + { SEND_OR_RECEIVE_FAILED, DMEC, AE, + "Send to process or receive failed."}, + { INVALID_ERROR_NUMBER, DMEC, AE, + "Invalid error number. Should be >= 0."}, + { INVALID_TRACE_NUMBER, DMEC, AE, + "Invalid trace number."}, + { INVALID_BLOCK_NAME, DMEC, AE, + "Invalid block name"}, + { NODE_SHUTDOWN_IN_PROGESS, DMEC, AE, + "Node shutdown in progress" }, + { SYSTEM_SHUTDOWN_IN_PROGRESS, DMEC, AE, + "System shutdown in progress" }, + { NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, DMEC, AE, + "Node shutdown would cause system crash" }, + { UNSUPPORTED_NODE_SHUTDOWN, DMEC, AE, + "Unsupported multi node shutdown. Abort option required." }, + { NODE_NOT_API_NODE, DMEC, AE, + "The specified node is not an API node." 
}, + { OPERATION_NOT_ALLOWED_START_STOP, DMEC, AE, + "Operation not allowed while nodes are starting or stopping."}, + { NO_CONTACT_WITH_DB_NODES, DMEC, AE, + "No contact with database nodes" } }; static From ef3ed50bc0be1170cf22bcdc914374b3a1a67407 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 12:35:35 +0200 Subject: [PATCH 34/38] get mgmapi error codes into perror storage/ndb/include/mgmapi/mgmapi_error.h: New BitKeeper file ``storage/ndb/include/mgmapi/mgmapi_error.h'' --- extra/perror.c | 20 +++- storage/ndb/include/mgmapi/mgmapi.h | 100 +----------------- storage/ndb/include/mgmapi/mgmapi_error.h | 121 ++++++++++++++++++++++ 3 files changed, 140 insertions(+), 101 deletions(-) create mode 100644 storage/ndb/include/mgmapi/mgmapi_error.h diff --git a/extra/perror.c b/extra/perror.c index c49869be681..6ab2afe0b71 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -25,6 +25,7 @@ #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE #include "../storage/ndb/src/ndbapi/ndberror.c" #include "../storage/ndb/src/kernel/error/ndbd_exit_codes.c" +#include "../storage/ndb/include/mgmapi/mgmapi_error.h" #endif static my_bool verbose, print_all_codes; @@ -32,6 +33,20 @@ static my_bool verbose, print_all_codes; #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE static my_bool ndb_code; static char ndb_string[1024]; +int mgmapi_error_string(int err_no, char *str, int size) +{ + int i; + for (i= 0; i < ndb_mgm_noOfErrorMsgs; i++) + { + if (ndb_mgm_error_msgs[i].code == err_no) + { + my_snprintf(str, size-1, "%s", ndb_mgm_error_msgs[i].msg); + str[size-1]= '\0'; + return 0; + } + } + return -1; +} #endif static struct my_option my_long_options[] = @@ -238,8 +253,9 @@ int main(int argc,char *argv[]) #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE if (ndb_code) { - if ((ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0) && - (ndbd_exit_string(code, ndb_string, sizeof(ndb_string)) < 0)) + if ((ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0) && + (ndbd_exit_string(code, ndb_string, sizeof(ndb_string)) < 0) && + (mgmapi_error_string(code, ndb_string, sizeof(ndb_string)) < 0)) { msg= 0; } diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h index ffed44c7da1..0853f5a4422 100644 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ b/storage/ndb/include/mgmapi/mgmapi.h @@ -18,6 +18,7 @@ #include "mgmapi_config_parameters.h" #include "ndb_logevent.h" +#include "mgmapi_error.h" #define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1 #define NDB_MGM_MAX_LOGLEVEL 15 @@ -211,105 +212,6 @@ extern "C" { #endif }; - /** - * Error codes - */ - enum ndb_mgm_error { - /** Not an error */ - NDB_MGM_NO_ERROR = 0, - - /* Request for service errors */ - /** Supplied connectstring is illegal */ - NDB_MGM_ILLEGAL_CONNECT_STRING = 1001, - /** Supplied NdbMgmHandle is illegal */ - NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005, - /** Illegal reply from server */ - NDB_MGM_ILLEGAL_SERVER_REPLY = 1006, - /** Illegal number of nodes */ - NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007, - /** Illegal node status */ - NDB_MGM_ILLEGAL_NODE_STATUS = 1008, - /** Memory allocation error */ - NDB_MGM_OUT_OF_MEMORY = 1009, - /** Management server not connected */ - NDB_MGM_SERVER_NOT_CONNECTED = 1010, - /** Could not connect to socker */ - NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011, - /** Could not bind local address */ - NDB_MGM_BIND_ADDRESS = 1012, - - /* Alloc node id failures */ - /** Generic error, retry may succeed */ - NDB_MGM_ALLOCID_ERROR = 1101, - /** Non retriable error */ - NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102, 
- - /* Service errors - Start/Stop Node or System */ - /** Start failed */ - NDB_MGM_START_FAILED = 2001, - /** Stop failed */ - NDB_MGM_STOP_FAILED = 2002, - /** Restart failed */ - NDB_MGM_RESTART_FAILED = 2003, - - /* Service errors - Backup */ - /** Unable to start backup */ - NDB_MGM_COULD_NOT_START_BACKUP = 3001, - /** Unable to abort backup */ - NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002, - - /* Service errors - Single User Mode */ - /** Unable to enter single user mode */ - NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001, - /** Unable to exit single user mode */ - NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002, - - /* Usage errors */ - /** Usage error */ - NDB_MGM_USAGE_ERROR = 5001 - }; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - struct Ndb_Mgm_Error_Msg { - enum ndb_mgm_error code; - const char * msg; - }; - const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = { - { NDB_MGM_NO_ERROR, "No error" }, - - /* Request for service errors */ - { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" }, - { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" }, - { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" }, - { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" }, - { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" }, - { NDB_MGM_OUT_OF_MEMORY, "Out of memory" }, - { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" }, - { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" }, - - /* Service errors - Start/Stop Node or System */ - { NDB_MGM_START_FAILED, "Start failed" }, - { NDB_MGM_STOP_FAILED, "Stop failed" }, - { NDB_MGM_RESTART_FAILED, "Restart failed" }, - - /* Service errors - Backup */ - { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" }, - { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" }, - - /* Service errors - Single User Mode */ - { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE, - "Could not enter single user mode" }, - { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE, - "Could not exit single user mode" }, - - /* Usage errors */ - { NDB_MGM_USAGE_ERROR, - "Usage error" } - }; - const int ndb_mgm_noOfErrorMsgs = - sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg); -#endif - /** * Status of a node in the cluster. * diff --git a/storage/ndb/include/mgmapi/mgmapi_error.h b/storage/ndb/include/mgmapi/mgmapi_error.h new file mode 100644 index 00000000000..2d0aa1ded0f --- /dev/null +++ b/storage/ndb/include/mgmapi/mgmapi_error.h @@ -0,0 +1,121 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef MGMAPI_ERROR_H +#define MGMAPI_ERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + /** + * Error codes + */ + enum ndb_mgm_error { + /** Not an error */ + NDB_MGM_NO_ERROR = 0, + + /* Request for service errors */ + /** Supplied connectstring is illegal */ + NDB_MGM_ILLEGAL_CONNECT_STRING = 1001, + /** Supplied NdbMgmHandle is illegal */ + NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005, + /** Illegal reply from server */ + NDB_MGM_ILLEGAL_SERVER_REPLY = 1006, + /** Illegal number of nodes */ + NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007, + /** Illegal node status */ + NDB_MGM_ILLEGAL_NODE_STATUS = 1008, + /** Memory allocation error */ + NDB_MGM_OUT_OF_MEMORY = 1009, + /** Management server not connected */ + NDB_MGM_SERVER_NOT_CONNECTED = 1010, + /** Could not connect to socker */ + NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011, + /** Could not bind local address */ + NDB_MGM_BIND_ADDRESS = 1012, + + /* Alloc node id failures */ + /** Generic error, retry may succeed */ + NDB_MGM_ALLOCID_ERROR = 1101, + /** Non retriable error */ + NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102, + + /* Service errors - Start/Stop Node or System */ + /** Start failed */ + NDB_MGM_START_FAILED = 2001, + /** Stop failed */ + NDB_MGM_STOP_FAILED = 2002, + /** Restart failed */ + NDB_MGM_RESTART_FAILED = 2003, + + /* Service errors - Backup */ + /** Unable to start backup */ + NDB_MGM_COULD_NOT_START_BACKUP = 3001, + /** Unable to abort backup */ + NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002, + + /* Service errors - Single User Mode */ + /** Unable to enter single user mode */ + NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001, + /** Unable to exit single user mode */ + NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002, + + /* Usage errors */ + /** Usage error */ + NDB_MGM_USAGE_ERROR = 5001 + }; + struct Ndb_Mgm_Error_Msg { + enum ndb_mgm_error code; + const char * msg; + }; + const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = { + { NDB_MGM_NO_ERROR, "No error" }, + + /* Request for service errors */ + { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" }, + { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" }, + { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" }, + { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" }, + { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" }, + { NDB_MGM_OUT_OF_MEMORY, "Out of memory" }, + { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" }, + { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" }, + + /* Service errors - Start/Stop Node or System */ + { NDB_MGM_START_FAILED, "Start failed" }, + { NDB_MGM_STOP_FAILED, "Stop failed" }, + { NDB_MGM_RESTART_FAILED, "Restart failed" }, + + /* Service errors - Backup */ + { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" }, + { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" }, + + /* Service errors - Single User Mode */ + { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE, + "Could not enter single user mode" }, + { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE, + "Could not exit single user mode" }, + + /* Usage errors */ + { NDB_MGM_USAGE_ERROR, + "Usage error" } + }; + const int ndb_mgm_noOfErrorMsgs = + sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg); +#ifdef __cplusplus +} +#endif + +#endif From 28444ac8b157fa2db16a886b2eb462161a73c97d Mon Sep 17 00:00:00 
2001 From: unknown Date: Thu, 14 Jun 2007 12:51:13 +0200 Subject: [PATCH 35/38] Bug #29103 ndb_restore segfaults on NULL var[char|binary] mysql-test/r/ndb_restore.result: Bug #29103 ndb_restore segfaults on NULL var[char|binary] - add extra row with NULL value to test mysql-test/t/ndb_restore.test: Bug #29103 ndb_restore segfaults on NULL var[char|binary] - add extra row with NULL value to test storage/ndb/tools/restore/consumer_restore.cpp: Bug #29103 ndb_restore segfaults on NULL var[char|binary] - check that the attribute is not null --- mysql-test/r/ndb_restore.result | 20 ++++++------- mysql-test/t/ndb_restore.test | 2 +- .../ndb/tools/restore/consumer_restore.cpp | 29 ++++++++++--------- 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index 8ecffa437b0..d1c76192cef 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -18,7 +18,7 @@ CREATE TABLE `t2_c` ( PRIMARY KEY (`capgotod`), KEY `i quadaddsvr` (`gotod`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'); +INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,''); CREATE TABLE `t3_c` ( `CapGoaledatta` smallint(5) unsigned NOT NULL default '0', `capgotod` smallint(5) unsigned NOT NULL default '0', @@ -154,15 +154,15 @@ count(*) 5 select count(*) from t2; count(*) -6 +7 select count(*) from t2_c; count(*) -6 +7 select count(*) from (select * from t2 union select * from t2_c) a; count(*) -6 +7 select count(*) from t3; count(*) 4 @@ -286,15 +286,15 @@ count(*) 5 select count(*) from t2; count(*) -6 +7 select count(*) from t2_c; count(*) -6 +7 select count(*) from (select * from t2 union select * from t2_c) a; count(*) -6 +7 select count(*) from t3; count(*) 4 @@ -386,15 +386,15 @@ count(*) 5 select count(*) from t2; count(*) -6 +7 select count(*) from t2_c; count(*) -6 +7 select count(*) from (select * from t2 union select * from t2_c) a; count(*) -6 +7 select count(*) from t3; count(*) 4 diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 61927a1f90a..7f0cafdfd77 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -33,7 +33,7 @@ CREATE TABLE `t2_c` ( PRIMARY KEY (`capgotod`), KEY `i quadaddsvr` (`gotod`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; -INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'); +INSERT INTO `t2_c` VALUES (500,4,'','q3.net','addavp:MK_CASELECTOR=1','postorod rattoaa'),(2,1,'4','','addavp:MK_BRANDTAD=345','REDS 
Brandtad'),(3,2,'4','q3.net','execorder','fixedRatediPO REDS'),(1,1,'3','','addavp:MK_BRANDTAD=123','TEST Brandtad'),(6,5,'','told.q3.net','addavp:MK_BRANDTAD=123','Brandtad Toldzone'),(4,3,'3','q3.net','addavp:MK_POOLHINT=2','ratedi PO TEST'),(5,0,'',NULL,NULL,''); # Added ROW_FORMAT=FIXED to use below to see that setting is preserved # by restore diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index b7db8145c56..fde1f4c3074 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -1158,19 +1158,22 @@ void BackupRestore::tuple_a(restore_callback_t *cb) char * dataPtr = attr_data->string_value; Uint32 length = 0; - const unsigned char * src = (const unsigned char *)dataPtr; - switch(attr_desc->m_column->getType()){ - case NdbDictionary::Column::Varchar: - case NdbDictionary::Column::Varbinary: - length = src[0] + 1; - break; - case NdbDictionary::Column::Longvarchar: - case NdbDictionary::Column::Longvarbinary: - length = src[0] + (src[1] << 8) + 2; - break; - default: - length = attr_data->size; - break; + if (!attr_data->null) + { + const unsigned char * src = (const unsigned char *)dataPtr; + switch(attr_desc->m_column->getType()){ + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Varbinary: + length = src[0] + 1; + break; + case NdbDictionary::Column::Longvarchar: + case NdbDictionary::Column::Longvarbinary: + length = src[0] + (src[1] << 8) + 2; + break; + default: + length = attr_data->size; + break; + } } if (j == 0 && tup.getTable()->have_auto_inc(i)) tup.getTable()->update_max_auto_val(dataPtr,size*arraySize); From 12d1dece300ad0c04fedb1765bb8f88ea08d8ae2 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 12:57:32 +0200 Subject: [PATCH 36/38] Makefile.am: new public file needs to get into distribution storage/ndb/include/Makefile.am: new public file needs to get into distribution --- storage/ndb/include/Makefile.am | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/ndb/include/Makefile.am b/storage/ndb/include/Makefile.am index bf8fe392072..9e6ad016d75 100644 --- a/storage/ndb/include/Makefile.am +++ b/storage/ndb/include/Makefile.am @@ -45,6 +45,7 @@ ndbapi/ndberror.h mgmapiinclude_HEADERS = \ mgmapi/mgmapi.h \ +mgmapi/mgmapi_error.h \ mgmapi/mgmapi_debug.h \ mgmapi/mgmapi_config_parameters.h \ mgmapi/mgmapi_config_parameters_debug.h \ From 4daad4ebfe1f6a6ec97621eec717a21948ecc8b1 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 19:40:44 +0800 Subject: [PATCH 37/38] Bug#27640, backup id not displayed in the output of "ndb_mgm start backup wait completed" ndb/include/debugger/EventLogger.hpp: add definition for new printLogEvent() function in CommandInterpreter.cpp ndb/src/mgmclient/CommandInterpreter.cpp: add a printLogEvent() function to print log event; filter "" by ndb_logevent_get_next() in event_thread_run(); filter "" by ndb_logevent_get_next() in executeStartBackup(); and make executeStartBackup() same in both 5.0 and 5.1 ndb/src/mgmclient/Makefile.am: add link to use ndb_logevent_get_next() mysql-test/r/ndb_backup_print.result: testcase result for ndb_backup_print produced by mysql_test_run.pl mysql-test/t/ndb_backup_print.test: add testcase for ndb_mgm -e "start backup", check outupt format --- mysql-test/r/ndb_backup_print.result | 64 ++++++ mysql-test/t/ndb_backup_print.test | 66 ++++++ ndb/include/debugger/EventLogger.hpp | 2 +- ndb/src/mgmclient/CommandInterpreter.cpp | 258 +++++++++++++++++------ 
ndb/src/mgmclient/Makefile.am | 3 +- 5 files changed, 322 insertions(+), 71 deletions(-) create mode 100644 mysql-test/r/ndb_backup_print.result create mode 100644 mysql-test/t/ndb_backup_print.test diff --git a/mysql-test/r/ndb_backup_print.result b/mysql-test/r/ndb_backup_print.result new file mode 100644 index 00000000000..872ec9d2b72 --- /dev/null +++ b/mysql-test/r/ndb_backup_print.result @@ -0,0 +1,64 @@ +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +Connected to Management Server at: : +Waiting for completed, this may take several minutes +Backup started from node +Backup started from node completed + StartGCP: StopGCP: + #Records: #LogRecords: + Data: bytes Log: bytes +create table t1 +(pk int key +,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) +,b1 TINYINT, b2 TINYINT UNSIGNED +,c1 SMALLINT, c2 SMALLINT UNSIGNED +,d1 INT, d2 INT UNSIGNED +,e1 BIGINT, e2 BIGINT UNSIGNED +,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY +,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY +,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) +,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) +) engine ndb; +insert into t1 values +(1 +,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 +,127, 255 +,32767, 65535 +,2147483647, 4294967295 +,9223372036854775807, 18446744073709551615 +,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 +,0x12,0x123456789abcdef0, 0x00123450 +); +insert into t1 values +(2 +,0, 0, 0, 0, 0 +,-128, 0 +,-32768, 0 +,-2147483648, 0 +,-9223372036854775808, 0 +,'','','' + ,'','','' + ,0x0,0x0,0x0 +,0x0,0x0,0x0 +); +insert into t1 values +(3 +,NULL,NULL,NULL,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +,NULL,NULL,NULL +); +Connected to Management Server at: : +Waiting for completed, this may take several minutes +Backup started from node +Backup started from node completed + StartGCP: StopGCP: + #Records: #LogRecords: + Data: bytes Log: bytes diff --git a/mysql-test/t/ndb_backup_print.test b/mysql-test/t/ndb_backup_print.test new file mode 100644 index 00000000000..34bdf519694 --- /dev/null +++ b/mysql-test/t/ndb_backup_print.test @@ -0,0 +1,66 @@ +-- source include/have_ndb.inc +-- source include/ndb_default_cluster.inc +-- source include/not_embedded.inc + +--disable_warnings +use test; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +--enable_warnings + +#NO.1 test output of backup +--exec $NDB_TOOLS_DIR/../src/mgmclient/ndb_mgm -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' + +create table t1 + (pk int key + ,a1 BIT(1), a2 BIT(5), a3 BIT(33), a4 BIT(63), a5 BIT(64) + ,b1 TINYINT, b2 TINYINT UNSIGNED + ,c1 SMALLINT, c2 SMALLINT UNSIGNED + ,d1 INT, d2 INT UNSIGNED + ,e1 BIGINT, e2 BIGINT UNSIGNED + ,f1 CHAR(1) BINARY, f2 CHAR(32) BINARY, f3 CHAR(255) BINARY + ,g1 VARCHAR(32) BINARY, g2 VARCHAR(255) BINARY, g3 VARCHAR(1000) BINARY + ,h1 BINARY(1), h2 BINARY(8), h3 BINARY(255) + ,i1 VARBINARY(32), i2 VARBINARY(255), i3 VARBINARY(1000) + ) engine ndb; + +insert into t1 values + (1 + ,0x1, 0x17, 0x789a, 0x789abcde, 0xfedc0001 + ,127, 255 + ,32767, 65535 + ,2147483647, 4294967295 + ,9223372036854775807, 18446744073709551615 + ,'1','12345678901234567890123456789012','123456789' + ,'1','12345678901234567890123456789012','123456789' + ,0x12,0x123456789abcdef0, 0x012345 + ,0x12,0x123456789abcdef0, 0x00123450 + ); + +insert into t1 values + (2 
+ ,0, 0, 0, 0, 0 + ,-128, 0 + ,-32768, 0 + ,-2147483648, 0 + ,-9223372036854775808, 0 + ,'','','' + ,'','','' + ,0x0,0x0,0x0 + ,0x0,0x0,0x0 + ); + +insert into t1 values + (3 + ,NULL,NULL,NULL,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ,NULL,NULL,NULL + ); + +#NO.2 test output of backup after some simple SQL operations +--exec $NDB_TOOLS_DIR/../src/mgmclient/ndb_mgm -e "start backup" |sed -e 's/[0-9]//g' |sed -e 's/localhost//g' |sed -e 's/\.\.\.*//g' diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp index 11df3f513fc..f6762743df0 100644 --- a/ndb/include/debugger/EventLogger.hpp +++ b/ndb/include/debugger/EventLogger.hpp @@ -175,5 +175,5 @@ private: char m_text[MAX_TEXT_LENGTH]; }; - +extern void getRestartAction(Uint32 action, BaseString &str); #endif diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index 2ea98a57866..6212592461b 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -25,6 +25,7 @@ #include #include +#include class MgmtSrvr; @@ -125,7 +126,7 @@ public: int executeStatus(int processId, const char* parameters, bool all); int executeEventReporting(int processId, const char* parameters, bool all); int executeDumpState(int processId, const char* parameters, bool all); - int executeStartBackup(char * parameters); + int executeStartBackup(char * parameters, bool interactive); int executeAbortBackup(char * parameters); int executeStop(Vector &command_list, unsigned command_pos, int *node_ids, int no_of_nodes); @@ -768,6 +769,113 @@ CommandInterpreter::printError() } } +/* + * print log event from mgmsrv to console screen + */ +static void +printLogEvent(struct ndb_logevent* event) +{ + switch (event->type) { + /** + * NDB_MGM_EVENT_CATEGORY_BACKUP + */ + case NDB_LE_BackupStarted: + ndbout_c("Backup %d started from node %d", + event->BackupStarted.backup_id, event->BackupStarted.starting_node); + break; + case NDB_LE_BackupFailedToStart: + ndbout_c("Backup request from %d failed to start. Error: %d", + event->BackupFailedToStart.starting_node, event->BackupFailedToStart.error); + break; + case NDB_LE_BackupCompleted: + ndbout_c("Backup %u started from node %u completed\n" + " StartGCP: %u StopGCP: %u\n" + " #Records: %u #LogRecords: %u\n" + " Data: %u bytes Log: %u bytes", + event->BackupCompleted.backup_id, event->BackupCompleted.starting_node, + event->BackupCompleted.start_gci, event->BackupCompleted.stop_gci, + event->BackupCompleted.n_records, event->BackupCompleted.n_log_records, + event->BackupCompleted.n_bytes, event->BackupCompleted.n_log_bytes); + break; + case NDB_LE_BackupAborted: + ndbout_c("Backup %d started from %d has been aborted. Error: %d", + event->BackupAborted.backup_id, event->BackupAborted.starting_node, + event->BackupAborted.error); + break; + /** + * NDB_MGM_EVENT_CATEGORY_STARTUP + */ + case NDB_LE_NDBStartStarted: + ndbout_c("Start initiated (version %d.%d.%d)", + getMajor(event->NDBStartStarted.version), + getMinor(event->NDBStartStarted.version), + getBuild(event->NDBStartStarted.version)); + break; + case NDB_LE_NDBStartCompleted: + ndbout_c("Started (version %d.%d.%d)", + getMajor(event->NDBStartCompleted.version), + getMinor(event->NDBStartCompleted.version), + getBuild(event->NDBStartCompleted.version)); + break; + case NDB_LE_NDBStopStarted: + ndbout_c("%s shutdown initiated", + (event->NDBStopStarted.stoptype == 1 ? 
"Cluster" : "Node")); + break; + case NDB_LE_NDBStopCompleted: + { + BaseString action_str(""); + BaseString signum_str(""); + getRestartAction(event->NDBStopCompleted.action, action_str); + if (event->NDBStopCompleted.signum) + signum_str.appfmt(" Initiated by signal %d.", + event->NDBStopCompleted.signum); + ndbout_c("Node shutdown completed%s.%s", + action_str.c_str(), + signum_str.c_str()); + } + break; + case NDB_LE_NDBStopForced: + { + BaseString action_str(""); + BaseString reason_str(""); + BaseString sphase_str(""); + int signum = event->NDBStopForced.signum; + int error = event->NDBStopForced.error; + int sphase = event->NDBStopForced.sphase; + int extra = event->NDBStopForced.extra; + getRestartAction(event->NDBStopForced.action, action_str); + if (signum) + reason_str.appfmt(" Initiated by signal %d.", signum); + if (error) + { + ndbd_exit_classification cl; + ndbd_exit_status st; + const char *msg = ndbd_exit_message(error, &cl); + const char *cl_msg = ndbd_exit_classification_message(cl, &st); + const char *st_msg = ndbd_exit_status_message(st); + reason_str.appfmt(" Caused by error %d: \'%s(%s). %s\'.", + error, msg, cl_msg, st_msg); + if (extra != 0) + reason_str.appfmt(" (extra info %d)", extra); + } + if (sphase < 255) + sphase_str.appfmt(" Occured during startphase %u.", sphase); + ndbout_c("Forced node shutdown completed%s.%s%s", + action_str.c_str(), sphase_str.c_str(), + reason_str.c_str()); + } + break; + case NDB_LE_NDBStopAborted: + ndbout_c("Node shutdown aborted"); + break; + /** + * default nothing to print + */ + default: + break; + } +} + //***************************************************************************** //***************************************************************************** @@ -784,27 +892,21 @@ event_thread_run(void* p) int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 1, NDB_MGM_EVENT_CATEGORY_STARTUP, 0 }; - int fd = ndb_mgm_listen_event(handle, filter); - if (fd != NDB_INVALID_SOCKET) + + NdbLogEventHandle log_handle= NULL; + struct ndb_logevent log_event; + + log_handle= ndb_mgm_create_logevent_handle(handle, filter); + if (log_handle) { do_event_thread= 1; - char *tmp= 0; - char buf[1024]; - SocketInputStream in(fd,10); do { - if (tmp == 0) NdbSleep_MilliSleep(10); - if((tmp = in.gets(buf, 1024))) - { - const char ping_token[]= ""; - if (memcmp(ping_token,tmp,sizeof(ping_token)-1)) - if(tmp && strlen(tmp)) - { - Guard g(printmutex); - ndbout << tmp; - } - } + if (ndb_logevent_get_next(log_handle, &log_event, 2000) <= 0) + continue; + Guard g(printmutex); + printLogEvent(&log_event); } while(do_event_thread); - NDB_CLOSE_SOCKET(fd); + ndb_mgm_destroy_logevent_handle(&log_handle); } else { @@ -1054,7 +1156,7 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive) else if(strcasecmp(firstToken, "START") == 0 && allAfterFirstToken != NULL && strncasecmp(allAfterFirstToken, "BACKUP", sizeof("BACKUP") - 1) == 0){ - m_error= executeStartBackup(allAfterFirstToken); + m_error= executeStartBackup(allAfterFirstToken, interactive); DBUG_RETURN(true); } else if(strcasecmp(firstToken, "ABORT") == 0 && @@ -2518,20 +2620,11 @@ CommandInterpreter::executeEventReporting(int processId, * Backup *****************************************************************************/ int -CommandInterpreter::executeStartBackup(char* parameters) +CommandInterpreter::executeStartBackup(char* parameters, bool interactive) { struct ndb_mgm_reply reply; unsigned int backupId; -#if 0 - int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; - int fd 
= ndb_mgm_listen_event(m_mgmsrv, filter); - if (fd < 0) - { - ndbout << "Initializing start of backup failed" << endl; - printError(); - return fd; - } -#endif + Vector args; { BaseString(parameters).split(args); @@ -2544,25 +2637,20 @@ CommandInterpreter::executeStartBackup(char* parameters) int sz= args.size(); int result; - if (sz == 2 && - args[1] == "NOWAIT") + int flags = 2; + if (sz == 2 && args[1] == "NOWAIT") { - result = ndb_mgm_start_backup(m_mgmsrv, 0, &backupId, &reply); + flags = 0; } - else if (sz == 1 || - (sz == 3 && - args[1] == "WAIT" && - args[2] == "COMPLETED")) + else if (sz == 1 || (sz == 3 && args[1] == "WAIT" && args[2] == "COMPLETED")) { + flags = 2; ndbout_c("Waiting for completed, this may take several minutes"); - result = ndb_mgm_start_backup(m_mgmsrv, 2, &backupId, &reply); } - else if (sz == 3 && - args[1] == "WAIT" && - args[2] == "STARTED") + else if (sz == 3 && args[1] == "WAIT" && args[2] == "STARTED") { ndbout_c("Waiting for started, this may take several minutes"); - result = ndb_mgm_start_backup(m_mgmsrv, 1, &backupId, &reply); + flags = 1; } else { @@ -2570,48 +2658,80 @@ CommandInterpreter::executeStartBackup(char* parameters) return -1; } + NdbLogEventHandle log_handle= NULL; + struct ndb_logevent log_event; + if (flags == 2 && !interactive) + { + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 }; + log_handle = ndb_mgm_create_logevent_handle(m_mgmsrv, filter); + if (!log_handle) + { + ndbout << "Initializing start of backup failed" << endl; + printError(); + return -1; + } + } + result = ndb_mgm_start_backup(m_mgmsrv, flags, &backupId, &reply); + if (result != 0) { ndbout << "Backup failed" << endl; printError(); -#if 0 - close(fd); -#endif + + if (log_handle) + ndb_mgm_destroy_logevent_handle(&log_handle); return result; } -#if 0 - ndbout_c("Waiting for completed, this may take several minutes"); - char *tmp; - char buf[1024]; + + /** + * If interactive, event listner thread is already running + */ + if (log_handle && !interactive) { - SocketInputStream in(fd); int count = 0; + int retry = 0; do { - tmp = in.gets(buf, 1024); - if(tmp) + if (ndb_logevent_get_next(log_handle, &log_event, 60000) > 0) { - ndbout << tmp; - unsigned int id; - if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){ - count++; - } + int print = 0; + switch (log_event.type) { + case NDB_LE_BackupStarted: + if (log_event.BackupStarted.backup_id == backupId) + print = 1; + break; + case NDB_LE_BackupCompleted: + if (log_event.BackupCompleted.backup_id == backupId) + print = 1; + break; + case NDB_LE_BackupAborted: + if (log_event.BackupAborted.backup_id == backupId) + print = 1; + break; + default: + break; + } + if (print) + { + Guard g(m_print_mutex); + printLogEvent(&log_event); + count++; + } } - } while(count < 2); + else + { + retry++; + } + } while(count < 2 && retry < 3); + + if (retry >= 3) + ndbout << "get backup event failed for " << retry << " times" << endl; + + ndb_mgm_destroy_logevent_handle(&log_handle); } - SocketInputStream in(fd, 10); - do { - tmp = in.gets(buf, 1024); - if(tmp && tmp[0] != 0) - { - ndbout << tmp; - } - } while(tmp && tmp[0] != 0); - - close(fd); -#endif return 0; } + int CommandInterpreter::executeAbortBackup(char* parameters) { diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am index 8ce8bf4da45..99540160341 100644 --- a/ndb/src/mgmclient/Makefile.am +++ b/ndb/src/mgmclient/Makefile.am @@ -21,7 +21,8 @@ libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \ ../common/logger/liblogger.la \ 
../common/portlib/libportlib.la \ ../common/util/libgeneral.la \ - ../common/portlib/libportlib.la + ../common/portlib/libportlib.la \ + ../common/debugger/libtrace.la ndb_mgm_SOURCES = main.cpp From e470fbe9221da4eb9d582e6c0659d2fe23b30a5e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 14 Jun 2007 20:25:45 +0800 Subject: [PATCH 38/38] Bug#27640, backup id not dispalyed in the output of "ndb_mgm start backup wait completed" correct related sourcecode after merge from 5.0 storage/ndb/src/mgmclient/CommandInterpreter.cpp: correct something to ensure only related sourcecode are merged --- .../ndb/src/mgmclient/CommandInterpreter.cpp | 348 ++---------------- 1 file changed, 35 insertions(+), 313 deletions(-) diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index 6212592461b..8175a1916b5 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -15,14 +15,7 @@ #include #include - -//#define HAVE_GLOBAL_REPLICATION - #include -#ifdef HAVE_GLOBAL_REPLICATION -#include "../rep/repapi/repapi.h" -#endif - #include #include #include @@ -168,11 +161,6 @@ private: int m_verbose; int try_reconnect; int m_error; -#ifdef HAVE_GLOBAL_REPLICATION - NdbRepHandle m_repserver; - const char *rep_host; - bool rep_connected; -#endif struct NdbThread* m_event_thread; NdbMutex *m_print_mutex; }; @@ -237,10 +225,6 @@ extern "C" { #include #include #include -#include -#ifdef HAVE_GLOBAL_REPLICATION - -#endif // HAVE_GLOBAL_REPLICATION #include "MgmtErrorReporter.hpp" #include #include @@ -268,9 +252,6 @@ static const char* helpText = "---------------------------------------------------------------------------\n" "HELP Print help text\n" "HELP COMMAND Print detailed help for COMMAND(e.g. 
SHOW)\n" -#ifdef HAVE_GLOBAL_REPLICATION -"HELP REPLICATION Help for global replication\n" -#endif // HAVE_GLOBAL_REPLICATION #ifdef VM_TRACE // DEBUG ONLY "HELP DEBUG Help for debug compiled version\n" #endif @@ -294,9 +275,6 @@ static const char* helpText = "EXIT SINGLE USER MODE Exit single user mode\n" " STATUS Print status\n" " CLUSTERLOG {=}+ Set log level for cluster log\n" -#ifdef HAVE_GLOBAL_REPLICATION -"REP CONNECT Connect to REP server on host:port\n" -#endif "PURGE STALE SESSIONS Reset reserved nodeid's in the mgmt server\n" "CONNECT [] Connect to management server (reconnect if already connected)\n" "QUIT Quit management client\n" @@ -596,39 +574,6 @@ static const char* helpTextQuit = ; -#ifdef HAVE_GLOBAL_REPLICATION -static const char* helpTextRep = -"---------------------------------------------------------------------------\n" -" NDB Cluster -- Management Client -- Help for Global Replication\n" -"---------------------------------------------------------------------------\n" -"Commands should be executed on the standby NDB Cluster\n" -"These features are in an experimental release state.\n" -"\n" -"Simple Commands:\n" -"REP START Start Global Replication\n" -"REP START REQUESTOR Start Global Replication Requestor\n" -"REP STATUS Show Global Replication status\n" -"REP STOP Stop Global Replication\n" -"REP STOP REQUESTOR Stop Global Replication Requestor\n" -"\n" -"Advanced Commands:\n" -"REP START Starts protocol\n" -"REP STOP Stops protocol\n" -" = TRANSFER | APPLY | DELETE\n" -"\n" -#ifdef VM_TRACE // DEBUG ONLY -"Debugging commands:\n" -"REP DELETE Removes epochs stored in primary and standy systems\n" -"REP DROP Drop a table in SS identified by table id\n" -"REP SLOWSTOP Stop Replication (Tries to synchonize with primary)\n" -"REP FASTSTOP Stop Replication (Stops in consistent state)\n" -" = SUBSCRIPTION\n" -" METALOG | METASCAN | DATALOG | DATASCAN\n" -" REQUESTOR | TRANSFER | APPLY | DELETE\n" -#endif -; -#endif // HAVE_GLOBAL_REPLICATION - #ifdef VM_TRACE // DEBUG ONLY static const char* helpTextDebug = "---------------------------------------------------------------------------\n" @@ -681,10 +626,6 @@ struct st_cmd_help { {"PURGE STALE SESSIONS", helpTextPurgeStaleSessions}, {"CONNECT", helpTextConnect}, {"QUIT", helpTextQuit}, -#ifdef HAVE_GLOBAL_REPLICATION - {"REPLICATION", helpTextRep}, - {"REP", helpTextRep}, -#endif // HAVE_GLOBAL_REPLICATION #ifdef VM_TRACE // DEBUG ONLY {"DEBUG", helpTextDebug}, #endif //VM_TRACE @@ -724,11 +665,6 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose) m_event_thread= NULL; try_reconnect = 0; m_print_mutex= NdbMutex_Create(); -#ifdef HAVE_GLOBAL_REPLICATION - rep_host = NULL; - m_repserver = NULL; - rep_connected = false; -#endif } /* @@ -1168,15 +1104,9 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive) else if (strcasecmp(firstToken, "PURGE") == 0) { m_error = executePurge(allAfterFirstToken); DBUG_RETURN(true); - } -#ifdef HAVE_GLOBAL_REPLICATION - else if(strcasecmp(firstToken, "REPLICATION") == 0 || - strcasecmp(firstToken, "REP") == 0) { - m_error = executeRep(allAfterFirstToken); - DBUG_RETURN(true); - } -#endif // HAVE_GLOBAL_REPLICATION + } else if(strcasecmp(firstToken, "ENTER") == 0 && + allAfterFirstToken != NULL && allAfterFirstToken != NULL && strncasecmp(allAfterFirstToken, "SINGLE USER MODE ", sizeof("SINGLE USER MODE") - 1) == 0){ @@ -1651,7 +1581,6 @@ CommandInterpreter::executePurge(char* parameters) return -1; } - int i; char *str; if 
(ndb_mgm_purge_stale_sessions(m_mgmsrv, &str)) { @@ -1730,8 +1659,8 @@ CommandInterpreter::executeShow(char* parameters) case NDB_MGM_NODE_TYPE_UNKNOWN: ndbout << "Error: Unknown Node Type" << endl; return -1; - case NDB_MGM_NODE_TYPE_REP: - abort(); + case NDB_MGM_NODE_TYPE_MAX: + break; /* purify: deadcode */ } } @@ -1769,7 +1698,6 @@ CommandInterpreter::executeConnect(char* parameters, bool interactive) { BaseString *basestring = NULL; - int retval; disconnect(); if (!emptyString(parameters)) { basestring= new BaseString(parameters); @@ -1806,7 +1734,15 @@ CommandInterpreter::executeClusterLog(char* parameters) char * item = strtok_r(tmpString, " ", &tmpPtr); int enable; - const unsigned int *enabled= ndb_mgm_get_logfilter(m_mgmsrv); + ndb_mgm_severity enabled[NDB_MGM_EVENT_SEVERITY_ALL] = + {{NDB_MGM_EVENT_SEVERITY_ON,0}, + {NDB_MGM_EVENT_SEVERITY_DEBUG,0}, + {NDB_MGM_EVENT_SEVERITY_INFO,0}, + {NDB_MGM_EVENT_SEVERITY_WARNING,0}, + {NDB_MGM_EVENT_SEVERITY_ERROR,0}, + {NDB_MGM_EVENT_SEVERITY_CRITICAL,0}, + {NDB_MGM_EVENT_SEVERITY_ALERT,0}}; + ndb_mgm_get_clusterlog_severity_filter(m_mgmsrv, &enabled[0], NDB_MGM_EVENT_SEVERITY_ALL); if(enabled == NULL) { ndbout << "Couldn't get status" << endl; printError(); @@ -1819,25 +1755,25 @@ CommandInterpreter::executeClusterLog(char* parameters) ********************/ if (strcasecmp(item, "INFO") == 0) { DBUG_PRINT("info",("INFO")); - if(enabled[0] == 0) + if(enabled[0].value == 0) { ndbout << "Cluster logging is disabled." << endl; m_error = 0; DBUG_VOID_RETURN; } #if 0 - for(i = 0; i<7;i++) - printf("enabled[%d] = %d\n", i, enabled[i]); + for(i = 0; i &command_list, return -1; } + if (!nostart) + ndbout_c("Shutting down nodes with \"-n, no start\" option, to subsequently start the nodes."); + result= ndb_mgm_restart3(m_mgmsrv, no_of_nodes, node_ids, initialstart, nostart, abort, &need_disconnect); @@ -2204,7 +2143,6 @@ CommandInterpreter::executeStatus(int processId, ndb_mgm_node_status status; Uint32 startPhase, version; - bool system; struct ndb_mgm_cluster_state *cl; cl = ndb_mgm_get_status(m_mgmsrv); @@ -2222,6 +2160,19 @@ CommandInterpreter::executeStatus(int processId, ndbout << processId << ": Node not found" << endl; return -1; } + if (cl->node_states[i].node_type != NDB_MGM_NODE_TYPE_NDB){ + if (cl->node_states[i].version != 0){ + version = cl->node_states[i].version; + ndbout << "Node "<< cl->node_states[i].node_id <<": connected" ; + ndbout_c(" (Version %d.%d.%d)", + getMajor(version) , + getMinor(version), + getBuild(version)); + + }else + ndbout << "Node "<< cl->node_states[i].node_id <<": not connected" << endl; + return 0; + } status = cl->node_states[i].node_status; startPhase = cl->node_states[i].start_phase; version = cl->node_states[i].version; @@ -2616,6 +2567,7 @@ CommandInterpreter::executeEventReporting(int processId, return retval; } + /***************************************************************************** * Backup *****************************************************************************/ @@ -2731,7 +2683,6 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive) return 0; } - int CommandInterpreter::executeAbortBackup(char* parameters) { @@ -2762,233 +2713,4 @@ CommandInterpreter::executeAbortBackup(char* parameters) return -1; } -#ifdef HAVE_GLOBAL_REPLICATION -/***************************************************************************** - * Global Replication - * - * For information about the different commands, see - * GrepReq::Request in file signaldata/grepImpl.cpp. 
- * - * Below are commands as of 2003-07-05 (may change!): - * START = 0, ///< Start Global Replication (all phases) - * START_METALOG = 1, ///< Start Global Replication (all phases) - * START_METASCAN = 2, ///< Start Global Replication (all phases) - * START_DATALOG = 3, ///< Start Global Replication (all phases) - * START_DATASCAN = 4, ///< Start Global Replication (all phases) - * START_REQUESTOR = 5, ///< Start Global Replication (all phases) - * ABORT = 6, ///< Immediate stop (removes subscription) - * SLOW_STOP = 7, ///< Stop after finishing applying current GCI epoch - * FAST_STOP = 8, ///< Stop after finishing applying all PS GCI epochs - * START_TRANSFER = 9, ///< Start SS-PS transfer - * STOP_TRANSFER = 10, ///< Stop SS-PS transfer - * START_APPLY = 11, ///< Start applying GCI epochs in SS - * STOP_APPLY = 12, ///< Stop applying GCI epochs in SS - * STATUS = 13, ///< Status - * START_SUBSCR = 14, - * REMOVE_BUFFERS = 15, - * DROP_TABLE = 16 - - *****************************************************************************/ - -int -CommandInterpreter::executeRep(char* parameters) -{ - if (emptyString(parameters)) { - ndbout << helpTextRep; - return 0; - } - - char * line = my_strdup(parameters,MYF(MY_WME)); - My_auto_ptr ap1((char*)line); - char * firstToken = strtok(line, " "); - - struct ndb_rep_reply reply; - unsigned int repId; - - - if (!strcasecmp(firstToken, "CONNECT")) { - char * host = strtok(NULL, "\0"); - for (unsigned int i = 0; i < strlen(host); ++i) { - host[i] = tolower(host[i]); - } - - if(host == NULL) - { - ndbout_c("host:port must be specified."); - return -1; - } - - if(rep_connected) { - if(m_repserver != NULL) { - ndb_rep_disconnect(m_repserver); - rep_connected = false; - } - } - - if(m_repserver == NULL) - m_repserver = ndb_rep_create_handle(); - if(ndb_rep_connect(m_repserver, host) < 0){ - ndbout_c("Failed to connect to %s", host); - return -1; - } - else - rep_connected=true; - return 0; - - if(!rep_connected) { - ndbout_c("Not connected to REP server"); - return -1; - } - } - - /******** - * START - ********/ - if (!strcasecmp(firstToken, "START")) { - - unsigned int req; - char *startType = strtok(NULL, "\0"); - - if (startType == NULL) { - req = GrepReq::START; - } else if (!strcasecmp(startType, "SUBSCRIPTION")) { - req = GrepReq::START_SUBSCR; - } else if (!strcasecmp(startType, "METALOG")) { - req = GrepReq::START_METALOG; - } else if (!strcasecmp(startType, "METASCAN")) { - req = GrepReq::START_METASCAN; - } else if (!strcasecmp(startType, "DATALOG")) { - req = GrepReq::START_DATALOG; - } else if (!strcasecmp(startType, "DATASCAN")) { - req = GrepReq::START_DATASCAN; - } else if (!strcasecmp(startType, "REQUESTOR")) { - req = GrepReq::START_REQUESTOR; - } else if (!strcasecmp(startType, "TRANSFER")) { - req = GrepReq::START_TRANSFER; - } else if (!strcasecmp(startType, "APPLY")) { - req = GrepReq::START_APPLY; - } else if (!strcasecmp(startType, "DELETE")) { - req = GrepReq::START_DELETE; - } else { - ndbout_c("Illegal argument to command 'REPLICATION START'"); - return -1; - } - - int result = ndb_rep_command(m_repserver, req, &repId, &reply); - - if (result != 0) { - ndbout << "Start of Global Replication failed" << endl; - return -1; - } else { - ndbout << "Start of Global Replication ordered" << endl; - } - return 0; - } - - /******** - * STOP - ********/ - if (!strcasecmp(firstToken, "STOP")) { - unsigned int req; - char *startType = strtok(NULL, " "); - unsigned int epoch = 0; - - if (startType == NULL) { - /** - * Stop immediately - */ - req 
= GrepReq::STOP; - } else if (!strcasecmp(startType, "EPOCH")) { - char *strEpoch = strtok(NULL, "\0"); - if(strEpoch == NULL) { - ndbout_c("Epoch expected!"); - return -1; - } - req = GrepReq::STOP; - epoch=atoi(strEpoch); - } else if (!strcasecmp(startType, "SUBSCRIPTION")) { - req = GrepReq::STOP_SUBSCR; - } else if (!strcasecmp(startType, "METALOG")) { - req = GrepReq::STOP_METALOG; - } else if (!strcasecmp(startType, "METASCAN")) { - req = GrepReq::STOP_METASCAN; - } else if (!strcasecmp(startType, "DATALOG")) { - req = GrepReq::STOP_DATALOG; - } else if (!strcasecmp(startType, "DATASCAN")) { - req = GrepReq::STOP_DATASCAN; - } else if (!strcasecmp(startType, "REQUESTOR")) { - req = GrepReq::STOP_REQUESTOR; - } else if (!strcasecmp(startType, "TRANSFER")) { - req = GrepReq::STOP_TRANSFER; - } else if (!strcasecmp(startType, "APPLY")) { - req = GrepReq::STOP_APPLY; - } else if (!strcasecmp(startType, "DELETE")) { - req = GrepReq::STOP_DELETE; - } else { - ndbout_c("Illegal argument to command 'REPLICATION STOP'"); - return -1; - } - int result = ndb_rep_command(m_repserver, req, &repId, &reply, epoch); - - if (result != 0) { - ndbout << "Stop command failed" << endl; - return -1; - } else { - ndbout << "Stop ordered" << endl; - } - return 0; - } - - /********* - * STATUS - *********/ - if (!strcasecmp(firstToken, "STATUS")) { - struct rep_state repstate; - int result = - ndb_rep_get_status(m_repserver, &repId, &reply, &repstate); - - if (result != 0) { - ndbout << "Status request of Global Replication failed" << endl; - return -1; - } else { - ndbout << "Status request of Global Replication ordered" << endl; - ndbout << "See printout at one of the DB nodes" << endl; - ndbout << "(Better status report is under development.)" << endl; - ndbout << " SubscriptionId " << repstate.subid - << " SubscriptionKey " << repstate.subkey << endl; - } - return 0; - } - - /********* - * QUERY (see repapi.h for querable counters) - *********/ - if (!strcasecmp(firstToken, "QUERY")) { - char *query = strtok(NULL, "\0"); - int queryCounter=-1; - if(query != NULL) { - queryCounter = atoi(query); - } - struct rep_state repstate; - unsigned repId = 0; - int result = ndb_rep_query(m_repserver, (QueryCounter)queryCounter, - &repId, &reply, &repstate); - - if (result != 0) { - ndbout << "Query repserver failed" << endl; - return -1; - } else { - ndbout << "Query repserver sucessful" << endl; - ndbout_c("repstate : QueryCounter %d, f=%d l=%d" - " nodegroups %d" , - repstate.queryCounter, - repstate.first[0], repstate.last[0], - repstate.no_of_nodegroups ); - } - return 0; - } - return 0; -} -#endif // HAVE_GLOBAL_REPLICATION - template class Vector;
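
Note on the backup changes above: both backup-related patches replace the old ndb_mgm_listen_event() text-stream parsing with the structured log-event API (ndb_mgm_create_logevent_handle / ndb_logevent_get_next / ndb_mgm_destroy_logevent_handle), which is what lets executeStartBackup() report the backup id again (Bug#27640). The sketch below shows how a standalone MGM API client could use the same pattern to start a backup and wait for the matching BackupStarted/BackupCompleted events. It is a minimal illustration only, not part of the patch series: the connect string, header names and error handling are assumptions, while the event filter layout, the flags value and the ndb_logevent fields are taken from the code in the patches above.

/*
 * Sketch (not part of the patch series): start a backup through the MGM API
 * and wait for the log events that carry its backup id, using the same
 * mechanism as executeStartBackup() above. Header paths and the default
 * connect string are assumptions.
 */
#include <mgmapi.h>
#include <ndb_logevent.h>
#include <stdio.h>

int main(int argc, char** argv)
{
  const char* connectstring = (argc > 1) ? argv[1] : "localhost:1186";

  NdbMgmHandle handle = ndb_mgm_create_handle();
  ndb_mgm_set_connectstring(handle, connectstring);
  if (ndb_mgm_connect(handle, 0, 0, 1) != 0)
  {
    fprintf(stderr, "connect failed: %s\n",
            ndb_mgm_get_latest_error_msg(handle));
    return 1;
  }

  /* Subscribe to BACKUP category events (severity level 15) before starting
   * the backup, so the BackupStarted event cannot be missed. */
  int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
  NdbLogEventHandle log_handle = ndb_mgm_create_logevent_handle(handle, filter);
  if (log_handle == NULL)
  {
    fprintf(stderr, "could not create log event handle\n");
    return 1;
  }

  /* flags == 2 means "wait until the backup is completed", as in the patch. */
  unsigned int backupId = 0;
  struct ndb_mgm_reply reply;
  if (ndb_mgm_start_backup(handle, 2, &backupId, &reply) != 0)
  {
    fprintf(stderr, "start backup failed\n");
    return 1;
  }

  /* Read events until both the started and completed events for this
   * backup id have been seen, or a 60 s poll times out. */
  struct ndb_logevent event;
  int seen = 0;
  while (seen < 2)
  {
    if (ndb_logevent_get_next(log_handle, &event, 60000) <= 0)
      break;                                   /* timeout or error */
    switch (event.type) {
    case NDB_LE_BackupStarted:
      if (event.BackupStarted.backup_id == backupId)
      {
        printf("Backup %u started\n", backupId);
        seen++;
      }
      break;
    case NDB_LE_BackupCompleted:
      if (event.BackupCompleted.backup_id == backupId)
      {
        printf("Backup %u completed\n", backupId);
        seen++;
      }
      break;
    case NDB_LE_BackupAborted:
      if (event.BackupAborted.backup_id == backupId)
      {
        printf("Backup %u aborted\n", backupId);
        seen = 2;
      }
      break;
    default:
      break;
    }
  }

  ndb_mgm_destroy_logevent_handle(&log_handle);
  ndb_mgm_disconnect(handle);
  ndb_mgm_destroy_handle(&handle);
  return 0;
}

As in executeStartBackup(), the log-event handle is created before ndb_mgm_start_backup() is called, so events emitted while that call blocks (wait-completed mode) are still queued on the event connection and can be read afterwards; in the interactive client this reading is instead done by the already running event thread, which is why the patch only creates a local handle when not interactive.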