From f5c3843c377db422b6cf597ba7be25719b78b855 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 23 Apr 2005 00:05:05 +0200 Subject: [PATCH 1/4] Fixes for BUG#10039 "MEMORY engine is reported as HEAP", BUG#9738 "SHOW VARIABLES still displays the deprecated 'log_update' in 5.0", BUG#9542 "MySQL dies with signal 11 when it is using non-existent location of binary logs" mysql-test/r/create.result: MEMORY, not HEAP mysql-test/r/ps_1general.result: order changed in SHOW STORAGE ENGINES mysql-test/r/variables.result: MEMORY, not HEAP sql/handler.cc: the array of storage engine names is searched in index order, so we must put the preferred name before the alias so that the preferred name is printed by MySQL instead of the alias. sql/log.cc: TC_LOG::open(): don't try to open if the index (relevant only for binlog TC log) could not be opened. sql/set_var.cc: --log-update is deprecated and replaced by --log-bin so don't show log_update in SHOW VARIABLES. --- mysql-test/r/create.result | 8 ++++---- mysql-test/r/ps_1general.result | 2 +- mysql-test/r/variables.result | 4 ++-- sql/handler.cc | 4 ++-- sql/log.cc | 7 +++++++ sql/set_var.cc | 1 - 6 files changed, 16 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index cafe6f23ccf..4f4400b4a9b 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -210,7 +210,7 @@ drop table if exists t1; SET SESSION storage_engine="heap"; SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -222,7 +222,7 @@ SET SESSION storage_engine="gemini"; ERROR 42000: Unknown table engine 'gemini' SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -371,7 +371,7 @@ drop database mysqltest; SET SESSION storage_engine="heap"; SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table @@ -383,7 +383,7 @@ SET SESSION storage_engine="gemini"; ERROR 42000: Unknown table engine 'gemini' SELECT @@storage_engine; @@storage_engine -HEAP +MEMORY CREATE TABLE t1 (a int not null); show create table t1; Table Create Table diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index 0fe907ac8c1..6c616a99fb0 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -322,8 +322,8 @@ prepare stmt4 from ' show storage engines '; execute stmt4; Engine Support Comment MyISAM YES/NO Default engine as of MySQL 3.23 with great performance -HEAP YES/NO Alias for MEMORY MEMORY YES/NO Hash based, stored in memory, useful for temporary tables +HEAP YES/NO Alias for MEMORY MERGE YES/NO Collection of identical MyISAM tables MRG_MYISAM YES/NO Alias for MERGE ISAM YES/NO Obsolete storage engine, now replaced by MyISAM diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 0f4f25dfdb5..b3850bcc72c 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -148,7 +148,7 @@ timed_mutexes OFF set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE"; show local variables like 'storage_engine'; Variable_name Value -storage_engine HEAP +storage_engine MEMORY show global variables like 'storage_engine'; Variable_name Value storage_engine MERGE @@ -254,7 +254,7 @@ set storage_engine=MERGE, big_tables=2; ERROR 42000: Variable 'big_tables' can't be set to the value of '2' show local variables like 
'storage_engine'; Variable_name Value -storage_engine HEAP +storage_engine MEMORY set SESSION query_cache_size=10000; ERROR HY000: Variable 'query_cache_size' is a GLOBAL variable and should be set with SET GLOBAL set GLOBAL storage_engine=DEFAULT; diff --git a/sql/handler.cc b/sql/handler.cc index 14b8974ece9..542efaba2bf 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -70,10 +70,10 @@ struct show_table_type_st sys_table_types[]= { {"MyISAM", &have_yes, "Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM}, - {"HEAP", &have_yes, - "Alias for MEMORY", DB_TYPE_HEAP}, {"MEMORY", &have_yes, "Hash based, stored in memory, useful for temporary tables", DB_TYPE_HEAP}, + {"HEAP", &have_yes, + "Alias for MEMORY", DB_TYPE_HEAP}, {"MERGE", &have_yes, "Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM}, {"MRG_MYISAM",&have_yes, diff --git a/sql/log.cc b/sql/log.cc index 1d6bb4cdf41..fc74223d7b6 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2857,6 +2857,13 @@ int TC_LOG_BINLOG::open(const char *opt_name) pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST); pthread_cond_init (&COND_prep_xids, 0); + if (!my_b_inited(&index_file)) + { + /* There was a failure to open the index file, can't open the binlog */ + cleanup(); + return 1; + } + if (using_heuristic_recover()) { /* generate a new binlog to mask a corrupted one */ diff --git a/sql/set_var.cc b/sql/set_var.cc index 70d64b5dac6..bb3db177936 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -847,7 +847,6 @@ struct show_var_st init_vars[]= { {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, #endif {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, - {"log_update", (char*) &opt_update_log, SHOW_BOOL}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS}, {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS}, From ffb64880b7b5381d29341ec7d5cb07da2e81b77f Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 23 Apr 2005 17:33:12 +0200 Subject: [PATCH 2/4] ndb - bug#10029 fix ndb/include/kernel/signaldata/TuxMaint.hpp: handle multiple index update failure atomically ndb/src/kernel/blocks/dbtup/Dbtup.hpp: handle multiple index update failure atomically ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp: handle multiple index update failure atomically ndb/src/kernel/blocks/dbtup/Notes.txt: handle multiple index update failure atomically ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp: handle multiple index update failure atomically ndb/src/ndbapi/ndberror.c: handle multiple index update failure atomically ndb/test/ndbapi/testOIBasic.cpp: handle multiple index update failure atomically --- ndb/include/kernel/signaldata/TuxMaint.hpp | 4 +- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 8 + ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 80 +++++----- ndb/src/kernel/blocks/dbtup/Notes.txt | 25 ++- ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 11 ++ ndb/src/ndbapi/ndberror.c | 7 +- ndb/test/ndbapi/testOIBasic.cpp | 160 ++++++++++++++----- 7 files changed, 205 insertions(+), 90 deletions(-) diff --git a/ndb/include/kernel/signaldata/TuxMaint.hpp b/ndb/include/kernel/signaldata/TuxMaint.hpp index 9fee031dc41..4518f0531ea 100644 --- a/ndb/include/kernel/signaldata/TuxMaint.hpp +++ b/ndb/include/kernel/signaldata/TuxMaint.hpp @@ -36,8 +36,8 @@ public: }; enum ErrorCode { NoError = 0, // must be zero - SearchError = 895, // add + found or remove + not found - NoMemError = 827 + SearchError = 901, // add + found or remove + not found +
NoMemError = 902 }; STATIC_CONST( SignalLength = 8 ); private: diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index b48546576f9..06cfd420eac 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1777,6 +1777,10 @@ private: Operationrec* const regOperPtr, Tablerec* const regTabPtr); + int addTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr); + // these crash the node on error void executeTuxCommitTriggers(Signal* signal, @@ -1787,6 +1791,10 @@ private: Operationrec* regOperPtr, Tablerec* const regTabPtr); + void removeTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr); + // ***************************************************************** // Error Handling routines. // ***************************************************************** diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index aac5c326cad..575d08efffc 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -973,25 +973,7 @@ Dbtup::executeTuxInsertTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpAdd; - // loop over index list - const ArrayList& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - ljam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - ljamEntry(); - if (req->errorCode != 0) { - ljam(); - terrorCode = req->errorCode; - return -1; - } - triggerList.next(triggerPtr); - } - return 0; + return addTuxEntries(signal, regOperPtr, regTabPtr); } int @@ -1012,9 +994,18 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpAdd; - // loop over index list + return addTuxEntries(signal, regOperPtr, regTabPtr); +} + +int +Dbtup::addTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr) +{ + TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); const ArrayList& triggerList = regTabPtr->tuxCustomTriggers; TriggerPtr triggerPtr; + Uint32 failPtrI; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { ljam(); @@ -1026,11 +1017,29 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal, if (req->errorCode != 0) { ljam(); terrorCode = req->errorCode; - return -1; + failPtrI = triggerPtr.i; + goto fail; } triggerList.next(triggerPtr); } return 0; +fail: + req->opInfo = TuxMaintReq::OpRemove; + triggerList.first(triggerPtr); + while (triggerPtr.i != failPtrI) { + ljam(); + req->indexId = triggerPtr.p->indexId; + req->errorCode = RNIL; + EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, + signal, TuxMaintReq::SignalLength); + ljamEntry(); + ndbrequire(req->errorCode == 0); + triggerList.next(triggerPtr); + } +#ifdef VM_TRACE + ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl; +#endif + return -1; } int @@ -1049,7 +1058,6 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, { TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); // get version - // XXX could add prevTupVersion to Operationrec Uint32 tupVersion; if (regOperPtr->optype == ZINSERT) { if (! 
regOperPtr->deleteInsertFlag) @@ -1087,21 +1095,7 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpRemove; - // loop over index list - const ArrayList& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - ljam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - ljamEntry(); - // commit must succeed - ndbrequire(req->errorCode == 0); - triggerList.next(triggerPtr); - } + removeTuxEntries(signal, regOperPtr, regTabPtr); } void @@ -1132,7 +1126,15 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, req->pageOffset = regOperPtr->pageOffset; req->tupVersion = tupVersion; req->opInfo = TuxMaintReq::OpRemove; - // loop over index list + removeTuxEntries(signal, regOperPtr, regTabPtr); +} + +void +Dbtup::removeTuxEntries(Signal* signal, + Operationrec* regOperPtr, + Tablerec* regTabPtr) +{ + TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); const ArrayList& triggerList = regTabPtr->tuxCustomTriggers; TriggerPtr triggerPtr; triggerList.first(triggerPtr); @@ -1143,7 +1145,7 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); ljamEntry(); - // abort must succeed + // must succeed ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); } diff --git a/ndb/src/kernel/blocks/dbtup/Notes.txt b/ndb/src/kernel/blocks/dbtup/Notes.txt index 9d47c591fe8..c2973bb0a76 100644 --- a/ndb/src/kernel/blocks/dbtup/Notes.txt +++ b/ndb/src/kernel/blocks/dbtup/Notes.txt @@ -135,6 +135,24 @@ abort DELETE none - 1) alternatively, store prevTupVersion in operation record. +Abort from ordered index error +------------------------------ + +Obviously, index update failure causes operation failure. +The operation is then aborted later by TC. + +The problem here is with multiple indexes. Some may have been +updated successfully before the one that failed. Therefore +the trigger code aborts the successful ones already in +the prepare phase. + +In other words, multiple indexes are treated as one. + +Abort from any cause +-------------------- + +[ hairy stuff ] + Read attributes, query status ----------------------------- @@ -170,14 +188,11 @@ used to decide if the scan can see the tuple. This signal may also be called during any phase since commit/abort of all operations is not done in one time-slice. -Commit and abort ----------------- - -[ hairy stuff ] - Problems -------- Current abort code can destroy a tuple version too early. This happens in test case "ticuur" (insert-commit-update-update-rollback), if abort of first update arrives before abort of second update. 
+ +vim: set textwidth=68: diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp index 389192fd0cf..9f9d4cb68e3 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp @@ -23,6 +23,11 @@ int Dbtux::allocNode(Signal* signal, NodeHandle& node) { + if (ERROR_INSERTED(12007)) { + jam(); + CLEAR_ERROR_INSERT_VALUE; + return TuxMaintReq::NoMemError; + } Frag& frag = node.m_frag; Uint32 pageId = NullTupLoc.getPageId(); Uint32 pageOffset = NullTupLoc.getPageOffset(); @@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node) node.m_loc = TupLoc(pageId, pageOffset); node.m_node = reinterpret_cast(node32); ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0); + } else { + switch (errorCode) { + case 827: + errorCode = TuxMaintReq::NoMemError; + break; + } } return errorCode; } diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 94e1aeb5545..f052200b67d 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -175,10 +175,11 @@ ErrorBundle ErrorCodes[] = { */ { 623, IS, "623" }, { 624, IS, "624" }, - { 625, IS, "Out of memory in Ndb Kernel, index part (increase IndexMemory)" }, + { 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" }, { 800, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" }, { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" }, - { 827, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" }, + { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" }, + { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" }, { 832, IS, "832" }, /** @@ -205,7 +206,7 @@ ErrorBundle ErrorCodes[] = { * Internal errors */ { 892, IE, "Inconsistent hash index. The index needs to be dropped and recreated" }, - { 895, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" }, + { 901, IE, "Inconsistent ordered index. 
The index needs to be dropped and recreated" }, { 202, IE, "202" }, { 203, IE, "203" }, { 207, IE, "207" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index e6d3844d18e..9f8da850ff4 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -228,6 +228,8 @@ struct Par : public Opt { bool m_verify; // deadlock possible bool m_deadlock; + // abort percentabge + unsigned m_abortpct; // timer location Par(const Opt& opt) : Opt(opt), @@ -243,7 +245,8 @@ struct Par : public Opt { m_pctrange(0), m_randomkey(false), m_verify(false), - m_deadlock(false) { + m_deadlock(false), + m_abortpct(0) { } }; @@ -684,7 +687,7 @@ struct Con { NdbResultSet* m_resultset; enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive }; ScanMode m_scanmode; - enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther }; + enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther }; ErrType m_errtype; Con() : m_ndb(0), m_dic(0), m_tx(0), m_op(0), @@ -705,7 +708,7 @@ struct Con { int setValue(int num, const char* addr); int setBound(int num, int type, const void* value); int execute(ExecType t); - int execute(ExecType t, bool& deadlock); + int execute(ExecType t, bool& deadlock, bool& nospace); int openScanRead(unsigned scanbat, unsigned scanpar); int openScanExclusive(unsigned scanbat, unsigned scanpar); int executeScan(); @@ -818,17 +821,21 @@ Con::execute(ExecType t) } int -Con::execute(ExecType t, bool& deadlock) +Con::execute(ExecType t, bool& deadlock, bool& nospace) { int ret = execute(t); - if (ret != 0) { - if (deadlock && m_errtype == ErrDeadlock) { - LL3("caught deadlock"); - ret = 0; - } + if (ret != 0 && deadlock && m_errtype == ErrDeadlock) { + LL3("caught deadlock"); + ret = 0; } else { deadlock = false; } + if (ret != 0 && nospace && m_errtype == ErrNospace) { + LL3("caught nospace"); + ret = 0; + } else { + nospace = false; + } CHK(ret == 0); return 0; } @@ -940,6 +947,8 @@ Con::printerror(NdbOut& out) die += (code == g_opt.m_die); if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499) m_errtype = ErrDeadlock; + if (code == 826 || code == 827 || code == 902) + m_errtype = ErrNospace; } if (m_op && m_op->getNdbError().code != 0) { LL0(++any << " op : error " << m_op->getNdbError()); @@ -1128,6 +1137,16 @@ irandom(unsigned n) return i; } +static bool +randompct(unsigned pct) +{ + if (pct == 0) + return false; + if (pct >= 100) + return true; + return urandom(100) < pct; +} + // Val - typed column value struct Val { @@ -1565,8 +1584,8 @@ struct Set { // row methods bool exist(unsigned i) const; Row::Op pending(unsigned i) const; - void notpending(unsigned i); - void notpending(const Lst& lst); + void notpending(unsigned i, ExecType et = Commit); + void notpending(const Lst& lst, ExecType et = Commit); void calc(Par par, unsigned i); int insrow(Par par, unsigned i); int updrow(Par par, unsigned i); @@ -1775,23 +1794,30 @@ Set::putval(unsigned i, bool force) } void -Set::notpending(unsigned i) +Set::notpending(unsigned i, ExecType et) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) - row.m_exist = true; - if (row.m_pending == Row::DelOp) - row.m_exist = false; + if (et == Commit) { + if (row.m_pending == Row::InsOp) + row.m_exist = true; + if (row.m_pending == Row::DelOp) + row.m_exist = false; + } else { + if (row.m_pending == Row::InsOp) + row.m_exist = false; + if (row.m_pending == Row::DelOp) + row.m_exist = true; + } row.m_pending = Row::NoOp; } void -Set::notpending(const Lst& lst) 
+Set::notpending(const Lst& lst, ExecType et) { for (unsigned j = 0; j < lst.m_cnt; j++) { unsigned i = lst.m_arr[j]; - notpending(i); + notpending(i, et); } } @@ -2121,14 +2147,20 @@ pkinsert(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { bool deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + bool nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); con.closeTransaction(); if (deadlock) { LL1("pkinsert: stop on deadlock"); return 0; } + if (nospace) { + LL1("pkinsert: cnt=" << j << " stop on nospace"); + return 0; + } set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); @@ -2136,14 +2168,20 @@ pkinsert(Par par) } if (lst.cnt() != 0) { bool deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + bool nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); con.closeTransaction(); if (deadlock) { LL1("pkinsert: stop on deadlock"); return 0; } + if (nospace) { + LL1("pkinsert: end: stop on nospace"); + return 0; + } set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); return 0; } @@ -2160,6 +2198,7 @@ pkupdate(Par par) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -2175,27 +2214,37 @@ pkupdate(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkupdate: stop on deadlock"); break; } + if (nospace) { + LL1("pkupdate: cnt=" << j << " stop on nospace"); + break; + } con.closeTransaction(); set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); } } - if (! deadlock && lst.cnt() != 0) { + if (! deadlock && ! nospace && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkupdate: stop on deadlock"); + } else if (nospace) { + LL1("pkupdate: end: stop on nospace"); } else { set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); } } @@ -2212,6 +2261,7 @@ pkdelete(Par par) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -2226,27 +2276,31 @@ pkdelete(Par par) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkdelete: stop on deadlock"); break; } con.closeTransaction(); set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); } } - if (! deadlock && lst.cnt() != 0) { + if (! deadlock && ! 
nospace && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + nospace = true; + ExecType et = randompct(par.m_abortpct) ? Rollback : Commit; + CHK(con.execute(et, deadlock, nospace) == 0); if (deadlock) { LL1("pkdelete: stop on deadlock"); } else { set.lock(); - set.notpending(lst); + set.notpending(lst, et); set.unlock(); } } @@ -2730,6 +2784,10 @@ readverify(Par par) if (par.m_noverify) return 0; par.m_verify = true; + if (par.m_abortpct != 0) { + LL2("skip verify in this version"); // implement in 5.0 version + par.m_verify = false; + } CHK(pkread(par) == 0); CHK(scanreadall(par) == 0); return 0; @@ -3028,11 +3086,11 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode) for (n = 0; n < threads; n++) { LL4("start " << n); Thr& thr = *g_thrlist[n]; - thr.m_par.m_tab = par.m_tab; - thr.m_par.m_set = par.m_set; - thr.m_par.m_tmr = par.m_tmr; - thr.m_par.m_lno = par.m_lno; - thr.m_par.m_slno = par.m_slno; + Par oldpar = thr.m_par; + // update parameters + thr.m_par = par; + thr.m_par.m_no = oldpar.m_no; + thr.m_par.m_con = oldpar.m_con; thr.m_func = func; thr.start(); } @@ -3143,6 +3201,24 @@ tbusybuild(Par par) return 0; } +static int +trollback(Par par) +{ + par.m_abortpct = 50; + RUNSTEP(par, droptable, ST); + RUNSTEP(par, createtable, ST); + RUNSTEP(par, invalidatetable, MT); + RUNSTEP(par, pkinsert, MT); + RUNSTEP(par, createindex, ST); + RUNSTEP(par, invalidateindex, MT); + RUNSTEP(par, readverify, ST); + for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) { + RUNSTEP(par, mixedoperations, MT); + RUNSTEP(par, readverify, ST); + } + return 0; +} + static int ttimebuild(Par par) { @@ -3252,10 +3328,12 @@ struct TCase { static const TCase tcaselist[] = { TCase("a", tbuild, "index build"), - TCase("b", tpkops, "pk operations"), - TCase("c", tpkopsread, "pk operations and scan reads"), - TCase("d", tmixedops, "pk operations and scan operations"), - TCase("e", tbusybuild, "pk operations and index build"), + // "b" in 5.0 + TCase("c", tpkops, "pk operations"), + TCase("d", tpkopsread, "pk operations and scan reads"), + TCase("e", tmixedops, "pk operations and scan operations"), + TCase("f", tbusybuild, "pk operations and index build"), + TCase("g", trollback, "operations with random rollbacks"), TCase("t", ttimebuild, "time index build"), TCase("u", ttimemaint, "time index maintenance"), TCase("v", ttimescan, "time full scan table vs index on pk"), From 90df3f7ff7827f8b462a174d16b4dd588ab0eb94 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 23 Apr 2005 18:51:18 +0200 Subject: [PATCH 3/4] ndb - post merge 4.1->5.0 ndb/src/kernel/blocks/ERROR_codes.txt: post merge 4.1->5.0 ndb/src/ndbapi/ndberror.c: post merge 4.1->5.0 ndb/test/ndbapi/testOIBasic.cpp: post merge 4.1->5.0 --- ndb/src/kernel/blocks/ERROR_codes.txt | 3 +- ndb/src/ndbapi/ndberror.c | 1 - ndb/test/ndbapi/testOIBasic.cpp | 112 +++++++++----------------- 3 files changed, 38 insertions(+), 78 deletions(-) diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index a30021607cc..fedddb58c0d 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -10,7 +10,7 @@ Next DBTC 8035 Next CMVMI 9000 Next BACKUP 10022 Next DBUTIL 11002 -Next DBTUX 12007 +Next DBTUX 12008 Next SUMA 13001 TESTING NODE FAILURE, ARBITRATION @@ -443,6 +443,7 @@ Test routing of signals: Ordered index: -------------- +12007: Make next alloc node fail with no memory error Dbdict: ------- diff --git a/ndb/src/ndbapi/ndberror.c 
b/ndb/src/ndbapi/ndberror.c index cb8af1e26f6..ac9bda5a9f3 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -184,7 +184,6 @@ ErrorBundle ErrorCodes[] = { { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" }, { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" }, { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" }, - { 902, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" }, { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" }, { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" }, { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index e9e6e7638d3..c7c9f417d1a 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -164,6 +164,16 @@ irandom(unsigned n) return i; } +static bool +randompct(unsigned pct) +{ + if (pct == 0) + return false; + if (pct >= 100) + return true; + return urandom(100) < pct; +} + // log and error macros static NdbMutex *ndbout_mutex = NULL; @@ -1653,36 +1663,6 @@ createindex(Par par) // data sets -static unsigned -urandom(unsigned n) -{ - if (n == 0) - return 0; - unsigned i = random() % n; - return i; -} - -static int -irandom(unsigned n) -{ - if (n == 0) - return 0; - int i = random() % n; - if (random() & 0x1) - i = -i; - return i; -} - -static bool -randompct(unsigned pct) -{ - if (pct == 0) - return false; - if (pct >= 100) - return true; - return urandom(100) < pct; -} - // Val - typed column value struct Val { @@ -2659,26 +2639,30 @@ Set::pending(unsigned i, unsigned mask) const } void -Set::notpending(unsigned i) +Set::notpending(unsigned i, ExecType et) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) { - row.m_exist = true; - } else if (row.m_pending == Row::UpdOp) { - ; - } else if (row.m_pending == Row::DelOp) { - row.m_exist = false; + if (et == Commit) { + if (row.m_pending == Row::InsOp) + row.m_exist = true; + if (row.m_pending == Row::DelOp) + row.m_exist = false; + } else { + if (row.m_pending == Row::InsOp) + row.m_exist = false; + if (row.m_pending == Row::DelOp) + row.m_exist = true; } row.m_pending = Row::NoOp; } void -Set::notpending(const Lst& lst) +Set::notpending(const Lst& lst, ExecType et) { for (unsigned j = 0; j < lst.m_cnt; j++) { unsigned i = lst.m_arr[j]; - notpending(i); + notpending(i, et); } } @@ -2870,34 +2854,6 @@ Set::putval(unsigned i, bool force, unsigned n) return 0; } -void -Set::notpending(unsigned i, ExecType et) -{ - assert(m_row[i] != 0); - Row& row = *m_row[i]; - if (et == Commit) { - if (row.m_pending == Row::InsOp) - row.m_exist = true; - if (row.m_pending == Row::DelOp) - row.m_exist = false; - } else { - if (row.m_pending == Row::InsOp) - row.m_exist = false; - if (row.m_pending == Row::DelOp) - row.m_exist = true; - } - row.m_pending = Row::NoOp; -} - -void -Set::notpending(const Lst& lst, ExecType et) -{ - for (unsigned j = 0; j < lst.m_cnt; j++) { - unsigned i = lst.m_arr[j]; - notpending(i, et); - } -} - int Set::verify(Par par, const Set& set2) const { @@ -3511,6 +3467,7 @@ hashindexupdate(Par par, const ITab& itab) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? 
j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -3528,7 +3485,7 @@ hashindexupdate(Par par, const ITab& itab) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexupdate: stop on deadlock [at 1]"); break; @@ -3544,9 +3501,9 @@ hashindexupdate(Par par, const ITab& itab) } if (! deadlock && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { - LL1("hashindexupdate: stop on deadlock [at 1]"); + LL1("hashindexupdate: stop on deadlock [at 2]"); } else { set.lock(); set.notpending(lst); @@ -3567,6 +3524,7 @@ hashindexdelete(Par par, const ITab& itab) CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); @@ -3581,7 +3539,7 @@ hashindexdelete(Par par, const ITab& itab) lst.push(i); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexdelete: stop on deadlock [at 1]"); break; @@ -3596,7 +3554,7 @@ hashindexdelete(Par par, const ITab& itab) } if (! deadlock && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con.execute(Commit, deadlock) == 0); + CHK(con.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("hashindexdelete: stop on deadlock [at 2]"); } else { @@ -3968,6 +3926,7 @@ scanupdatetable(Par par) CHK(con2.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; while (1) { int ret; deadlock = par.m_deadlock; @@ -4003,7 +3962,7 @@ scanupdatetable(Par par) set.unlock(); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdatetable: stop on deadlock [at 2]"); goto out; @@ -4020,7 +3979,7 @@ scanupdatetable(Par par) CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); if (ret == 2 && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdatetable: stop on deadlock [at 3]"); goto out; @@ -4067,6 +4026,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK(con2.startTransaction() == 0); Lst lst; bool deadlock = false; + bool nospace = false; while (1) { int ret; deadlock = par.m_deadlock; @@ -4102,7 +4062,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) set.unlock(); if (lst.cnt() == par.m_batch) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdateindex: stop on deadlock [at 2]"); goto out; @@ -4119,7 +4079,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2); if (ret == 2 && lst.cnt() != 0) { deadlock = par.m_deadlock; - CHK(con2.execute(Commit, deadlock) == 0); + CHK(con2.execute(Commit, deadlock, nospace) == 0); if (deadlock) { LL1("scanupdateindex: stop on deadlock [at 3]"); goto out; From ea49703e0b50c2fb3f1d89513d5e059cdd80ccac Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 24 Apr 2005 22:37:43 +0200 Subject: [PATCH 4/4] Informing of 
deprecation of mysql_tableinfo by INFORMATION_SCHEMA (and I'm thinking of removing the script in 5.1). scripts/mysql_tableinfo.sh: Informing of deprecation of mysql_tableinfo by INFORMATION_SCHEMA --- scripts/mysql_tableinfo.sh | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/scripts/mysql_tableinfo.sh b/scripts/mysql_tableinfo.sh index f5083a776c6..2ed7e381fa3 100644 --- a/scripts/mysql_tableinfo.sh +++ b/scripts/mysql_tableinfo.sh @@ -6,6 +6,14 @@ use DBI; =head1 NAME +WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA +pseudo-database which contains always up-to-date metadata information +about all tables. So instead of using this script one can now +simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES, +INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables. +Please see the MySQL manual for more information about INFORMATION_SCHEMA. +This script will be removed from the MySQL distribution in version 5.1. + mysql_tableinfo - creates and populates information tables with the output of SHOW DATABASES, SHOW TABLES (or SHOW TABLE STATUS), SHOW COLUMNS and SHOW INDEX. @@ -62,6 +70,19 @@ GetOptions( \%opt, "quiet|q", ) or usage("Invalid option"); +if (!$opt{'quiet'}) + { + print <quote($tbl_like_wild); if (!$opt{'quiet'}) { - print "\n!! This program is doing to do:\n\n"; + print "\n!! This program is going to do:\n\n"; print "**DROP** TABLE ...\n" if ($opt{'clear'} or $opt{'clear-only'}); print "**DELETE** FROM ... WHERE `Database` LIKE $db_like_wild AND `Table` LIKE $tbl_like_wild **INSERT** INTO ... @@ -456,17 +477,14 @@ UNIX domain socket to use when connecting to server =head1 WARRANTY -This software is free and comes without warranty of any kind. You -should never trust backup software without studying the code yourself. -Study the code inside this script and only rely on it if I believe -that it does the right thing for you. +This software is free and comes without warranty of any kind. Patches adding bug fixes, documentation and new features are welcome. =head1 TO DO -Use extended inserts to be faster (for servers with many databases -or tables). But to do that, must care about net-buffer-length. +Nothing: starting from MySQL 5.0, this program is replaced by the +INFORMATION_SCHEMA pseudo-database. =head1 AUTHOR