Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0
into  poseidon.mysql.com:/home/tomas/mysql-5.0-ndb

ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  Auto merged
ndb/src/mgmsrv/MgmtSrvr.cpp:
  Auto merged
ndb/src/ndbapi/NdbTransaction.cpp:
  Auto merged
ndb/src/ndbapi/Ndbif.cpp:
  Auto merged
ndb/test/ndbapi/testBasic.cpp:
  Auto merged
ndb/test/ndbapi/testIndex.cpp:
  Auto merged
@@ -133,6 +133,21 @@ a b c
 6 7 2
 7 8 3
 8 2 3
+create unique index bi using hash on t2(b);
+insert into t2 values(9, 3, 1);
+ERROR 23000: Duplicate entry '' for key 0
+alter table t2 drop index bi;
+insert into t2 values(9, 3, 1);
+select * from t2 order by a;
+a b c
+2 3 5
+3 4 6
+4 5 8
+5 6 2
+6 7 2
+7 8 3
+8 2 3
+9 3 1
 drop table t2;
 CREATE TABLE t2 (
 a int unsigned NOT NULL PRIMARY KEY,
@@ -83,6 +83,14 @@ delete from t2 where a = 1;
 insert into t2 values(8, 2, 3);
 select * from t2 order by a;
 
+# Bug #24818 CREATE UNIQUE INDEX (...) USING HASH on a NDB table crashes mysqld
+create unique index bi using hash on t2(b);
+-- error 1062
+insert into t2 values(9, 3, 1);
+alter table t2 drop index bi;
+insert into t2 values(9, 3, 1);
+select * from t2 order by a;
+
 drop table t2;
 
 CREATE TABLE t2 (
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:

@@ -5083,7 +5083,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
     ptrAss(tcConnectptr, tcConnectRecord);
     TcConnectRecord * const regTcPtr = tcConnectptr.p;
     if (regTcPtr->tcConnectstate == OS_OPERATING) {
-      apiConnectptr.i = regTcPtr->apiConnect;
+      Uint32 save = apiConnectptr.i = regTcPtr->apiConnect;
       ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
       ApiConnectRecord * const regApiPtr = apiConnectptr.p;
       compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1;
@@ -5194,7 +5194,7 @@ void Dbtc::execLQHKEYREF(Signal* signal)
           regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
           tcKeyRef->connectPtr = indexOp;
           EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
-          apiConnectptr.i = regTcPtr->apiConnect;
+          apiConnectptr.i = save;
           apiConnectptr.p = regApiPtr;
         } else {
           jam();
@@ -5219,6 +5219,8 @@ void Dbtc::execLQHKEYREF(Signal* signal)
           jam();
           sendtckeyconf(signal, 1);
           regApiPtr->apiConnectstate = CS_CONNECTED;
+          regApiPtr->m_transaction_nodes.clear();
+          setApiConTimer(apiConnectptr.i, 0,__LINE__);
         }
         return;
       } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
@@ -11877,17 +11879,6 @@ void Dbtc::execTCKEYREF(Signal* signal)
   case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI):
   case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
     jam();
-    // If we fail index access for a non-read operation during commit
-    // we abort transaction
-    if (commitFlg == 1) {
-      jam();
-      releaseIndexOperation(regApiPtr, indexOp);
-      apiConnectptr.i = indexOp->connectionIndex;
-      ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-      terrorCode = tcKeyRef->errorCode;
-      abortErrorLab(signal);
-      break;
-    }
     /**
      * Increase count as it will be decreased below...
      * (and the code is written to handle failing lookup on "real" table
ndb/src/mgmsrv/MgmtSrvr.cpp:

@@ -1912,7 +1912,7 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete)
     m_started_nodes.push_back(nodeId);
     rep->setEventType(NDB_LE_Connected);
   } else {
-    rep->setEventType(NDB_LE_Connected);
+    rep->setEventType(NDB_LE_Disconnected);
     if(nfComplete)
     {
       DBUG_VOID_RETURN;
ndb/src/ndbapi/NdbTransaction.cpp:

@@ -473,6 +473,7 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
        * This timeout situation can occur if NDB crashes.
        */
       ndbout << "This timeout should never occur, execute(..)" << endl;
+      theError.code = 4012;
       setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
       DBUG_RETURN(-1);
     }//if
@@ -1965,6 +1966,14 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
     theGlobalCheckpointId = tGCI;
   } else if ((tNoComp >= tNoSent) &&
              (theLastExecOpInList->theCommitIndicator == 1)){
+
+    if (m_abortOption == AO_IgnoreError && theError.code != 0){
+      /**
+       * There's always a TCKEYCONF when using IgnoreError
+       */
+      return -1;
+    }
+
     /**********************************************************************/
     // We sent the transaction with Commit flag set and received a CONF with
     // no Commit flag set. This is clearly an anomaly.
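The two NdbTransaction.cpp hunks above are what the new test cases (runBug25090 in testBasic.cpp and runBug25059 in testIndex.cpp, further down in this changeset) exercise from the API side: a transaction is executed with AO_IgnoreError, and NDB error code 4012 ("Cluster Failure") on the transaction is treated as a failure. The following is only a minimal client-side sketch of that check, written against the 5.0-era NDB API; the table t2, its columns and the key value used here are illustrative assumptions, not part of the patch.

// Sketch only (not part of this changeset): execute with AO_IgnoreError and
// look for error 4012 on the transaction, as runBug25090/runBug25059 do via
// HugoOperations. Error handling on the individual calls is omitted.
#include <NdbApi.hpp>

int check_cluster_failure(Ndb* pNdb)
{
  NdbTransaction* pTrans = pNdb->startTransaction();
  if (pTrans == 0)
    return -1;                                       // no transaction object available

  NdbOperation* pOp = pTrans->getNdbOperation("t2"); // "t2" is an assumed table name
  pOp->readTuple();
  pOp->equal("a", (Uint32)1);                        // assumed int primary key column
  pOp->getValue("b");                                // assumed payload column

  // With AO_IgnoreError, per-operation errors do not abort the transaction,
  // but a non-zero execute() result whose transaction error code is 4012
  // ("Cluster Failure", see the comment in executeNoBlobs above) means the
  // cluster itself went away and the transaction cannot be trusted.
  int ret = pTrans->execute(Commit, AO_IgnoreError);
  bool clusterFailed = (ret != 0 && pTrans->getNdbError().code == 4012);

  pNdb->closeTransaction(pTrans);
  return clusterFailed ? -1 : 0;
}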
ndb/src/ndbapi/Ndbif.cpp:

@@ -951,6 +951,7 @@ Ndb::check_send_timeout()
       //abort();
 #endif
       a_con->theReleaseOnClose = true;
+      a_con->theError.code = 4012;
       a_con->setOperationErrorCodeAbort(4012);
       a_con->theCommitStatus = NdbTransaction::NeedAbort;
       a_con->theCompletionStatus = NdbTransaction::CompletedFailure;
ndb/test/ndbapi/testBasic.cpp:

@@ -1033,6 +1033,28 @@ runMassiveRollback2(NDBT_Context* ctx, NDBT_Step* step){
   return result;
 }
 
+int
+runBug25090(NDBT_Context* ctx, NDBT_Step* step){
+
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+
+  HugoOperations ops(*ctx->getTab());
+
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+
+  while (loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkReadRecord(pNdb, 1, 1);
+    ops.execute_Commit(pNdb, AO_IgnoreError);
+    sleep(10);
+    ops.closeTransaction(pNdb);
+  }
+
+  return NDBT_OK;
+}
 
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert",
@@ -1276,6 +1298,10 @@ TESTCASE("Fill",
   INITIALIZER(runPkRead);
   FINALIZER(runClearTable2);
 }
+TESTCASE("Bug25090",
+  "Verify what happens when we fill the db" ){
+  STEP(runBug25090);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0
ndb/test/ndbapi/testIndex.cpp:

@@ -1238,7 +1238,64 @@ runBug21384(NDBT_Context* ctx, NDBT_Step* step)
   return NDBT_OK;
 }
 
+int
+runBug25059(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+  const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
+
+  HugoOperations ops(*ctx->getTab(), idx);
+
+  int res = NDBT_OK;
+  int loops = ctx->getNumLoops();
+  const int rows = ctx->getNumRecords();
+
+  while (res == NDBT_OK && loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkReadRecord(pNdb, 10 + rand() % rows, rows);
+    int tmp;
+    if (tmp = ops.execute_Commit(pNdb, AO_IgnoreError))
+    {
+      if (tmp == 4012)
+        res = NDBT_FAILED;
+      else
+        if (ops.getTransaction()->getNdbError().code == 4012)
+          res = NDBT_FAILED;
+    }
+    ops.closeTransaction(pNdb);
+  }
+
+  loops = ctx->getNumLoops();
+  while (res == NDBT_OK && loops--)
+  {
+    ops.startTransaction(pNdb);
+    ops.pkUpdateRecord(pNdb, 10 + rand() % rows, rows);
+    int tmp;
+    int arg;
+    switch(rand() % 2){
+    case 0:
+      arg = AbortOnError;
+      break;
+    case 1:
+      arg = AO_IgnoreError;
+      ndbout_c("ignore error");
+      break;
+    }
+    if (tmp = ops.execute_Commit(pNdb, (AbortOption)arg))
+    {
+      if (tmp == 4012)
+        res = NDBT_FAILED;
+      else
+        if (ops.getTransaction()->getNdbError().code == 4012)
+          res = NDBT_FAILED;
+    }
+    ops.closeTransaction(pNdb);
+  }
+
+  return res;
+}
 
 NDBT_TESTSUITE(testIndex);
 TESTCASE("CreateAll",
@@ -1563,6 +1620,14 @@ TESTCASE("Bug21384",
   FINALIZER(createPkIndex_Drop);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug25059",
+  "Test that unique indexes and nulls"){
+  TC_PROPERTY("LoggedIndexes", (unsigned)0);
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug25059);
+  FINALIZER(createPkIndex_Drop);
+}
 NDBT_TESTSUITE_END(testIndex);
 
 int main(int argc, const char** argv){
@@ -211,6 +211,14 @@ max-time: 500
 cmd: testTimeout
 args: T1
 
+max-time: 500
+cmd: testBasic
+args: -n Bug25090 T1
+
+max-time: 500
+cmd: testIndex
+args: -n Bug25059 -r 3000 T1
+
 # SCAN TESTS
 #
 max-time: 500