Merge bk-internal.mysql.com:/home/bk/mysql-4.1/
into serg.mylan:/usr/home/serg/Abk/mysql-4.1
@@ -1513,6 +1513,8 @@ fil_decr_pending_ibuf_merges(
 	mutex_exit(&(system->mutex));
 }
 
+/************************************************************
+Creates the database directory for a table if it does not exist yet. */
 static
 void
 fil_create_directory_for_tablename(
@@ -1450,9 +1450,11 @@ then
 then
   echo "Starting ndbcluster"
   ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
-  export NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT"
+  NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT"
+  export NDB_CONNECTSTRING
 else
-  export NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
+  NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
+  export NDB_CONNECTSTRING
   echo "Using ndbcluster at $NDB_CONNECTSTRING"
 fi
 fi
@@ -86,7 +86,6 @@ fs_name_1=$fs_ndb/node-1-fs
 fs_name_2=$fs_ndb/node-2-fs
 
 NDB_HOME=
-export NDB_CONNECTSTRING
 if [ ! -x $fsdir ]; then
   echo "$fsdir missing"
   exit 1
@@ -102,7 +101,8 @@ fi
 
 ndb_host="localhost"
 ndb_mgmd_port=$port_base
-export NDB_CONNECTSTRING="host=$ndb_host:$ndb_mgmd_port"
+NDB_CONNECTSTRING="host=$ndb_host:$ndb_mgmd_port"
+export NDB_CONNECTSTRING
 
 start_default_ndbcluster() {
 
@@ -15,7 +15,7 @@ col2 varchar(30) not null,
 col3 varchar (20) not null,
 col4 varchar(4) not null,
 col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null,
-col6 int not null, to_be_deleted int);
+col6 int not null, to_be_deleted int) ENGINE=ndbcluster;
 insert into t1 values (2,4,3,5,"PENDING",1,7);
 alter table t1
 add column col4_5 varchar(20) not null after col4,
@@ -6,20 +6,20 @@ attr2 INT,
 attr3 VARCHAR(10)
 ) ENGINE=ndbcluster;
 INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
-SELECT pk1 FROM t1;
+SELECT pk1 FROM t1 ORDER BY pk1;
 pk1
 9410
 9411
-SELECT * FROM t1;
+SELECT * FROM t1 ORDER BY pk1;
 pk1 attr1 attr2 attr3
 9410 9412 NULL 9412
 9411 9413 17 9413
-SELECT t1.* FROM t1;
+SELECT t1.* FROM t1 ORDER BY pk1;
 pk1 attr1 attr2 attr3
 9410 9412 NULL 9412
 9411 9413 17 9413
 UPDATE t1 SET attr1=1 WHERE pk1=9410;
-SELECT * FROM t1;
+SELECT * FROM t1 ORDER BY pk1;
 pk1 attr1 attr2 attr3
 9410 1 NULL 9412
 9411 9413 17 9413
@@ -115,13 +115,17 @@ SELECT * FROM t1;
 id id2
 1234 7890
 DELETE FROM t1;
-INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890);
-SELECT * FROM t1;
+INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890);
+SELECT * FROM t1 ORDER BY id;
 id id2
+3454 7890
 3456 7890
 3456 7890
 3456 7890
 DELETE FROM t1 WHERE id = 3456;
+SELECT * FROM t1 ORDER BY id;
+id id2
+3454 7890
 DROP TABLE t1;
 CREATE TABLE t1 (
 pk1 INT NOT NULL PRIMARY KEY,
@@ -11,6 +11,11 @@ x y
 2 two
 start transaction;
 insert into t1 values (3,'three');
+select * from t1 order by x;
+x y
+1 one
+2 two
+3 three
 start transaction;
 select * from t1 order by x;
 x y
@@ -29,7 +29,7 @@ col2 varchar(30) not null,
 col3 varchar (20) not null,
 col4 varchar(4) not null,
 col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null,
-col6 int not null, to_be_deleted int);
+col6 int not null, to_be_deleted int) ENGINE=ndbcluster;
 insert into t1 values (2,4,3,5,"PENDING",1,7);
 alter table t1
 add column col4_5 varchar(20) not null after col4,
@@ -21,13 +21,13 @@ CREATE TABLE t1 (
 
 INSERT INTO t1 VALUES (9410,9412, NULL, '9412'), (9411,9413, 17, '9413');
 
-SELECT pk1 FROM t1;
-SELECT * FROM t1;
-SELECT t1.* FROM t1;
+SELECT pk1 FROM t1 ORDER BY pk1;
+SELECT * FROM t1 ORDER BY pk1;
+SELECT t1.* FROM t1 ORDER BY pk1;
 
 # Update on record by primary key
 UPDATE t1 SET attr1=1 WHERE pk1=9410;
-SELECT * FROM t1;
+SELECT * FROM t1 ORDER BY pk1;
 
 # Update primary key
 UPDATE t1 SET pk1=2 WHERE attr1=1;
@@ -85,9 +85,10 @@ UPDATE t1 SET id=1234 WHERE id2=7890;
 SELECT * FROM t1;
 DELETE FROM t1;
 
-INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890);
-SELECT * FROM t1;
+INSERT INTO t1 values(3456, 7890), (3456, 7890), (3456, 7890), (3454, 7890);
+SELECT * FROM t1 ORDER BY id;
 DELETE FROM t1 WHERE id = 3456;
+SELECT * FROM t1 ORDER BY id;
 
 DROP TABLE t1;
 
@@ -25,10 +25,13 @@ connection con2;
 select * from t1 order by x;
 
 connection con1;
-start transaction; insert into t1 values (3,'three');
+start transaction;
+insert into t1 values (3,'three');
 select * from t1 order by x;
 
 connection con2;
-start transaction; select * from t1 order by x;
+start transaction;
+select * from t1 order by x;
 
 connection con1;
 commit;
@@ -23,6 +23,7 @@ ndbapi/NdbReceiver.hpp \
 ndbapi/NdbResultSet.hpp \
 ndbapi/NdbScanFilter.hpp \
 ndbapi/NdbScanOperation.hpp \
+ndbapi/NdbIndexScanOperation.hpp \
 ndbapi/ndberror.h
 
 mgmapiinclude_HEADERS = \
@@ -762,7 +762,7 @@ BitmaskPOD<size>::overlaps(BitmaskPOD<size> that)
 template <unsigned size>
 class Bitmask : public BitmaskPOD<size> {
 public:
-  Bitmask() { clear();}
+  Bitmask() { this->clear();}
 };
 
 #endif
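The one-line Bitmask fix above is a C++ two-phase name lookup issue: inside a class template, an unqualified call such as clear() does not search the dependent base class BitmaskPOD<size>, so conforming compilers reject it; writing this->clear() makes the call a dependent name that is resolved at instantiation. A minimal standalone illustration (hypothetical names, not the NDB classes):

#include <cstdio>

template <unsigned size>
struct BasePOD {
  unsigned data[size];
  void clear() {
    for (unsigned i = 0; i < size; i++)
      data[i] = 0;
  }
};

template <unsigned size>
struct Derived : public BasePOD<size> {
  Derived() {
    // clear();      // error: unqualified lookup ignores the dependent base
    this->clear();   // OK: "this->" defers lookup to instantiation time
  }
};

int main() {
  Derived<4> d;      // instantiates the template; d.data is zeroed
  printf("%u\n", d.data[0]);
  return 0;
}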
@@ -218,6 +218,7 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
 #define ZREL_FRAG 6
 #define ZREL_DIR 7
 #define ZREPORT_MEMORY_USAGE 8
+#define ZLCP_OP_WRITE_RT_BREAK 9
 
 /* ------------------------------------------------------------------------- */
 /* ERROR CODES */
@@ -1190,6 +1191,7 @@ private:
   void zpagesize_error(const char* where);
 
   void reportMemoryUsage(Signal* signal, int gth);
+  void lcp_write_op_to_undolog(Signal* signal);
 
 
   // Initialisation
@@ -46,13 +46,17 @@ Dbacc::remainingUndoPages(){
   ndbrequire(HeadPage>=TailPage);
 
   Uint32 UsedPages = HeadPage - TailPage;
-  Uint32 Remaining = cundopagesize - UsedPages;
+  Int32 Remaining = cundopagesize - UsedPages;
 
   // There can not be more than cundopagesize remaining
   ndbrequire(Remaining<=cundopagesize);
-
+  if (Remaining <= 0){
+    // No more undolog, crash node
+    progError(__LINE__,
+              ERR_NO_MORE_UNDOLOG,
+              "There are more than 1Mbyte undolog writes outstanding");
+  }
   return Remaining;
-}//Dbacc::remainingUndoPages()
-
+}
 void
 Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){
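The Uint32 to Int32 change above matters because the subtraction is otherwise done entirely in unsigned arithmetic: if UsedPages ever exceeds cundopagesize, the unsigned result wraps to a huge positive value, and a guard like "Remaining <= 0" on an unsigned type can only fire when the value is exactly zero. A standalone illustration (the values are made up for the example):

#include <cstdio>

int main() {
  unsigned int cundopagesize = 100;  // total undo pages (illustrative)
  unsigned int UsedPages     = 105;  // more pages in use than exist

  // Unsigned difference wraps around instead of going negative:
  unsigned int asUnsigned = cundopagesize - UsedPages;  // 4294967291

  // Keeping the difference in a signed 32-bit int preserves the sign,
  // so the out-of-undolog condition is actually detectable:
  int asSigned = (int)(cundopagesize - UsedPages);      // -5

  printf("unsigned: %u  signed: %d  (signed <= 0 is %s)\n",
         asUnsigned, asSigned, asSigned <= 0 ? "true" : "false");
  return 0;
}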
@@ -193,6 +197,17 @@ void Dbacc::execCONTINUEB(Signal* signal)
     return;
   }
 
+  case ZLCP_OP_WRITE_RT_BREAK:
+  {
+    operationRecPtr.i= signal->theData[1];
+    fragrecptr.i= signal->theData[2];
+    lcpConnectptr.i= signal->theData[3];
+    ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+    ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+    ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+    lcp_write_op_to_undolog(signal);
+    return;
+  }
   default:
     ndbrequire(false);
     break;
@@ -7697,32 +7712,70 @@ void Dbacc::execACC_LCPREQ(Signal* signal)
   fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex;
   fragrecptr.p->createLcp = ZTRUE;
   operationRecPtr.i = fragrecptr.p->lockOwnersList;
-  while (operationRecPtr.i != RNIL) {
-    jam();
-    ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+  lcp_write_op_to_undolog(signal);
+}
 
-    if ((operationRecPtr.p->operation == ZINSERT) ||
-        (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
+void
+Dbacc::lcp_write_op_to_undolog(Signal* signal)
+{
+  bool delay_continueb= false;
+  Uint32 i, j;
+  for (i= 0; i < 16; i++) {
+    jam();
+    if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) {
+      jam();
+      delay_continueb= true;
+      break;
+    }
+    for (j= 0; j < 32; j++) {
+      if (operationRecPtr.i == RNIL) {
+        jam();
+        break;
+      }
+      jam();
+      ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+
+      if ((operationRecPtr.p->operation == ZINSERT) ||
+          (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
       /*******************************************************************
        * Only log inserts and elements that are marked as dissapeared.
        * All other operations update the element header and that is handled
        * when pages are written to disk
        ********************************************************************/
-      undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
-      ptrAss(undopageptr, undopage);
-      theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
-      tundoindex = theadundoindex + ZUNDOHEADSIZE;
+        undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
+        ptrAss(undopageptr, undopage);
+        theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+        tundoindex = theadundoindex + ZUNDOHEADSIZE;
 
-      writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
-                              /* IN OP REC, IS WRITTEN AT UNDO PAGES */
-      cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */
-      writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
-      checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
+        writeUndoOpInfo(signal);/* THE INFORMATION ABOUT ELEMENT HEADER, STORED*/
+                                /* IN OP REC, IS WRITTEN AT UNDO PAGES */
+        cundoElemIndex = 0;/* DEFAULT VALUE USED BY WRITE_UNDO_HEADER SUBROTINE */
+        writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
+        checkUndoPages(signal); /* SEND UNDO PAGE TO DISK WHEN A GROUP OF */
                                 /* UNDO PAGES,CURRENTLY 8, IS FILLED */
       }//if
 
-    operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
-  }//while
+      operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
+    }
+    if (operationRecPtr.i == RNIL) {
+      jam();
+      break;
+    }
+  }
+  if (operationRecPtr.i != RNIL) {
+    jam();
+    signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK;
+    signal->theData[1]= operationRecPtr.i;
+    signal->theData[2]= fragrecptr.i;
+    signal->theData[3]= lcpConnectptr.i;
+    if (delay_continueb) {
+      jam();
+      sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4);
+    } else {
+      jam();
+      sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+    }
+    return;
+  }
 
   signal->theData[0] = fragrecptr.p->lcpLqhPtr;
   sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED,
@@ -7735,8 +7788,7 @@ void Dbacc::execACC_LCPREQ(Signal* signal)
   signal->theData[0] = lcpConnectptr.i;
   signal->theData[1] = fragrecptr.i;
   sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
   return;
-}//Dbacc::execACC_LCPREQ()
+}
 
 /* ******************--------------------------------------------------------------- */
 /* ACC_SAVE_PAGES                        A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */
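The refactoring in the two hunks above is a "real-time break": instead of walking the whole lock-owner list in one pass, lcp_write_op_to_undolog() logs at most 16 x 32 operations per invocation, then posts a CONTINUEB signal (ZLCP_OP_WRITE_RT_BREAK) carrying its position so the scheduler can run other jobs in between, adding a 10 ms delay when undo pages run low. A schematic sketch of the pattern under generic names (a queue stands in for the block scheduler; this is not the NDB signal API):

#include <cstdio>
#include <deque>
#include <functional>

struct Scheduler {                       // stands in for the block's job queue
  std::deque<std::function<void()> > q;
  void post(std::function<void()> f) { q.push_back(f); }
  void run() {
    while (!q.empty()) {
      std::function<void()> f = q.front();
      q.pop_front();
      f();                               // other signals would interleave here
    }
  }
};

static void writeOps(Scheduler& s, std::deque<int>& ops) {
  const int kMaxPerSlice = 16 * 32;      // same per-slice bound as the diff
  for (int i = 0; i < kMaxPerSlice && !ops.empty(); i++)
    ops.pop_front();                     // "write one operation to the undo log"
  if (!ops.empty())
    s.post([&s, &ops] { writeOps(s, ops); });  // analogous to CONTINUEB
  else
    printf("done\n");                    // completion report (ACC_LCPSTARTED in the real code)
}

int main() {
  Scheduler s;
  std::deque<int> ops(5000, 0);
  s.post([&s, &ops] { writeOps(s, ops); });
  s.run();
  return 0;
}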
@@ -8595,12 +8647,6 @@ void Dbacc::checkUndoPages(Signal* signal)
    * RECORDS IN
    */
   Uint16 nextUndoPageId = tundoPageId + 1;
-  if (nextUndoPageId > (clastUndoPageIdWritten + cundopagesize)){
-    // No more undolog, crash node
-    progError(__LINE__,
-              ERR_NO_MORE_UNDOLOG,
-              "There are more than 1Mbyte undolog writes outstanding");
-  }
   updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS);
 
   if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) {
@@ -998,7 +998,12 @@ public:
    *  It will receive max 16 tuples in each request
    */
   struct ScanFragRec {
-    ScanFragRec(){}
+    ScanFragRec(){
+      stopFragTimer();
+      lqhBlockref = 0;
+      scanFragState = IDLE;
+      scanRec = RNIL;
+    }
     /**
      * ScanFragState
      *  WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
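The non-empty constructor above gives every ScanFragRec a defined idle state; before the change, the members of a freshly seized record were indeterminate, so code inspecting scanFragState could read garbage. The idiom in miniature (hypothetical types, not the DBTC record layout):

#include <cstdio>

typedef unsigned int Uint32;
static const Uint32 RNIL = 0xffffff00;   // NDB-style "null record" sentinel

struct ScanRec {
  enum State { IDLE, RUNNING } state;
  Uint32 blockref;
  Uint32 scanRec;
  // Initialize in the constructor so a never-used slot still compares
  // equal to IDLE instead of holding an indeterminate value.
  ScanRec() : state(IDLE), blockref(0), scanRec(RNIL) {}
};

int main() {
  ScanRec r;
  printf("idle=%d\n", r.state == ScanRec::IDLE);
  return 0;
}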
@@ -187,7 +187,7 @@ NDB_MAIN(mgmsrv){
 	  "Please check if the port is already used,\n"
 	  "(perhaps a mgmtsrvr is already running),\n"
 	  "and if you are executing on the correct computer",
-	  glob.interface_name, glob.port);
+	  (glob.interface_name ? glob.interface_name : "*"), glob.port);
   goto error_end;
 }
 free(glob.interface_name);
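The ternary added above guards the %s conversion: glob.interface_name is legitimately NULL when no interface was configured, and passing a null pointer for %s is undefined behavior (glibc happens to print "(null)", other C libraries crash). In isolation:

#include <cstdio>

int main() {
  const char* interface_name = 0;   // no interface configured

  // printf("%s", interface_name) would be undefined behavior here;
  // substituting a placeholder keeps the call well defined.
  // (The port number is an arbitrary example value.)
  printf("listening on %s:%d\n",
         interface_name ? interface_name : "*", 1186);
  return 0;
}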
@@ -1452,7 +1452,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
     alterTable(&tSignal, ptr)
     : createTable(&tSignal, ptr);
 
-  if (haveAutoIncrement) {
+  if (!alter && haveAutoIncrement) {
     // if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) {
     if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) {
       m_error.code = 4336;
@@ -589,13 +589,14 @@ Ndb::releaseSignal(NdbApiSignal* aSignal)
 #if defined VM_TRACE
   // Check that signal is not null
   assert(aSignal != NULL);
-
+#if 0
   // Check that signal is not already in list
   NdbApiSignal* tmp = theSignalIdleList;
   while (tmp != NULL){
     assert(tmp != aSignal);
     tmp = tmp->next();
   }
+#endif
 #endif
   creleaseSignals++;
   aSignal->next(theSignalIdleList);
@@ -34,7 +34,8 @@ public:
 		int records,
 		int batch = 512,
 		bool allowConstraintViolation = true,
-		int doSleep = 0);
+		int doSleep = 0,
+		bool oneTrans = false);
   int scanReadRecords(Ndb*,
 		      int records,
 		      int abort = 0,
@@ -29,9 +29,18 @@
  * delete should be visible to same transaction
  *
  */
+int runLoadTable2(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int records = ctx->getNumRecords();
+  HugoTransactions hugoTrans(*ctx->getTab());
+  if (hugoTrans.loadTable(GETNDB(step), records, 512, false, 0, true) != 0){
+    return NDBT_FAILED;
+  }
+  return NDBT_OK;
+}
 
-int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
+int runLoadTable(NDBT_Context* ctx, NDBT_Step* step)
+{
   int records = ctx->getNumRecords();
   HugoTransactions hugoTrans(*ctx->getTab());
   if (hugoTrans.loadTable(GETNDB(step), records) != 0){
@@ -1255,6 +1264,11 @@ TESTCASE("MassiveRollback2",
 	 INITIALIZER(runMassiveRollback2);
 	 FINALIZER(runClearTable2);
 }
+TESTCASE("MassiveTransaction",
+         "Test very large insert transaction"){
+  INITIALIZER(runLoadTable2);
+  FINALIZER(runClearTable2);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 int main(int argc, const char** argv){
@@ -693,12 +693,14 @@ HugoTransactions::loadTable(Ndb* pNdb,
 			    int records,
 			    int batch,
 			    bool allowConstraintViolation,
-			    int doSleep){
+			    int doSleep,
+			    bool oneTrans){
   int             check;
   int             retryAttempt = 0;
   int             retryMax = 5;
   NdbConnection   *pTrans;
   NdbOperation	  *pOp;
+  bool            first_batch = true;
 
   const int org = batch;
   const int cols = tab.getNoOfColumns();
@@ -707,7 +709,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
   batch = (batch * 256); // -> 512 -> 65536k per commit
   batch = batch/bytes;   //
   batch = batch == 0 ? 1 : batch;
-  
+
   if(batch != org){
     g_info << "batch = " << org << " rowsize = " << bytes
 	   << " -> rows/commit = " << batch << endl;
@@ -715,7 +717,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
 
   g_info << "|- Inserting records..." << endl;
   for (int c=0 ; c<records ; ){
-
+    bool closeTrans;
     if (retryAttempt >= retryMax){
       g_info << "Record " << c << " could not be inserted, has retried "
 	     << retryAttempt << " times " << endl;
@@ -726,19 +728,22 @@ HugoTransactions::loadTable(Ndb* pNdb,
     if (doSleep > 0)
       NdbSleep_MilliSleep(doSleep);
 
-    pTrans = pNdb->startTransaction();
-
-    if (pTrans == NULL) {
-      const NdbError err = pNdb->getNdbError();
-
-      if (err.status == NdbError::TemporaryError){
-	ERR(err);
-	NdbSleep_MilliSleep(50);
-	retryAttempt++;
-	continue;
+    if (first_batch || !oneTrans) {
+      first_batch = false;
+      pTrans = pNdb->startTransaction();
+
+      if (pTrans == NULL) {
+	const NdbError err = pNdb->getNdbError();
+
+	if (err.status == NdbError::TemporaryError){
+	  ERR(err);
+	  NdbSleep_MilliSleep(50);
+	  retryAttempt++;
+	  continue;
+	}
+	ERR(err);
+	return NDBT_FAILED;
       }
-      ERR(err);
-      return NDBT_FAILED;
     }
 
     for(int b = 0; b < batch && c+b<records; b++){
@@ -768,7 +773,13 @@ HugoTransactions::loadTable(Ndb* pNdb,
     }
 
     // Execute the transaction and insert the record
-    check = pTrans->execute( Commit );
+    if (!oneTrans || (c + batch) >= records) {
+      closeTrans = true;
+      check = pTrans->execute( Commit );
+    } else {
+      closeTrans = false;
+      check = pTrans->execute( NoCommit );
+    }
     if(check == -1 ) {
       const NdbError err = pTrans->getNdbError();
       pNdb->closeTransaction(pTrans);
@@ -811,8 +822,10 @@ HugoTransactions::loadTable(Ndb* pNdb,
 	break;
       }
     }
-    else{
-      pNdb->closeTransaction(pTrans);
+    else{
+      if (closeTrans) {
+	pNdb->closeTransaction(pTrans);
+      }
     }
 
     // Step to next record
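Taken together, the loadTable() changes implement the oneTrans mode used by the new MassiveTransaction test: the transaction is started only for the first batch, intermediate batches are sent with execute(NoCommit), and the single Commit (plus the closeTransaction) happens on the batch that reaches the last record. A toy model of that control flow (a container stands in for the NDB API; names are hypothetical):

#include <cstdio>
#include <vector>

// Toy stand-in for an NDB transaction: rows are staged on NoCommit
// and only become durable on Commit.
struct Txn {
  std::vector<int> staged;
  void noCommit(int row) { staged.push_back(row); }
  void commit(std::vector<int>& table, int row) {
    staged.push_back(row);
    table.insert(table.end(), staged.begin(), staged.end());
    staged.clear();
  }
};

int main() {
  std::vector<int> table;
  const int records = 10;
  const int batch = 3;
  const bool oneTrans = true;

  Txn txn;
  bool first_batch = true;
  for (int c = 0; c < records; c += batch) {
    if (first_batch || !oneTrans) {
      first_batch = false;                // oneTrans: start it only once
      txn = Txn();                        // "pNdb->startTransaction()"
    }
    int row = c;                          // one row stands in for a batch
    if (!oneTrans || c + batch >= records)
      txn.commit(table, row);             // last batch: execute(Commit)
    else
      txn.noCommit(row);                  // otherwise: execute(NoCommit)
  }
  printf("%d rows durable\n", (int)table.size());
  return 0;
}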
@@ -3642,11 +3642,19 @@ ha_innobase::create(
 	}
 
 	if (current_thd->query != NULL) {
-
-		error = row_table_add_foreign_constraints(trx,
-					current_thd->query, norm_name);
-
-		error = convert_error_code_to_mysql(error, NULL);
+		LEX_STRING q;
+		if (thd->convert_string(&q, system_charset_info,
+					current_thd->query,
+					current_thd->query_length,
+					current_thd->charset())) {
+			error = HA_ERR_OUT_OF_MEM;
+		} else {
+			error = row_table_add_foreign_constraints(trx,
+					q.str, norm_name);
+
+			error = convert_error_code_to_mysql(error, NULL);
+		}
 
 		if (error) {
 			innobase_commit_low(trx);