
MCOL-5 Correct the issue of double-unlocking the mutex. It was supposed to be a lock, not an unlock

Author: David Hall
Date:   2016-08-08 16:36:53 -05:00
parent 8de8666046
commit b57af447a4
2 changed files with 21 additions and 19 deletions
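The patch replaces bare mutex.lock()/mutex.unlock() calls on the renamed tplMutex with a boost::unique_lock. An ownership-tracking lock guard turns the double-unlock this commit fixes from undefined behavior into a reported error, and its destructor releases the mutex only while it is still owned. A minimal sketch of that behavior, using the standard-library counterparts of the Boost types in the diff (the Boost versions behave the same way):

    #include <iostream>
    #include <mutex>
    #include <system_error>

    int main()
    {
        std::mutex m;

        // RAII lock guard: the constructor locks, the destructor unlocks only
        // if the lock is still owned.
        std::unique_lock<std::mutex> lk(m);

        lk.unlock();                  // first unlock: fine, ownership is cleared

        try {
            lk.unlock();              // second unlock: reported, not undefined behavior
        } catch (const std::system_error& e) {
            std::cout << "double unlock caught: " << e.what() << '\n';
        }

        lk.lock();                    // re-acquire, as the corrected code does before
                                      // the final bookkeeping in the receive thread
        return 0;                     // the destructor releases the mutex here
    }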


@@ -1099,7 +1099,7 @@ private:
 	uint32_t fExtentsPerSegFile;	//config num of Extents Per Segment File
 	boost::shared_ptr<boost::thread> cThread;	//consumer thread
 	boost::shared_ptr<boost::thread> pThread;	//producer thread
-	boost::mutex mutex;
+	boost::mutex tplMutex;
 	boost::mutex dlMutex;
 	boost::mutex cpMutex;
 	boost::mutex serializeJoinerMutex;


@@ -1147,9 +1147,9 @@ void TupleBPS::join()
 {
     if (msgsRecvd < msgsSent) {
         // wake up the sending thread, it should drain the input dl and exit
-        mutex.lock();
+        boost::unique_lock<boost::mutex> tplLock(tplMutex);
         condvarWakeupProducer.notify_all();
-        mutex.unlock();
+        tplLock.unlock();
     }
     if (pThread)
@@ -1291,21 +1291,22 @@ void TupleBPS::interleaveJobs(vector<Job> *jobs) const
 void TupleBPS::sendJobs(const vector<Job> &jobs)
 {
     uint32_t i;
+    boost::unique_lock<boost::mutex> tplLock(tplMutex, boost::defer_lock);
     for (i = 0; i < jobs.size() && !cancelled(); i++) {
         //cout << "sending a job for dbroot " << jobs[i].dbroot << ", PM " << jobs[i].connectionNum << endl;
         fDec->write(uniqueID, *(jobs[i].msg));
-        mutex.lock();
+        tplLock.lock();
         msgsSent += jobs[i].expectedResponses;
         if (recvWaiting)
             condvar.notify_all();
         while ((msgsSent - msgsRecvd > fMaxOutstandingRequests << LOGICAL_EXTENT_CONVERTER)
               && !fDie) {
             sendWaiting = true;
-            condvarWakeupProducer.wait(mutex);
+            condvarWakeupProducer.wait(tplLock);
             sendWaiting = false;
         }
-        mutex.unlock();
+        tplLock.unlock();
     }
 }
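The sendJobs() hunk above also shows why the step needs a unique_lock rather than a bare mutex: condition-variable waits take a lock object, and the deferred construction lets one lock guard be reused across loop iterations. Below is a rough, compilable sketch of the same producer-throttling pattern; queueMutex, wakeupProducer, outstanding, maxOutstanding, and die are hypothetical stand-ins for the step's members, not the real ones:

    #include <condition_variable>
    #include <mutex>

    std::mutex queueMutex;                    // stand-in for tplMutex
    std::condition_variable wakeupProducer;   // stand-in for condvarWakeupProducer
    int outstanding = 0;                      // roughly msgsSent - msgsRecvd
    const int maxOutstanding = 8;             // flow-control window
    bool die = false;                         // abort flag, like fDie

    void sendOne()
    {
        // Deferred lock: constructed unlocked, taken only around shared state.
        std::unique_lock<std::mutex> lk(queueMutex, std::defer_lock);

        // ... write the job to the PMs without holding the lock ...

        lk.lock();
        ++outstanding;
        // Block while too many requests are in flight; wait() atomically releases
        // the lock while sleeping and re-acquires it before returning.
        while (outstanding > maxOutstanding && !die)
            wakeupProducer.wait(lk);
        lk.unlock();
    }

    int main()
    {
        sendOne();   // with nothing outstanding, this returns immediately
    }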
@@ -1695,10 +1696,10 @@ void TupleBPS::sendPrimitiveMessages()
     }
 abort:
-    mutex.lock();
+    boost::unique_lock<boost::mutex> tplLock(tplMutex);
     finishedSending = true;
     condvar.notify_all();
-    mutex.unlock();
+    tplLock.unlock();
 }
 struct _CPInfo {
@@ -1763,6 +1764,7 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
     StepTeleStats sts;
     sts.query_uuid = fQueryUuid;
     sts.step_uuid = fStepUuid;
+    boost::unique_lock<boost::mutex> tplLock(tplMutex, boost::defer_lock);
 try
 {
@@ -1853,13 +1855,13 @@ try
 #endif
     }
-    mutex.lock();
+    tplLock.lock();
     while (1) {
         // sync with the send side
         while (!finishedSending && msgsSent == msgsRecvd) {
             recvWaiting++;
-            condvar.wait(mutex);
+            condvar.wait(tplLock);
             recvWaiting--;
         }
@@ -1920,13 +1922,13 @@ try
             break;
         }
         if (size == 0) {
-            mutex.unlock();
+            tplLock.unlock();
             usleep(2000 * fNumThreads);
-            mutex.lock();
+            tplLock.lock();
             continue;
         }
-        mutex.unlock();
+        tplLock.unlock();
         // cout << "thread " << threadID << " has " << size << " Bytestreams\n";
         for (i = 0; i < size && !cancelled(); i++) {
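The receive side mirrors the producer's discipline: sleep on the condition variable while nothing is outstanding, and drop the lock before any slow work (the usleep above, and later the byte-stream processing) so the producer and the other receive threads can make progress. A hedged consumer-side sketch with stand-in names (queueMutex, haveWork, sent, recvd, finishedSending), not the real members:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex queueMutex;             // stand-in for tplMutex
    std::condition_variable haveWork;  // stand-in for condvar
    int sent = 0, recvd = 0;           // stand-ins for msgsSent / msgsRecvd
    bool finishedSending = false;

    void receiveLoop()
    {
        std::unique_lock<std::mutex> lk(queueMutex, std::defer_lock);
        lk.lock();
        while (true) {
            // Sync with the send side: sleep until something is outstanding.
            while (!finishedSending && sent == recvd)
                haveWork.wait(lk);
            if (finishedSending && sent == recvd)
                break;                       // nothing left to consume

            ++recvd;                         // claim one response under the lock

            lk.unlock();                     // never hold the lock across slow work
            std::this_thread::sleep_for(std::chrono::milliseconds(2));
            lk.lock();
        }
        lk.unlock();
    }

    int main()
    {
        // Single-threaded demo: pretend the producer sent one message and finished.
        sent = 1;
        finishedSending = true;
        receiveLoop();
    }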
@@ -1938,7 +1940,7 @@ try
             if (bs->length() == 0 || hdr->Status > 0)
             {
                 /* PM errors mean this should abort right away instead of draining the PM backlog */
-                mutex.lock();
+                tplLock.lock();
                 if (bs->length() == 0)
                 {
                     errorMessage(IDBErrorInfo::instance()->errorMsg(ERR_PRIMPROC_DOWN));
@@ -2129,7 +2131,7 @@ try
         }
         cpv.clear();
-        mutex.lock();
+        tplLock.lock();
         if (fOid >= 3000)
         {
@@ -2162,7 +2164,7 @@ out:
     if (++recvExited == fNumThreads) {
         if (doJoin && smallOuterJoiner != -1 && !cancelled()) {
-            mutex.unlock();
+            tplLock.unlock();
             /* If this was a left outer join, this needs to put the unmatched
                rows from the joiner into the output
                XXXPAT: This might be a problem if later steps depend
@@ -2220,7 +2222,7 @@ out:
             else
                 rgDataToDl(joinedData, local_outputRG, dlp);
         }
-        mutex.unlock();
+        tplLock.lock();
     }
     if (traceOn() && fOid>=3000) {
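The hunk above carries the actual MCOL-5 fix: on the last receive thread the lock had already been released for the outer-join drain, yet the old code called unlock() again here, even though the shared I/O counters updated further down (and the final tplLock.unlock()) expect the mutex to be held. The corrected line re-acquires it instead. A small compilable outline of the corrected control flow, with hypothetical names (tplMutexSketch, physicalIO, finishReceiveThread) standing in for the real ones:

    #include <mutex>

    std::mutex tplMutexSketch;   // hypothetical stand-in for tplMutex
    long physicalIO = 0;         // hypothetical stand-in for fPhysicalIO

    // Outline of the corrected tail of the receive thread: drop the lock for the
    // outer-join drain, re-acquire it for the shared accounting, release it once.
    void finishReceiveThread(long physIO_Thread)
    {
        std::unique_lock<std::mutex> tplLock(tplMutexSketch);

        tplLock.unlock();        // released while draining unmatched outer-join rows
        // ... drain work happens here without the lock ...
        tplLock.lock();          // the MCOL-5 fix: this call used to be an unlock

        physicalIO += physIO_Thread;   // shared counters must be updated under the lock
        tplLock.unlock();
    }

    int main()
    {
        finishReceiveThread(42);
    }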
@@ -2245,7 +2247,7 @@ out:
             BPPIsAllocated = false;
         }
     }
-    // catch and do nothing. Let it continues with the clean up and profiling
+    // catch and do nothing. Let it continue with the clean up and profiling
     catch (const std::exception& e)
     {
         cerr << "tuple-bps caught: " << e.what() << endl;
@@ -2268,7 +2270,7 @@ out:
     fPhysicalIO += physIO_Thread;
     fCacheIO += cachedIO_Thread;
     fBlockTouched += touchedBlocks_Thread;
-    mutex.unlock();
+    tplLock.unlock();
     if (fTableOid >= 3000 && lastThread)
     {