mariadb-columnstore-engine (mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git)
MCOL-1212 During abort, ensure that all normal threads are complete before running abort threads. Otherwise, they clash.
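The commit message boils down to one ordering rule: signal the normal worker threads to stop, then wait for every one of them to finish, and only then let the abort-time work run. Below is a minimal standalone sketch of that ordering, using std::thread and an atomic flag in place of ColumnStore's JobStep and thread-pool machinery; all names in it are illustrative, not ColumnStore APIs.

#include <atomic>
#include <chrono>
#include <thread>
#include <vector>

std::atomic<bool> aborted{false};   // set once when the query is aborted

void normalWork()
{
    // Stand-in for a step's worker loop: keep processing until aborted.
    while (!aborted.load())
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
}

int main()
{
    std::vector<std::thread> steps;
    for (int i = 0; i < 4; ++i)
        steps.emplace_back(normalWork);

    // Abort path: signal the normal threads...
    aborted.store(true);

    // ...and join them all, so none is still running...
    for (auto& t : steps)
        t.join();

    // ...before any abort/cleanup threads are started; otherwise they clash.
    return 0;
}

The JobList::abort() hunk below applies the same idea by calling join() on each query and projection step after signalling abort().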
@@ -1067,6 +1067,10 @@ void JobList::abort()
             fQuery[i]->abort();
         for (i = 0; i < fProject.size(); i++)
             fProject[i]->abort();
+        for (i = 0; i < fQuery.size(); i++)
+            fQuery[i]->join();
+        for (i = 0; i < fProject.size(); i++)
+            fProject[i]->join();
     }
 }
 
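With the added join() calls, JobList::abort() no longer returns while any query or projection step still has a worker thread running; the abort-side threads only start once the normal ones have completed, which is exactly the clash the commit message describes.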
@@ -4691,21 +4691,6 @@ void TupleAggregateStep::threadedAggregateRowGroups(uint32_t threadID)
 
             if (more)
             {
-                // Trying out a ramp-up strategy for starting the
-                // first phase threads to cut overhead on big systems
-                // processing small result
-                // sets. On every non-zero read from the input FIFO,
-                // and if there is more data to read, the
-                // first thread will start another thread until the
-                // maximum number is reached.
-#if 0
-                if (threadID == 0 && fFirstPhaseThreadCount < fNumOfThreads &&
-                    dlIn->more(fInputIter))
-                {
-                    fFirstPhaseRunners.push_back(jobstepThreadPool.invoke(ThreadedAggregator(this, fFirstPhaseThreadCount)));
-                    fFirstPhaseThreadCount++;
-                }
-#endif
                 fRowGroupIns[threadID].setData(&rgData);
                 fMemUsage[threadID] += fRowGroupIns[threadID].getSizeWithStrings();
                 if (!fRm->getMemory(fRowGroupIns[threadID].getSizeWithStrings(), fSessionMemLimit))
@@ -4971,28 +4956,16 @@ uint64_t TupleAggregateStep::doThreadedAggregate(ByteStream& bs, RowGroupDL* dlp
     {
         initializeMultiThread();
 
-        // This block of code starts all threads at the start
-        fFirstPhaseThreadCount = fNumOfThreads;
-        fFirstPhaseRunners.clear();
-        fFirstPhaseRunners.reserve(fNumOfThreads); // to prevent a resize during use
-        for (i = 0; i < fNumOfThreads; i++)
-        {
-            fFirstPhaseRunners.push_back(jobstepThreadPool.invoke(ThreadedAggregator(this, i)));
-        }
-
-#if 0
-        // This block of code starts one thread, relies on doThreadedAggregation()
-        // For reasons unknown, this doesn't work right with threadpool
-        // to start more as needed
-        fFirstPhaseRunners.clear();
-        fFirstPhaseRunners.reserve(fNumOfThreads); // to prevent a resize during use
-        fFirstPhaseThreadCount = 1;
-        fFirstPhaseRunners.push_back(jobstepThreadPool.invoke(ThreadedAggregator(this, 0)));
-#endif
-
-        // Now wait for that thread plus all the threads it may have spawned
-        jobstepThreadPool.join(fFirstPhaseRunners);
-        fFirstPhaseRunners.clear();
+        vector<uint64_t> runners; // thread pool handles
+        runners.reserve(fNumOfThreads); // to prevent a resize during use
+        // Start the aggregator threads
+        for (i = 0; i < fNumOfThreads; i++)
+        {
+            runners.push_back(jobstepThreadPool.invoke(ThreadedAggregator(this, i)));
+        }
+
+        // Now wait for all those threads
+        jobstepThreadPool.join(runners);
     }
 
     if (dynamic_cast<RowAggregationDistinct*>(fAggregator.get()) && fAggregator->aggMapKeyLength() > 0)
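The rewritten block above starts every first-phase aggregator up front, keeps the pool handles in a local vector, and then joins them all, replacing the per-instance bookkeeping (fFirstPhaseRunners, fFirstPhaseThreadCount) that the abandoned ramp-up experiment needed. Below is a self-contained sketch of that start-all-then-join pattern; MiniPool is a hypothetical stand-in for jobstepThreadPool that only mimics the invoke()/join() shape visible in the diff, not the real ColumnStore API.

#include <cstdint>
#include <functional>
#include <map>
#include <thread>
#include <vector>

// Stand-in pool: invoke() starts a task and returns a handle,
// join() waits for a list of handles.
class MiniPool
{
public:
    uint64_t invoke(std::function<void()> fn)
    {
        uint64_t h = nextHandle_++;
        threads_[h] = std::thread(std::move(fn));
        return h;
    }

    void join(const std::vector<uint64_t>& handles)
    {
        for (uint64_t h : handles)
            threads_[h].join();
    }

private:
    uint64_t nextHandle_ = 0;
    std::map<uint64_t, std::thread> threads_;
};

int main()
{
    MiniPool pool;
    const uint32_t numThreads = 4;     // plays the role of fNumOfThreads

    std::vector<uint64_t> runners;     // thread pool handles
    runners.reserve(numThreads);       // to prevent a resize during use

    // Start the aggregator threads
    for (uint32_t i = 0; i < numThreads; ++i)
        runners.push_back(pool.invoke([] { /* aggregate one slice of the row groups */ }));

    // Now wait for all those threads
    pool.join(runners);
    return 0;
}

Keeping the handles in a function-local vector also lines up with dropping fFirstPhaseRunners and fFirstPhaseThreadCount from the class in the header hunk below.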
@@ -186,8 +186,6 @@ private:
     bool fIsMultiThread;
     int fInputIter; // iterator
     boost::scoped_array<uint64_t> fMemUsage;
-    std::vector<uint64_t> fFirstPhaseRunners; // thread pool handles
-    uint32_t fFirstPhaseThreadCount;
 
     boost::shared_ptr<int64_t> fSessionMemLimit;
 };