Merge branch 'develop-1.1' into 1.1-merge-up-20180621
@@ -33,6 +33,8 @@ using namespace logging;
 #include "prioritythreadpool.h"
 using namespace boost;
 
+#include "dbcon/joblist/primitivemsg.h"
+
 namespace threadpool
 {
 
@@ -51,9 +53,9 @@ PriorityThreadPool::PriorityThreadPool(uint targetWeightPerRun, uint highThreads
     cout << "started " << highThreads << " high, " << midThreads << " med, " << lowThreads
          << " low.\n";
-    threadCounts[HIGH] = highThreads;
-    threadCounts[MEDIUM] = midThreads;
-    threadCounts[LOW] = lowThreads;
+    defaultThreadCounts[HIGH] = threadCounts[HIGH] = highThreads;
+    defaultThreadCounts[MEDIUM] = threadCounts[MEDIUM] = midThreads;
+    defaultThreadCounts[LOW] = threadCounts[LOW] = lowThreads;
 }
 
 PriorityThreadPool::~PriorityThreadPool()
@@ -68,6 +70,23 @@ void PriorityThreadPool::addJob(const Job& job, bool useLock)
     if (useLock)
         lk.lock();
 
+    // Create any missing threads
+    if (defaultThreadCounts[HIGH] != threadCounts[HIGH])
+    {
+        threads.create_thread(ThreadHelper(this, HIGH));
+        threadCounts[HIGH]++;
+    }
+    if (defaultThreadCounts[MEDIUM] != threadCounts[MEDIUM])
+    {
+        threads.create_thread(ThreadHelper(this, MEDIUM));
+        threadCounts[MEDIUM]++;
+    }
+    if (defaultThreadCounts[LOW] != threadCounts[LOW])
+    {
+        threads.create_thread(ThreadHelper(this, LOW));
+        threadCounts[LOW]++;
+    }
+
     if (job.priority > 66)
         jobQueues[HIGH].push_back(job);
     else if (job.priority > 33)
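
Note on the hunk above: the new defaultThreadCounts/threadCounts pair lets addJob() lazily re-create worker threads that have exited, which matters because the reworked threadFcn() below decrements threadCounts[queue] before a failing worker dies. A minimal sketch of that replenish-on-demand idea, not the pool's actual code; all names below are hypothetical and the real pool does this per priority level, under its mutex:

// Minimal illustrative sketch of replenish-on-demand worker creation.
#include <boost/thread.hpp>

struct MiniPool
{
    boost::thread_group threads;
    unsigned defaultCount = 2;   // workers the pool was configured with
    unsigned liveCount = 0;      // workers currently running

    // Called from a dying worker's outermost catch block.
    void workerExited() { --liveCount; }

    // Called at the top of addJob(): start replacements for dead workers.
    void ensureWorkers()
    {
        while (liveCount < defaultCount)
        {
            threads.create_thread([] { /* pull jobs and run them */ });
            ++liveCount;
        }
    }
};

int main()
{
    MiniPool pool;
    pool.ensureWorkers();   // creates the initial workers
    pool.workerExited();    // pretend one died after an unhandled exception
    pool.ensureWorkers();   // the next addJob() would bring the count back up
    pool.threads.join_all();
}
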
@@ -113,80 +132,146 @@ void PriorityThreadPool::threadFcn(const Priority preferredQueue) throw()
     vector<bool> reschedule;
     uint32_t rescheduleCount;
     uint32_t queueSize;
+    bool running = false;
 
-    while (!_stop) {
+    try
+    {
+        while (!_stop)
+        {
             mutex::scoped_lock lk(mutex);
 
             queue = pickAQueue(preferredQueue);
 
             if (jobQueues[queue].empty())
             {
                 newJob.wait(lk);
                 continue;
             }
 
             queueSize = jobQueues[queue].size();
             weight = 0;
             // 3 conditions stop this thread from grabbing all jobs in the queue
             //
             // 1: The weight limit has been exceeded
             // 2: The queue is empty
             // 3: It has grabbed more than half of the jobs available &
             //     should leave some to the other threads
 
             while ((weight < weightPerRun) && (!jobQueues[queue].empty())
                     && (runList.size() <= queueSize / 2))
             {
                 runList.push_back(jobQueues[queue].front());
                 jobQueues[queue].pop_front();
                 weight += runList.back().weight;
             }
 
             lk.unlock();
 
             reschedule.resize(runList.size());
             rescheduleCount = 0;
 
             for (i = 0; i < runList.size() && !_stop; i++)
             {
                 try
                 {
                     reschedule[i] = false;
+                    running = true;
                     reschedule[i] = (*(runList[i].functor))();
+                    running = false;
 
                     if (reschedule[i])
                         rescheduleCount++;
                 }
                 catch (std::exception& e)
                 {
                     cerr << e.what() << endl;
                 }
             }
 
             // no real work was done, prevent intensive busy waiting
             if (rescheduleCount == runList.size())
                 usleep(1000);
 
             if (rescheduleCount > 0)
             {
                 lk.lock();
 
                 for (i = 0; i < runList.size(); i++)
                     if (reschedule[i])
                         addJob(runList[i], false);
 
                 if (rescheduleCount > 1)
                     newJob.notify_all();
                 else
                     newJob.notify_one();
 
                 lk.unlock();
             }
 
             runList.clear();
         }
+    }
+    catch (std::exception &ex)
+    {
+        // Log the exception and exit this thread
+        try
+        {
+            threadCounts[queue]--;
+#ifndef NOLOGGING
+            logging::Message::Args args;
+            logging::Message message(5);
+            args.add("threadFcn: Caught exception: ");
+            args.add(ex.what());
+
+            message.format( args );
+
+            logging::LoggingID lid(22);
+            logging::MessageLog ml(lid);
+
+            ml.logErrorMessage( message );
+#endif
+            if (running)
+                sendErrorMsg(runList[i].uniqueID, runList[i].stepID, runList[i].sock);
+        }
+        catch (...)
+        {
+        }
+    }
+    catch (...)
+    {
+        // Log the exception and exit this thread
+        try
+        {
+            threadCounts[queue]--;
+#ifndef NOLOGGING
+            logging::Message::Args args;
+            logging::Message message(6);
+            args.add("threadFcn: Caught unknown exception!");
+
+            message.format( args );
+
+            logging::LoggingID lid(22);
+            logging::MessageLog ml(lid);
+
+            ml.logErrorMessage( message );
+#endif
+            if (running)
+                sendErrorMsg(runList[i].uniqueID, runList[i].stepID, runList[i].sock);
+        }
+        catch (...)
+        {
+        }
+    }
 }
 
+void PriorityThreadPool::sendErrorMsg(uint32_t id, uint32_t step, primitiveprocessor::SP_UM_IOSOCK sock)
+{
+    ISMPacketHeader ism;
+    PrimitiveHeader ph = {0};
+
+    ism.Status = logging::primitiveServerErr;
+    ph.UniqueID = id;
+    ph.StepID = step;
+    ByteStream msg(sizeof(ISMPacketHeader) + sizeof(PrimitiveHeader));
+    msg.append((uint8_t *) &ism, sizeof(ism));
+    msg.append((uint8_t *) &ph, sizeof(ph));
+
+    sock->write(msg);
+}
+
 void PriorityThreadPool::stop()
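
Note on the hunk above: taken together, these changes make a worker thread that hits an unhandled exception report the failure back over the job's socket (via the new sendErrorMsg()) and then exit, instead of dying silently; the next addJob() call re-creates the missing worker. A rough sketch of that control flow, not the actual implementation, with hypothetical helper names standing in for the real pool machinery:

// Rough sketch of the error-handling flow; helper names are hypothetical.
#include <exception>
#include <iostream>
#include <stdexcept>

struct Job { int uniqueID; int stepID; };                      // stand-in for the pool's Job

static Job  takeNextJob()           { return Job{1, 1}; }      // placeholder
static bool runFunctor(const Job&)  { throw std::runtime_error("boom"); }
static void logError(const char* w) { std::cerr << w << '\n'; }
static void replyWithError(const Job& j)                       // analogous to sendErrorMsg()
{
    std::cerr << "error reply for job " << j.uniqueID << '/' << j.stepID << '\n';
}

void workerLoop(bool& stop, unsigned& liveThreadCount)
{
    bool running = false;            // mirrors the new 'running' flag
    Job current{};
    try
    {
        while (!stop)
        {
            current = takeNextJob();
            running = true;
            runFunctor(current);     // may throw past the per-job handler
            running = false;
        }
    }
    catch (std::exception& ex)
    {
        logError(ex.what());         // the real code logs via MessageLog
        if (running)
            replyWithError(current); // tell the front end this job failed
        --liveThreadCount;           // addJob() will start a replacement later
    }
}

int main()
{
    bool stop = false;
    unsigned live = 1;
    workerLoop(stop, live);          // worker dies on the thrown exception
    return live == 0 ? 0 : 1;
}
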