mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-30 19:23:07 +03:00

MCOL-513 Threadpool to unlimited threads when queuesize = 0. Idle down after 10 minutes

David Hall
2017-02-13 11:56:28 -06:00
parent c2344accc9
commit e09b7e10c5
8 changed files with 236 additions and 204 deletions
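
In short, the change gives ThreadPool's queueSize argument a special value: passing 0 disables the queue limit, so invoke() never blocks the caller; instead the pool starts threads beyond maxThreads when the backlog demands it, and threads above maxThreads exit again after roughly ten minutes of idleness. A caller-side sketch of the new contract, assuming the ColumnStore threadpool library is linked (the PrintJob functor is a made-up example, not part of this commit):

#include "threadpool.h"   // threadpool::ThreadPool, as changed in this commit

using namespace threadpool;

// Hypothetical unit of work; anything copyable with operator()() can be invoke()d.
struct PrintJob
{
    int id;
    void operator()() { /* do some per-query work */ }
};

int main()
{
    // Old behaviour: at most 100 threads, and invoke() blocks once 200 jobs are queued.
    // ThreadPool bounded(100, 200);

    // New behaviour with queueSize == 0: invoke() never blocks, extra threads are
    // created past maxThreads during a burst, and they idle away again afterwards.
    ThreadPool burstable(100, 0);

    for (int i = 0; i < 1000; i++)
    {
        PrintJob job;
        job.id = i;
        burstable.invoke(job);
    }

    burstable.wait();   // wait() still blocks until the queued work drains
    return 0;
}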

View File

@@ -74,7 +74,7 @@ JobList::JobList(bool isEM) :
 JobList::~JobList()
 {
     vector<boost::thread *> joiners;
-    boost::thread *tmp;
+    // boost::thread *tmp;
     try
     {
         if (fIsRunning)

View File

@ -56,7 +56,7 @@ namespace joblist
{ {
boost::mutex JobStep::fLogMutex; //=PTHREAD_MUTEX_INITIALIZER; boost::mutex JobStep::fLogMutex; //=PTHREAD_MUTEX_INITIALIZER;
ThreadPool JobStep::jobstepThreadPool(100,200); ThreadPool JobStep::jobstepThreadPool(defaultJLThreadPoolSize, 0);
ostream& operator<<(ostream& os, const JobStep* rhs) ostream& operator<<(ostream& os, const JobStep* rhs)
{ {

View File

@@ -60,7 +60,8 @@ namespace joblist
 const uint64_t defaultHUATotalMem = 8 * 1024 * 1024 * 1024ULL;
 const uint32_t defaultTupleDLMaxSize = 64 * 1024;
 const uint32_t defaultTupleMaxBuckets = 256;
+const uint32_t defaultJLThreadPoolSize = 100;
 //pcolscan.cpp
 const uint32_t defaultScanLbidReqLimit = 10000;
@@ -160,7 +161,7 @@ namespace joblist
 unsigned getHjNumThreads() const { return fHjNumThreads; } //getUintVal(fHashJoinStr, "NumThreads", defaultNumThreads); }
 uint64_t getHjMaxElems() const { return getUintVal(fHashJoinStr, "MaxElems", defaultHJMaxElems); }
 uint32_t getHjFifoSizeLargeSide() const { return getUintVal(fHashJoinStr, "FifoSizeLargeSide", defaultHJFifoSizeLargeSide); }
 uint32_t getHjCPUniqueLimit() const { return getUintVal(fHashJoinStr, "CPUniqueLimit", defaultHjCPUniqueLimit); }
 uint64_t getPMJoinMemLimit() const { return pmJoinMemLimit; }
 uint32_t getJLFlushInterval() const { return getUintVal(fJobListStr, "FlushInterval", defaultFlushInterval); }
@@ -168,6 +169,10 @@ namespace joblist
 uint32_t getJlScanLbidReqLimit() const { return getUintVal(fJobListStr, "ScanLbidReqLimit",defaultScanLbidReqLimit); }
 uint32_t getJlScanLbidReqThreshold() const { return getUintVal(fJobListStr,"ScanLbidReqThreshold", defaultScanLbidReqThreshold); }
+// @MCOL-513 - Added threadpool to JobSteps
+uint32_t getJLThreadPoolSize() const { return getUintVal(fJobListStr, "ThreadPoolSize", defaultJLThreadPoolSize); }
+std::string getJlThreadPoolDebug() const { return getStringVal(fJobListStr, "ThreadPoolDebug", "N"); }
 // @bug 1264 - Added LogicalBlocksPerScan configurable which determines the number of blocks contained in each BPS scan request.
 uint32_t getJlLogicalBlocksPerScan() const { return getUintVal(fJobListStr,"LogicalBlocksPerScan", defaultLogicalBlocksPerScan); }
 uint32_t getJlProjectBlockReqLimit() const { return getUintVal(fJobListStr, "ProjectBlockReqLimit", defaultProjectBlockReqLimit ); }
@@ -180,9 +185,9 @@
 uint32_t getJlMaxOutstandingRequests() const { return getUintVal(fJobListStr,"MaxOutstandingRequests", defaultMaxOutstandingRequests);}
 uint32_t getJlJoinerChunkSize() const { return getUintVal(fJobListStr,"JoinerChunkSize", defaultJoinerChunkSize);}
 int getPsCount() const { return getUintVal(fPrimitiveServersStr, "Count", defaultPSCount ); }
 int getPsConnectionsPerPrimProc() const { return getUintVal(fPrimitiveServersStr, "ConnectionsPerPrimProc", defaultConnectionsPerPrimProc); }
 uint32_t getPsLBID_Shift() const { return getUintVal(fPrimitiveServersStr, "LBID_Shift", defaultLBID_Shift ); }
 std::string getScTempDiskPath() const { return getStringVal(fSystemConfigStr, "TempDiskPath", defaultTempDiskPath ); }
 uint64_t getScTempSaveSize() const { return getUintVal(fSystemConfigStr, "TempSaveSize", defaultTempSaveSize); }
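
The two new accessors read <ThreadPoolSize> and <ThreadPoolDebug> from the <JobList> section of Columnstore.xml, falling back to defaultJLThreadPoolSize (100) and "N" when the elements are absent. They are consumed by the ExeMgr change further down in this commit, roughly as in this sketch (rm is assumed to be the usual joblist::ResourceManager instance):

// Hedged sketch of applying the new JobList settings to the static jobstep pool.
uint32_t poolSize = rm->getJLThreadPoolSize();      // <ThreadPoolSize>, default 100
std::string dbg   = rm->getJlThreadPoolDebug();     // <ThreadPoolDebug>, default "N"

JobStep::jobstepThreadPool.setMaxThreads(poolSize);
if (dbg == "Y" || dbg == "y")
    JobStep::jobstepThreadPool.setDebug(true);      // traced by ThreadPoolMonitor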

View File

@@ -1393,6 +1393,20 @@ int main(int argc, char* argv[])
         }
     }
+    // It's possible that PM modules use this threadpool. Only ExeMgr creates
+    // massive numbers of threads and needs the size to be settable. It's also possible
+    // that other processes on this UM module use this threadpool; in that case, they share it.
+    // We can't call rm functions during static creation because rm has an isExeMgr
+    // flag that is set upon first creation. For the pool, which has no idea whether it is
+    // ExeMgr, creating the singleton rm would be wrong no matter which way we set the flag.
+    JobStep::jobstepThreadPool.setMaxThreads(rm->getJLThreadPoolSize());
+    JobStep::jobstepThreadPool.setName("ExeMgr");
+    if (rm->getJlThreadPoolDebug() == "Y" || rm->getJlThreadPoolDebug() == "y")
+    {
+        JobStep::jobstepThreadPool.setDebug(true);
+        JobStep::jobstepThreadPool.invoke(ThreadPoolMonitor(&JobStep::jobstepThreadPool));
+    }
     int serverThreads = rm->getEmServerThreads();
     int serverQueueSize = rm->getEmServerQueueSize();
     int maxPct = rm->getEmMaxPct();
@@ -1433,13 +1447,6 @@ int main(int argc, char* argv[])
         }
     }
-    // if (!JobStep::jobstepThreadPool.debug())
-    // {
-    //     JobStep::jobstepThreadPool.setName("ExeMgr");
-    //     JobStep::jobstepThreadPool.setDebug(true);
-    //     JobStep::jobstepThreadPool.invoke(ThreadPoolMonitor(&JobStep::jobstepThreadPool));
-    // }
     threadpool::ThreadPool exeMgrThreadPool(serverThreads, serverQueueSize);
     for (;;)
     {
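
The comment in the hunk above captures an ordering constraint: jobstepThreadPool is a file-scope static in jobstep.cpp, so its constructor cannot consult ResourceManager (constructing the rm singleton that early would pin its isExeMgr flag before the process knows what it is). The resulting pattern is "construct with compile-time defaults, then resize once configuration is safely available". A minimal, self-contained sketch of that pattern (the names here are illustrative, not ColumnStore's):

#include <cstddef>

// Stand-in for threadpool::ThreadPool; only the calls used below are modelled.
class Pool
{
public:
    Pool(std::size_t maxThreads, std::size_t queueSize)
        : fMaxThreads(maxThreads), fQueueSize(queueSize) {}
    void setMaxThreads(std::size_t m) { fMaxThreads = m; }  // safe to call after startup
private:
    std::size_t fMaxThreads;
    std::size_t fQueueSize;
};

// 1) Static construction: only compile-time defaults, no configuration access here.
static Pool gJobstepPool(100 /* defaultJLThreadPoolSize */, 0 /* queue disabled */);

// 2) Later, in main(), once the ResourceManager singleton exists with the correct
//    isExeMgr flag, the configured limit is applied.
void applyConfiguredPoolSize(std::size_t configuredSize)  // e.g. rm->getJLThreadPoolSize()
{
    gJobstepPool.setMaxThreads(configuredSize);
}

int main()
{
    applyConfiguredPoolSize(256);  // the real value would come from Columnstore.xml
    return 0;
}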

View File

@@ -503,6 +503,7 @@
          as many threads are available across all PMs. -->
     <!-- <ProcessorThreadsPerScan>16</ProcessorThreadsPerScan> -->
     <MaxOutstandingRequests>20</MaxOutstandingRequests>
+    <ThreadPoolSize>100</ThreadPoolSize>
   </JobList>
   <TupleWSDL>
     <MaxSize>1M</MaxSize> <!-- Max size in bytes per bucket -->
@@ -515,9 +516,9 @@
     <!-- <RowAggrRowGroupsPerThread>20</RowAggrRowGroupsPerThread> --> <!-- Default value is 20 -->
   </RowAggregation>
   <CrossEngineSupport>
-    <Host>unassigned</Host>
+    <Host>127.0.0.1</Host>
     <Port>3306</Port>
-    <User>unassigned</User>
+    <User>root</User>
     <Password></Password>
   </CrossEngineSupport>
   <QueryStats>

View File

@@ -495,8 +495,9 @@
          is 20 extents worth of work for the PMs to process at any given time.
          ProcessorThreadsPerScan * MaxOutstandingRequests should be at least
          as many threads are available across all PMs. -->
     <!-- <ProcessorThreadsPerScan>16</ProcessorThreadsPerScan> -->
     <MaxOutstandingRequests>20</MaxOutstandingRequests>
+    <ThreadPoolSize>100</ThreadPoolSize>
   </JobList>
   <TupleWSDL>
     <MaxSize>1M</MaxSize> <!-- Max size in bytes per bucket -->
@@ -509,9 +510,9 @@
     <!-- <RowAggrRowGroupsPerThread>20</RowAggrRowGroupsPerThread> --> <!-- Default value is 20 -->
   </RowAggregation>
   <CrossEngineSupport>
-    <Host>unassigned</Host>
+    <Host>127.0.0.1</Host>
     <Port>3306</Port>
-    <User>unassigned</User>
+    <User>root</User>
     <Password></Password>
   </CrossEngineSupport>
   <QueryStats>

View File

@@ -30,35 +30,34 @@ using namespace logging;
 #include "threadpool.h"
 #include <iomanip>
 #include <sstream>
+#include "boost/date_time/posix_time/posix_time_types.hpp"

 namespace threadpool
 {

 ThreadPool::ThreadPool()
     :fMaxThreads( 0 ), fQueueSize( 0 )
 {
     init();
 }

 ThreadPool::ThreadPool( size_t maxThreads, size_t queueSize )
     :fMaxThreads( maxThreads ), fQueueSize( queueSize )
 {
     init();
-    if (fQueueSize == 0)
-        fQueueSize = fMaxThreads*2;
 }

 ThreadPool::~ThreadPool() throw()
 {
-    // delete fThreadCreated;
     try
     {
         stop();
     }
-    catch(...)
-    {}
+    catch (...)
+    {
+    }
 }

 void ThreadPool::init()
@@ -66,13 +65,12 @@ void ThreadPool::init()
     fThreadCount = 0;
     fGeneralErrors = 0;
     fFunctorErrors = 0;
     waitingFunctorsSize = 0;
-    issued = 0;
+    fIssued = 0;
     fDebug = false;
     fStop = false;
-    // fThreadCreated = new NoOp();
     fNextFunctor = fWaitingFunctors.end();
     fNextHandle=1;
 }

 void ThreadPool::setQueueSize(size_t queueSize)
@@ -88,11 +86,6 @@ void ThreadPool::setMaxThreads(size_t maxThreads)
     fMaxThreads = maxThreads;
 }

-void ThreadPool::setThreadCreatedListener(const Functor_T &f)
-{
-    // fThreadCreated = f;
-}
-
 void ThreadPool::stop()
 {
     boost::mutex::scoped_lock lock1(fMutex);
@@ -111,7 +104,7 @@ void ThreadPool::wait()
     while (waitingFunctorsSize > 0)
     {
         fThreadAvailable.wait(lock1);
         //cerr << "woke!" << endl;
     }
 }
@@ -121,22 +114,22 @@ void ThreadPool::join(uint64_t thrHandle)
     while (waitingFunctorsSize > 0)
     {
         Container_T::iterator iter;
         Container_T::iterator end = fWaitingFunctors.end();
         bool foundit = false;
         for (iter = fWaitingFunctors.begin(); iter != end; ++iter)
         {
             foundit = false;
             if (iter->hndl == thrHandle)
             {
                 foundit = true;
                 break;
             }
         }
         if (!foundit)
         {
             break;
         }
         fThreadAvailable.wait(lock1);
     }
 }
@@ -147,32 +140,32 @@ void ThreadPool::join(std::vector<uint64_t> thrHandle)
     while (waitingFunctorsSize > 0)
     {
         Container_T::iterator iter;
         Container_T::iterator end = fWaitingFunctors.end();
         bool foundit = false;
         for (iter = fWaitingFunctors.begin(); iter != end; ++iter)
         {
             foundit = false;
             std::vector<uint64_t>::iterator thrIter;
             std::vector<uint64_t>::iterator thrEnd = thrHandle.end();
             for (thrIter = thrHandle.begin(); thrIter != thrEnd; ++thrIter)
             {
                 if (iter->hndl == *thrIter)
                 {
                     foundit = true;
                     break;
                 }
             }
             if (foundit == true)
             {
                 break;
             }
         }
         // If we didn't find any of the handles, then all are complete
         if (!foundit)
         {
             break;
         }
         fThreadAvailable.wait(lock1);
     }
 }
@@ -180,13 +173,12 @@ void ThreadPool::join(std::vector<uint64_t> thrHandle)
 uint64_t ThreadPool::invoke(const Functor_T &threadfunc)
 {
     boost::mutex::scoped_lock lock1(fMutex);
     uint64_t thrHandle=0;
-    for(;;)
+    for (;;)
     {
         try
         {
-            if ( waitingFunctorsSize < fThreadCount)
+            if (waitingFunctorsSize < fThreadCount)
             {
                 // Don't create a thread unless it's needed. There
                 // is a thread available to service this request.
@@ -197,33 +189,34 @@ uint64_t ThreadPool::invoke(const Functor_T &threadfunc)
             bool bAdded = false;
-            if ( waitingFunctorsSize < fQueueSize)
+            if (waitingFunctorsSize < fQueueSize || fQueueSize == 0)
             {
                 // Don't create a thread unless you have to
                 thrHandle = addFunctor(threadfunc);
                 bAdded = true;
             }
-            if ( fThreadCount < fMaxThreads)
+            // fQueueSize = 0 disables the queue and is an indicator to allow any number of threads to actually run.
+            if (fThreadCount < fMaxThreads || fQueueSize == 0)
             {
                 ++fThreadCount;
                 lock1.unlock();
                 fThreads.create_thread(beginThreadFunc(*this));
                 if (fDebug)
                 {
                     ostringstream oss;
                     oss << "invoke: Starting thread " << fThreadCount << " max " << fMaxThreads
                         << " queue " << fQueueSize;
                     logging::Message::Args args;
                     logging::Message message(0);
                     args.add(oss.str());
                     message.format( args );
                     logging::LoggingID lid(22);
                     logging::MessageLog ml(lid);
                     ml.logWarningMessage( message );
                 }
                 if (bAdded)
                     break;
@@ -241,22 +234,22 @@ uint64_t ThreadPool::invoke(const Functor_T &threadfunc)
                 break;
             }
             if (fDebug)
             {
                 logging::Message::Args args;
                 logging::Message message(5);
                 args.add("invoke: Blocked waiting for thread. Count ");
                 args.add(fThreadCount);
                 args.add("max ");
                 args.add(fMaxThreads);
                 message.format( args );
                 logging::LoggingID lid(22);
                 logging::MessageLog ml(lid);
                 ml.logWarningMessage( message );
             }
             fThreadAvailable.wait(lock1);
         }
-        catch(...)
+        catch (...)
         {
             ++fGeneralErrors;
             throw;
@@ -264,18 +257,16 @@ uint64_t ThreadPool::invoke(const Functor_T &threadfunc)
         }
     }
     fNeedThread.notify_one();
     return thrHandle;
 }

 void ThreadPool::beginThread() throw()
 {
     try
     {
-        // fThreadCreated();
         boost::mutex::scoped_lock lock1(fMutex);
+        boost::system_time timeout = boost::get_system_time()+boost::posix_time::minutes(10);
-        for(;;)
+        for (;;)
         {
             if (fStop)
                 break;
@@ -283,51 +274,80 @@ void ThreadPool::beginThread() throw()
             if (fNextFunctor == fWaitingFunctors.end())
             {
                 // Wait until someone needs a thread
-                fNeedThread.wait(lock1);
+                // Add the timed wait for queueSize == 0 so we can idle away threads
+                // over fMaxThreads
+                if (fQueueSize > 0)
+                {
+                    fNeedThread.wait(lock1);
+                }
+                else
+                {
+                    // Wait no more than 10 minutes
+                    if (fNeedThread.timed_wait(lock1, timeout) == boost::cv_status::timeout)
+                    {
+                        if (fThreadCount > fMaxThreads)
+                        {
+                            --fThreadCount;
+                            return;
+                        }
+                    }
+                }
             }
             else
             {
                 /* Need to tune these magic #s */
                 vector<Container_T::iterator> todoList;
                 int i, num;
                 Container_T::const_iterator iter;

-                /* Use this to control how many jobs are issued to a single thread */
-                num = (waitingFunctorsSize - issued >= 1 ? 1 : 0);
+                /* Use num to control how many jobs are issued to a single thread
+                   should you want to batch more than one */
+                num = (waitingFunctorsSize - fIssued >= 1 ? 1 : 0);
                 for (i = 0; i < num; i++)
                     todoList.push_back(fNextFunctor++);
-                issued += num;
+                fIssued += num;
 //              cerr << "got " << num << " jobs." << endl;
 //              cerr << "got " << num << " jobs. waitingFunctorsSize=" <<
-//                  waitingFunctorsSize << " issued=" << issued << " fThreadCount=" <<
+//                  waitingFunctorsSize << " fIssued=" << fIssued << " fThreadCount=" <<
 //                  fThreadCount << endl;
                 lock1.unlock();
-                for (i = 0; i < num; i++) {
-                    try {
-                        (*todoList[i]).functor();
-                    }
-                    catch(exception &e) {
-                        ++fFunctorErrors;
-                        cerr << e.what() << endl;
-                    }
-                }
+                for (i = 0; i < num; i++)
+                {
+                    try
+                    {
+                        (*todoList[i]).functor();
+                    }
+                    catch (exception &e)
+                    {
+                        ++fFunctorErrors;
+#ifndef NOLOGGING
+                        logging::Message::Args args;
+                        logging::Message message(5);
+                        args.add("ThreadPool: Caught exception during execution: ");
+                        args.add(e.what());
+                        message.format( args );
+                        logging::LoggingID lid(22);
+                        logging::MessageLog ml(lid);
+                        ml.logErrorMessage( message );
+#endif
+                    }
+                }
                 lock1.lock();
-                issued -= num;
+                fIssued -= num;
                 waitingFunctorsSize -= num;
                 for (i = 0; i < num; i++)
                     fWaitingFunctors.erase(todoList[i]);
                 /*
                 if (waitingFunctorsSize != fWaitingFunctors.size())
                     cerr << "size mismatch! fake size=" << waitingFunctorsSize <<
                         " real size=" << fWaitingFunctors.size() << endl;
                 */
+                timeout = boost::get_system_time()+boost::posix_time::minutes(10);
                 fThreadAvailable.notify_all();
             }
         }
     }
@@ -353,12 +373,12 @@ void ThreadPool::beginThread() throw()
             ml.logErrorMessage( message );
 #endif
         }
-        catch(...)
+        catch (...)
         {
         }
     }
-    catch(...)
+    catch (...)
     {
         ++fGeneralErrors;
@@ -379,7 +399,7 @@ void ThreadPool::beginThread() throw()
             ml.logErrorMessage( message );
 #endif
         }
-        catch(...)
+        catch (...)
         {
         }
     }
@@ -393,16 +413,16 @@ uint64_t ThreadPool::addFunctor(const Functor_T &func)
         bAtEnd = true;
     // PoolFunction_T poolFunction(fNextHandle, func);
     PoolFunction_T poolFunction;
     poolFunction.hndl = fNextHandle;
     poolFunction.functor = func;
     fWaitingFunctors.push_back(poolFunction);
     waitingFunctorsSize++;
     if (bAtEnd)
     {
         --fNextFunctor;
     }
     return fNextHandle++;
 }

 void ThreadPool::dump()
@@ -415,47 +435,48 @@ void ThreadPool::dump()
 void ThreadPoolMonitor::operator()()
 {
     ostringstream filename;
     filename << "/var/log/mariadb/columnstore/trace/ThreadPool_" << fPool->name() << ".log";
     fLog = new ofstream(filename.str().c_str());

     for (;;)
     {
         if (!fLog || !fLog->is_open())
         {
             ostringstream oss;
             oss << "ThreadPoolMonitor " << fPool->name() << " has no file ";
             logging::Message::Args args;
             logging::Message message(0);
             args.add(oss.str());
             message.format( args );
             logging::LoggingID lid(22);
             logging::MessageLog ml(lid);
             ml.logWarningMessage( message );
             return;
         }

         // Get a timestamp for output.
         struct tm tm;
         struct timeval tv;
         gettimeofday(&tv, 0);
         localtime_r(&tv.tv_sec, &tm);
         (*fLog) << setfill('0')
                 << setw(2) << tm.tm_hour << ':'
                 << setw(2) << tm.tm_min << ':'
                 << setw(2) << tm.tm_sec
                 << '.'
                 << setw(4) << tv.tv_usec/100
                 << " Name " << fPool->fName
                 << " Active " << fPool->waitingFunctorsSize
                 << " Most " << fPool->fThreadCount
                 << " Max " << fPool->fMaxThreads
                 << " Q " << fPool->fQueueSize
                 << endl;

         // struct timespec req = { 0, 1000 * 100 }; //100 usec
         // nanosleep(&req, 0);
         sleep(2);
     }
 }

 } // namespace threadpool
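
The core of the change sits in beginThread(): with fQueueSize == 0 a worker waits on the condition variable with a 10-minute timeout instead of indefinitely, and a worker that times out while the pool is above fMaxThreads decrements the count and exits, which is how the pool "idles down" after a burst. The same pattern reduced to a self-contained sketch with std::thread and std::condition_variable; the class name, the idle heuristic, and the shortened timeout are illustrative, not the Boost-based ColumnStore code above:

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

class ElasticPool
{
public:
    explicit ElasticPool(std::size_t softMax) : softMax_(softMax) {}

    ~ElasticPool()
    {
        {
            std::lock_guard<std::mutex> lk(mtx_);
            stop_ = true;
        }
        needThread_.notify_all();
        for (auto &t : threads_)
            t.join();
    }

    void invoke(std::function<void()> job)
    {
        std::lock_guard<std::mutex> lk(mtx_);
        queue_.push_back(std::move(job));
        // Like fQueueSize == 0: never block the caller. If no worker is idle,
        // start another thread even when we are already past the soft maximum.
        if (idle_ == 0)
        {
            ++threadCount_;
            threads_.emplace_back(&ElasticPool::worker, this);
        }
        needThread_.notify_one();
    }

private:
    void worker()
    {
        std::unique_lock<std::mutex> lk(mtx_);
        for (;;)
        {
            if (stop_)
                return;
            if (queue_.empty())
            {
                ++idle_;
                // 10 minutes in the real commit; a few seconds here so the sketch
                // is easy to experiment with.
                bool gotWork = needThread_.wait_for(lk, std::chrono::seconds(5),
                                   [this] { return stop_ || !queue_.empty(); });
                --idle_;
                if (!gotWork && threadCount_ > softMax_)
                {
                    --threadCount_;   // idle down, as beginThread() does on timeout
                    return;
                }
                continue;
            }
            std::function<void()> job = std::move(queue_.front());
            queue_.pop_front();
            lk.unlock();
            try { job(); } catch (...) { /* the real pool counts fFunctorErrors */ }
            lk.lock();
        }
    }

    std::size_t softMax_;
    std::size_t threadCount_ = 0;
    std::size_t idle_ = 0;
    bool stop_ = false;
    std::deque<std::function<void()>> queue_;
    std::mutex mtx_;
    std::condition_variable needThread_;
    std::vector<std::thread> threads_;
};

int main()
{
    std::atomic<int> done(0);
    ElasticPool pool(2);   // soft maximum of 2 threads
    for (int i = 0; i < 16; ++i)
        pool.invoke([&done] { std::this_thread::sleep_for(std::chrono::milliseconds(50)); ++done; });
    std::this_thread::sleep_for(std::chrono::seconds(1));
    std::cout << "completed " << done.load() << " jobs" << std::endl;
    return 0;
}   // ~ElasticPool() stops and joins whatever workers are still alive

The real pool keeps its own bookkeeping (waitingFunctorsSize, fIssued, fThreadAvailable), but the timed wait plus the fThreadCount > fMaxThreads check is the same idle-down mechanism the diff above adds.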

View File

@@ -74,7 +74,10 @@ public:
      * @param maxThreads the maximum number of threads in this pool. This is the maximum number
      *        of simultaneuous operations that can go on.
      * @param queueSize the maximum number of work tasks in the queue. This is the maximum
      *        number of jobs that can queue up in the work list before invoke() blocks.
+     *        If 0, then threads never block and total threads may
+     *        exceed maxThreads. Nothing waits. Thread count will
+     *        idle down to maxThreads when less work is required.
      */
     EXPORT explicit ThreadPool( size_t maxThreads, size_t queueSize );
@@ -108,11 +111,6 @@ public:
      */
     inline size_t getMaxThreads() const { return fMaxThreads; }

-    /** @brief register a functor to be called when a new thread
-     *         is created
-     */
-    EXPORT void setThreadCreatedListener(const Functor_T &f) ;
-
     /** @brief queue size accessor
      *
      */
@@ -218,9 +216,8 @@ private:
     typedef std::list<PoolFunction_T> Container_T;
     Container_T fWaitingFunctors;
     Container_T::iterator fNextFunctor;
-    // Functor_T * fThreadCreated;
-    uint32_t issued;
+    uint32_t fIssued;
     boost::mutex fMutex;
     boost::condition fThreadAvailable; // triggered when a thread is available
     boost::condition fNeedThread; // triggered when a thread is needed