1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-30 19:23:07 +03:00

Revert "No boost condition (#2822)" (#2828)

This reverts commit f916e64927.
This commit is contained in:
Roman Nozdrin
2023-04-22 13:49:50 +01:00
committed by GitHub
parent f916e64927
commit 4fe9cd64a3
245 changed files with 2007 additions and 1261 deletions

View File

@ -51,7 +51,7 @@ using namespace BRM;
namespace
{
// Only one of the cacheutils fcns can run at a time
std::mutex CacheOpsMutex;
boost::mutex CacheOpsMutex;
// This global is updated only w/ atomic ops
volatile uint32_t MultiReturnCode;
@ -151,7 +151,7 @@ namespace cacheutils
*/
int flushPrimProcCache()
{
std::unique_lock lk(CacheOpsMutex);
boost::mutex::scoped_lock lk(CacheOpsMutex);
try
{
@ -180,7 +180,7 @@ int flushPrimProcBlocks(const BRM::BlockList_t& list)
if (list.empty())
return 0;
std::unique_lock lk(CacheOpsMutex);
boost::mutex::scoped_lock lk(CacheOpsMutex);
#if defined(__LP64__) || defined(_WIN64)
@ -239,7 +239,7 @@ int flushPrimProcAllverBlocks(const vector<LBID_t>& list)
try
{
std::unique_lock lk(CacheOpsMutex);
boost::mutex::scoped_lock lk(CacheOpsMutex);
rc = sendToAll(bs);
return rc;
}
@ -258,7 +258,7 @@ int flushOIDsFromCache(const vector<BRM::OID_t>& oids)
* uint32_t * - OID array
*/
std::unique_lock lk(CacheOpsMutex, std::defer_lock);
boost::mutex::scoped_lock lk(CacheOpsMutex, boost::defer_lock_t());
ByteStream bs;
ISMPacketHeader ism;
@ -287,7 +287,7 @@ int flushPartition(const std::vector<BRM::OID_t>& oids, set<BRM::LogicalPartitio
* uint32_t * - OID array
*/
std::unique_lock lk(CacheOpsMutex, std::defer_lock);
boost::mutex::scoped_lock lk(CacheOpsMutex, boost::defer_lock_t());
ByteStream bs;
ISMPacketHeader ism;
@ -314,7 +314,7 @@ int dropPrimProcFdCache()
try
{
std::unique_lock lk(CacheOpsMutex);
boost::mutex::scoped_lock lk(CacheOpsMutex);
int rc = sendToAll(bs);
return rc;
}

View File

@ -24,7 +24,7 @@ using namespace messageqcpp;
namespace
{
idbdatafile::SMComm* instance = NULL;
std::mutex m;
boost::mutex m;
}; // namespace
namespace idbdatafile
@ -34,7 +34,7 @@ SMComm* SMComm::get()
if (instance)
return instance;
std::unique_lock sl(m);
boost::mutex::scoped_lock sl(m);
if (instance)
return instance;

View File

@ -68,7 +68,7 @@ SocketPool::SocketPool()
SocketPool::~SocketPool()
{
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
for (uint i = 0; i < allSockets.size(); i++)
::close(allSockets[i]);
@ -213,7 +213,7 @@ retry:
int SocketPool::getSocket()
{
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
int clientSocket;
if (freeSockets.size() == 0 && allSockets.size() < maxSockets)
@ -253,7 +253,7 @@ int SocketPool::getSocket()
void SocketPool::returnSocket(const int sock)
{
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
// cout << "returning socket " << sock << endl;
freeSockets.push_back(sock);
socketAvailable.notify_one();
@ -261,7 +261,7 @@ void SocketPool::returnSocket(const int sock)
void SocketPool::remoteClosed(const int sock)
{
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
// cout << "closing socket " << sock << endl;
::close(sock);
for (vector<int>::iterator i = allSockets.begin(); i != allSockets.end(); ++i)

View File

@ -20,9 +20,8 @@
#include <deque>
#include <boost/utility.hpp>
#include <map>
#include <mutex>
#include <condition_variable>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include "bytestream.h"
@ -47,8 +46,8 @@ class SocketPool : public boost::noncopyable
std::vector<int> allSockets;
std::deque<int> freeSockets;
std::mutex mutex;
std::condition_variable socketAvailable;
boost::mutex mutex;
boost::condition_variable socketAvailable;
uint maxSockets;
static const uint defaultSockets = 20;
};

View File

@ -69,12 +69,12 @@ const fs::path defaultConfigFilePath(configDefaultFileName);
namespace config
{
std::mutex Config::fInstanceMapMutex;
boost::mutex Config::fInstanceMapMutex;
Config::configMap_t Config::fInstanceMap;
// duplicate to that in the Config class
std::mutex Config::fXmlLock;
boost::mutex Config::fXmlLock;
// duplicate to that in the Config class
std::mutex Config::fWriteXmlLock;
boost::mutex Config::fWriteXmlLock;
std::atomic_bool globHasConfig;
ConfigUniqPtr globConfigInstancePtr;
@ -103,7 +103,7 @@ Config* Config::makeConfig(const string& cf)
{
// To save against the moment zero race when multiple threads hits
// this scope.
std::unique_lock lk(fInstanceMapMutex);
boost::mutex::scoped_lock lk(fInstanceMapMutex);
if (globConfigInstancePtr)
{
globConfigInstancePtr->checkAndReloadConfig();
@ -118,12 +118,12 @@ Config* Config::makeConfig(const string& cf)
return globConfigInstancePtr.get();
}
std::unique_lock lk(fInstanceMapMutex);
boost::mutex::scoped_lock lk(fInstanceMapMutex);
globConfigInstancePtr->checkAndReloadConfig();
return globConfigInstancePtr.get();
}
std::unique_lock lk(fInstanceMapMutex);
boost::mutex::scoped_lock lk(fInstanceMapMutex);
if (fInstanceMap.find(cf) == fInstanceMap.end())
{
@ -439,7 +439,7 @@ void Config::writeConfig(const string& configFile) const
void Config::write(void) const
{
std::unique_lock lk(fWriteXmlLock);
boost::mutex::scoped_lock lk(fWriteXmlLock);
write(fConfigFile);
}
@ -632,7 +632,7 @@ std::string Config::getTempFileDir(Config::TempDirPurpose what)
void Config::ConfigDeleter::operator()(Config* config)
{
std::unique_lock lk(fInstanceMapMutex);
boost::mutex::scoped_lock lk(fInstanceMapMutex);
for (Config::configMap_t::iterator iter = fInstanceMap.begin(); iter != fInstanceMap.end(); ++iter)
{

View File

@ -240,9 +240,9 @@ class Config
Config(const std::string& configFile);
static configMap_t fInstanceMap;
static std::mutex fInstanceMapMutex;
static std::mutex fXmlLock;
static std::mutex fWriteXmlLock;
static boost::mutex fInstanceMapMutex;
static boost::mutex fXmlLock;
static boost::mutex fWriteXmlLock;
xmlDocPtr fDoc;
const std::string fConfigFile;

View File

@ -22,8 +22,7 @@
*
****************************************************************************/
#include "functor_json.h"
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
#include "funcexp.h"
#include "functor_all.h"
@ -51,11 +50,11 @@ namespace funcexp
FuncExp* FuncExp::fInstance = 0;
/* static */
std::mutex FuncExp::fInstanceMutex;
boost::mutex FuncExp::fInstanceMutex;
FuncExp* FuncExp::instance()
{
std::unique_lock lk(fInstanceMutex);
boost::mutex::scoped_lock lk(fInstanceMutex);
if (!fInstance)
fInstance = new FuncExp();

View File

@ -27,8 +27,7 @@
#include <string>
#include <vector>
#include <tr1/unordered_map>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
#include "rowgroup.h"
#include "returnedcolumn.h"
@ -96,7 +95,7 @@ class FuncExp
private:
static FuncExp* fInstance;
static std::mutex fInstanceMutex;
static boost::mutex fInstanceMutex;
FuncMap fFuncMap;
FuncExp();
};

View File

@ -35,7 +35,7 @@
using namespace std;
namespace
{
std::mutex fac_guard;
boost::mutex fac_guard;
}
namespace idbdatafile
@ -45,7 +45,7 @@ IDBFactory::FactoryMap IDBFactory::s_plugins;
bool IDBFactory::installDefaultPlugins()
{
// protect these methods since we are changing our static data structure
std::unique_lock lock(fac_guard);
boost::mutex::scoped_lock lock(fac_guard);
s_plugins.emplace(IDBDataFile::BUFFERED, FileFactoryEnt(IDBDataFile::BUFFERED, "buffered", new BufferedFileFactory(),
new PosixFileSystem()));
@ -58,7 +58,7 @@ bool IDBFactory::installDefaultPlugins()
bool IDBFactory::installPlugin(const std::string& plugin)
{
// protect these methods since we are changing our static data structure
std::unique_lock lock(fac_guard);
boost::mutex::scoped_lock lock(fac_guard);
void* handle = dlopen(plugin.c_str(), RTLD_LAZY);

View File

@ -45,7 +45,7 @@ bool IDBPolicy::s_bUseRdwrMemBuffer = false;
int64_t IDBPolicy::s_hdfsRdwrBufferMaxSize = 0;
std::string IDBPolicy::s_hdfsRdwrScratch;
bool IDBPolicy::s_configed = false;
std::mutex IDBPolicy::s_mutex;
boost::mutex IDBPolicy::s_mutex;
std::vector<uint16_t> IDBPolicy::s_PreallocSpace;
void IDBPolicy::init(bool bEnableLogging, bool bUseRdwrMemBuffer, const string& hdfsRdwrScratch,
@ -175,7 +175,7 @@ IDBFileSystem& IDBPolicy::getFs(const std::string& path)
void IDBPolicy::configIDBPolicy()
{
// make sure this is done once.
std::unique_lock lk(s_mutex);
boost::mutex::scoped_lock lk(s_mutex);
if (s_configed)
return;

View File

@ -21,8 +21,7 @@
#include <vector>
#include <stdint.h>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
#include "IDBDataFile.h"
#include "IDBFileSystem.h"
@ -154,7 +153,7 @@ class IDBPolicy
static std::string s_hdfsRdwrScratch;
static int64_t s_hdfsRdwrBufferMaxSize;
static bool s_configed;
static std::mutex s_mutex;
static boost::mutex s_mutex;
};
inline const std::string& IDBPolicy::hdfsRdwrScratch()

View File

@ -175,10 +175,10 @@ class TestRunner
IDBDataFile* m_file;
unsigned m_open_opts;
int m_id;
static std::mutex m_guard;
static boost::mutex m_guard;
};
std::mutex TestRunner::m_guard;
boost::mutex TestRunner::m_guard;
void thread_func2(TestRunner& trun)
{
@ -1486,7 +1486,7 @@ bool TestRunner::doBlock(unsigned int blocknum, unsigned char tag, unsigned int
void TestRunner::logMsg(LogLevel level, const string& msg, bool bold)
{
std::unique_lock lock(m_guard);
boost::mutex::scoped_lock lock(m_guard);
if (bold)
cout << "\033[0;1m";

View File

@ -54,7 +54,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
uint i;
getBucketCount();
m_bucketLocks.reset(new std::mutex[bucketCount]);
m_bucketLocks.reset(new boost::mutex[bucketCount]);
if (smallRG.getColTypes()[smallJoinColumn] == CalpontSystemCatalog::LONGDOUBLE)
{
@ -171,7 +171,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool[i] = alloc.getPoolAllocator();
ht[i].reset(new typelesshash_t(10, hasher(), typelesshash_t::key_equal(), alloc));
}
m_bucketLocks.reset(new std::mutex[bucketCount]);
m_bucketLocks.reset(new boost::mutex[bucketCount]);
smallRG.initRow(&smallNullRow);

View File

@ -549,8 +549,8 @@ class TupleJoiner
int numCores;
uint bucketCount;
uint bucketMask;
boost::scoped_array<std::mutex> m_bucketLocks;
std::mutex m_typelessLock, m_cpValuesLock;
boost::scoped_array<boost::mutex> m_bucketLocks;
boost::mutex m_typelessLock, m_cpValuesLock;
utils::Hasher_r bucketPicker;
const uint32_t bpSeed = 0x4545e1d7; // an arbitrary random #
threadpool::ThreadPool* jobstepThreadPool;

View File

@ -47,11 +47,11 @@ using namespace config;
namespace logging
{
IDBErrorInfo* IDBErrorInfo::fInstance = 0;
std::mutex mx;
boost::mutex mx;
IDBErrorInfo* IDBErrorInfo::instance()
{
std::unique_lock lk(mx);
boost::mutex::scoped_lock lk(mx);
if (!fInstance)
fInstance = new IDBErrorInfo();

View File

@ -49,7 +49,7 @@ const string Logger::logMessage(LOG_TYPE logLevel, Message::MessageID mid, const
return logMessage(logLevel, msg, logInfo);
/*
std::unique_lock lk(fLogLock);
boost::mutex::scoped_lock lk(fLogLock);
fMl1.logData(logInfo);
switch (logLevel)
@ -77,7 +77,7 @@ const string Logger::logMessage(LOG_TYPE logLevel, Message::MessageID mid, const
const std::string Logger::logMessage(LOG_TYPE logLevel, const Message& msg, const LoggingID& logInfo)
{
std::unique_lock lk(fLogLock);
boost::mutex::scoped_lock lk(fLogLock);
fMl1.logData(logInfo);
switch (logLevel)

View File

@ -83,7 +83,7 @@ class Logger
MsgMap fMsgMap;
MessageLog fMl1;
std::mutex fLogLock;
boost::mutex fLogLock;
};
typedef boost::shared_ptr<Logger> SPL;

View File

@ -44,7 +44,7 @@ using namespace config;
#include "format.h"
namespace
{
std::mutex mx;
boost::mutex mx;
bool catalogLoaded = false;
typedef map<int, string> CatMap;
@ -152,7 +152,7 @@ const string Message::lookupMessage(const MessageID& msgid)
{
if (!catalogLoaded)
{
std::unique_lock lock(mx);
boost::mutex::scoped_lock lock(mx);
if (!catalogLoaded)
{

View File

@ -49,7 +49,7 @@ ByteStreamPool::~ByteStreamPool()
ByteStream* ByteStreamPool::getByteStream()
{
std::unique_lock s(mutex);
boost::mutex::scoped_lock s(mutex);
ByteStream* ret;
if (!freeByteStreams.empty())
@ -69,7 +69,7 @@ void ByteStreamPool::returnByteStream(ByteStream* bs)
delete bs;
else
{
std::unique_lock s(mutex);
boost::mutex::scoped_lock s(mutex);
if (freeByteStreams.size() > maxFreeBuffers)
delete bs;
else

View File

@ -24,8 +24,7 @@ Initially, 'large' is defined as 1MB.
*/
#include <deque>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
#include "bytestream.h"
namespace messageqcpp
@ -44,7 +43,7 @@ class ByteStreamPool
private:
std::deque<ByteStream*> freeByteStreams;
std::mutex mutex;
boost::mutex mutex;
uint maxBufferSize;
uint maxFreeBuffers;
};

View File

@ -71,7 +71,7 @@ static uint64_t TimeSpecToSeconds(struct timespec* ts)
MessageQueueClient* MessageQueueClientPool::getInstance(const std::string& dnOrIp, uint64_t port)
{
auto lock = std::unique_lock(lockedMap.queueMutex);
auto lock = std::scoped_lock(lockedMap.queueMutex);
std::ostringstream oss;
oss << dnOrIp << "_" << port;
@ -100,7 +100,7 @@ MessageQueueClient* MessageQueueClientPool::getInstance(const std::string& dnOrI
MessageQueueClient* MessageQueueClientPool::getInstance(const std::string& module)
{
auto lock = std::unique_lock(lockedMap.queueMutex);
auto lock = std::scoped_lock(lockedMap.queueMutex);
MessageQueueClient* returnClient = MessageQueueClientPool::findInPool(module);
@ -194,7 +194,7 @@ void MessageQueueClientPool::releaseInstance(MessageQueueClient* client)
if (client == NULL)
return;
auto lock = std::unique_lock(lockedMap.queueMutex);
auto lock = std::scoped_lock(lockedMap.queueMutex);
auto it = lockedMap.clientMap.begin();
while (it != lockedMap.clientMap.end())
@ -223,7 +223,7 @@ void MessageQueueClientPool::deleteInstance(MessageQueueClient* client)
return;
auto lock = std::unique_lock(lockedMap.queueMutex);
auto lock = std::scoped_lock(lockedMap.queueMutex);
auto it = lockedMap.clientMap.begin();
while (it != lockedMap.clientMap.end())

View File

@ -18,22 +18,19 @@
#include <unistd.h>
#include <stdint.h>
#include <ctime>
#include <sys/time.h>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
using namespace boost;
#include "querytele.h"
namespace
{
// It's not clear that random_generator is thread-safe, so we'll just mutex it...
uuids::random_generator uuidgen;
std::mutex uuidgenMtx;
mutex uuidgenMtx;
} // namespace
namespace querytele
@ -41,7 +38,7 @@ namespace querytele
/*static*/
uuids::uuid QueryTeleClient::genUUID()
{
std::unique_lock lk(uuidgenMtx);
mutex::scoped_lock lk(uuidgenMtx);
return uuidgen();
}

View File

@ -48,7 +48,7 @@ struct TsTeleQueue
typedef std::queue<T> TeleQueue;
TeleQueue queue;
std::mutex queueMtx;
boost::mutex queueMtx;
};
TsTeleQueue<querytele::StepTele> stQueue;
@ -56,7 +56,7 @@ TsTeleQueue<querytele::QueryTele> qtQueue;
TsTeleQueue<querytele::ImportTele> itQueue;
volatile bool isInited = false;
std::mutex initMux;
boost::mutex initMux;
std::shared_ptr<att::TSocket> fSocket;
std::shared_ptr<att::TBufferedTransport> fTransport;
@ -195,9 +195,9 @@ void log_step(const querytele::StepTele& stdata)
void TeleConsumer()
{
bool didSomeWork = false;
std::unique_lock itlk(itQueue.queueMtx, std::defer_lock);
std::unique_lock qtlk(qtQueue.queueMtx, std::defer_lock);
std::unique_lock stlk(stQueue.queueMtx, std::defer_lock);
boost::mutex::scoped_lock itlk(itQueue.queueMtx, boost::defer_lock);
boost::mutex::scoped_lock qtlk(qtQueue.queueMtx, boost::defer_lock);
boost::mutex::scoped_lock stlk(stQueue.queueMtx, boost::defer_lock);
querytele::QueryTeleServiceClient client(fProtocol);
try
@ -333,7 +333,7 @@ QueryTeleProtoImpl::QueryTeleProtoImpl(const QueryTeleServerParms& sp) : fServer
if (fServerParms.host.empty() || fServerParms.port == 0)
return;
std::unique_lock lk(initMux);
boost::mutex::scoped_lock lk(initMux);
atomicops::atomicMb();
@ -354,7 +354,7 @@ int QueryTeleProtoImpl::enqStepTele(const StepTele& stdata)
{
try
{
std::unique_lock lk(stQueue.queueMtx);
boost::mutex::scoped_lock lk(stQueue.queueMtx);
// @bug6088 - Added conditions below to always log progress SUMMARY and START messages to avoid completed
// queries showing up with progress 0
@ -391,7 +391,7 @@ int QueryTeleProtoImpl::enqQueryTele(const QueryTele& qtdata)
{
try
{
std::unique_lock lk(qtQueue.queueMtx);
boost::mutex::scoped_lock lk(qtQueue.queueMtx);
if (qtQueue.queue.size() >= MaxQueueElems)
{
@ -413,7 +413,7 @@ int QueryTeleProtoImpl::enqImportTele(const ImportTele& itdata)
{
try
{
std::unique_lock lk(itQueue.queueMtx);
boost::mutex::scoped_lock lk(itQueue.queueMtx);
if (itQueue.queue.size() >= MaxQueueElems)
{
@ -435,7 +435,7 @@ int QueryTeleProtoImpl::waitForQueues()
{
try
{
std::unique_lock lk(itQueue.queueMtx);
boost::mutex::scoped_lock lk(itQueue.queueMtx);
while (!itQueue.queue.empty())
{

View File

@ -86,7 +86,7 @@ uint64_t StringStore::storeString(const uint8_t* data, uint32_t len)
return numeric_limits<uint64_t>::max();
//@bug6065, make StringStore::storeString() thread safe
std::unique_lock lk(fMutex, std::defer_lock);
boost::mutex::scoped_lock lk(fMutex, boost::defer_lock);
if (fUseStoreStringMutex)
lk.lock();
@ -212,7 +212,7 @@ uint32_t UserDataStore::storeUserData(mcsv1sdk::mcsv1Context& context,
return numeric_limits<uint32_t>::max();
}
std::unique_lock lk(fMutex, std::defer_lock);
boost::mutex::scoped_lock lk(fMutex, boost::defer_lock);
if (fUseUserDataMutex)
lk.lock();

View File

@ -34,8 +34,7 @@
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
#include <cmath>
#include <cfloat>
#include <execinfo.h>
@ -190,7 +189,7 @@ class StringStore
std::vector<std::shared_ptr<uint8_t[]>> longStrings;
bool empty = true;
bool fUseStoreStringMutex = false; //@bug6065, make StringStore::storeString() thread safe
std::mutex fMutex;
boost::mutex fMutex;
};
// Where we store user data for UDA(n)F
@ -248,7 +247,7 @@ class UserDataStore
std::vector<StoreData> vStoreData;
bool fUseUserDataMutex = false;
std::mutex fMutex;
boost::mutex fMutex;
};

View File

@ -5,7 +5,7 @@ include_directories( ${ENGINE_COMMON_INCLUDES} )
########### next target ###############
set(rwlock_LIB_SRCS rwlock.cpp)
set(rwlock_LIB_SRCS rwlock.cpp rwlock_local.cpp)
add_library(rwlock SHARED ${rwlock_LIB_SRCS})
add_dependencies(rwlock external_boost)

View File

@ -58,7 +58,7 @@ using namespace rwlock;
// This mutex needs to be fully instantiated by the runtime static object
// init mechanism or the lock in makeRWLockShmImpl() will fail
std::mutex instanceMapMutex;
boost::mutex instanceMapMutex;
typedef std::tr1::unordered_map<int, RWLockShmImpl*> LockMap_t;
// Windows doesn't init static objects the same as Linux, so make this a ptr
LockMap_t* lockMapPtr = 0;
@ -113,7 +113,7 @@ namespace rwlock
/*static*/
RWLockShmImpl* RWLockShmImpl::makeRWLockShmImpl(int key, bool* excl)
{
std::unique_lock lk(instanceMapMutex);
boost::mutex::scoped_lock lk(instanceMapMutex);
LockMap_t::iterator iter;
if (!lockMapPtr)

View File

@ -0,0 +1,323 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/*****************************************************************************
* $Id$
*
****************************************************************************/
/*
* Brief description of the file contents
*
* More detailed description
*/
#include <iostream>
using namespace std;
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
using namespace boost;
#define RWLOCK_LOCAL_DLLEXPORT
#include "rwlock_local.h"
#undef RWLOCK_LOCAL_DLLEXPORT
// Semaphore slot numbers. NOTE(review): these look carried over from the
// semaphore/shared-memory rwlock implementation and are not referenced by
// the code visible below — confirm before removing.
#define MUTEX 0
#define READERS 1
#define WRITERS 2

#ifdef DEBUG

using namespace std;

// Dump the four state counters to stderr (debugging aid for the checks below).
#define PRINTSTATE() \
  cerr << " reading = " << state.reading << endl \
       << " writing = " << state.writing << endl \
       << " readerswaiting = " << state.readerswaiting << endl \
       << " writerswaiting = " << state.writerswaiting << endl;

// Safety invariant: either no readers and at most one active writer,
// or some readers and no writer.
#define CHECKSAFETY() \
  if (!((state.reading == 0 && (state.writing == 0 || state.writing == 1)) || \
        (state.reading > 0 && state.writing == 0))) \
  { \
    cerr << "RWLock_local::" << __func__ << ": safety invariant violation" << endl; \
    PRINTSTATE(); \
    throw std::logic_error("RWLock_local: safety invariant violation"); \
  }

// Liveness invariant: anyone waiting implies someone is active, and a fully
// idle lock implies nobody is waiting.
#define CHECKLIVENESS() \
  if (!((!(state.readerswaiting > 0 || state.writerswaiting > 0) || \
         (state.reading > 0 || state.writing > 0)) || \
        (!(state.reading == 0 && state.writing == 0) || \
         (state.readerswaiting == 0 && state.writerswaiting == 0)))) \
  { \
    cerr << "RWLock_local::" << __func__ << ": liveness invariant violation" << endl; \
    PRINTSTATE(); \
    throw std::logic_error("RWLock_local: liveness invariant violation"); \
  }

// Deliberately compiled out even in DEBUG builds: the definition above is
// kept for reference only.
#undef CHECKLIVENESS
#define CHECKLIVENESS()

#endif
namespace rwlock
{
/* Construct an unlocked lock: no active or queued readers or writers. */
RWLock_local::RWLock_local()
{
  state.writerswaiting = 0;
  state.readerswaiting = 0;
  state.writing = 0;
  state.reading = 0;
}
// Nothing to release explicitly; the mutex and condition members clean
// themselves up.
RWLock_local::~RWLock_local()
{
}
/* Acquire a shared (read) lock.
 * Blocks while a writer is active or any writer is queued — queued writers
 * block incoming readers, which is what gives writers priority. */
void RWLock_local::read_lock()
{
  mutex.lock();
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif

  if (state.writerswaiting > 0 || state.writing > 0)
  {
    state.readerswaiting++;
#ifdef DEBUG
    CHECKSAFETY();
    CHECKLIVENESS();
#endif

    // The wait releases `mutex` while blocked and reacquires it on wake.
    while (state.writerswaiting > 0 || state.writing > 0)
      okToRead.wait(mutex);

    state.readerswaiting--;
  }

  state.reading++;
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  mutex.unlock();
}
/* Release a read lock. The last reader out wakes one queued writer. */
void RWLock_local::read_unlock()
{
  mutex.lock();
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  state.reading--;

  // Only hand off to a writer once all readers have drained.
  if (state.writerswaiting > 0 && state.reading == 0)
    okToWrite.notify_one();

#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  mutex.unlock();
}
/* Acquire an exclusive (write) lock.
 * Blocks while any reader or another writer is active.
 * NOTE: deliberately returns while still holding `mutex` — there is no
 * unlock here; the matching write_unlock() (or downgrade_to_read())
 * releases it. */
void RWLock_local::write_lock()
{
  mutex.lock();
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif

  if (state.writing > 0 || state.reading > 0)
  {
    state.writerswaiting++;
#ifdef DEBUG
    CHECKSAFETY();
    CHECKLIVENESS();
#endif

    // The wait releases `mutex` while blocked and reacquires it on wake.
    while (state.writing > 0 || state.reading > 0)
      okToWrite.wait(mutex);

    state.writerswaiting--;
  }

  state.writing++;
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
}
/* Release the write lock. The caller still holds `mutex` (acquired in
 * write_lock()/upgrade_to_write()), which is why there is no mutex.lock()
 * here; it is released on exit. Preference on wake-up: one queued writer
 * first, otherwise all queued readers. */
void RWLock_local::write_unlock()
{
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  state.writing--;

  if (state.writerswaiting > 0)
    okToWrite.notify_one();
  else if (state.readerswaiting > 0)
    okToRead.notify_all();

#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  mutex.unlock();
}
/* Convert a held read lock into a write lock.
 * Not atomic: if other readers are active, this thread queues as a writer
 * and waits for them to drain.
 * NOTE: like write_lock(), this returns holding `mutex` on BOTH paths
 * (the early return and the fall-through); write_unlock() releases it. */
void RWLock_local::upgrade_to_write()
{
  mutex.lock();
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  state.reading--;

  // try to cut in line: if we were the only reader, take the write lock
  // immediately without queueing.
  if (state.reading == 0)
  {
    state.writing++;
#ifdef DEBUG
    CHECKSAFETY();
    CHECKLIVENESS();
#endif
    return;
  }

  // cut & paste from write_lock()
  if (state.writing > 0 || state.reading > 0)
  {
    state.writerswaiting++;
#ifdef DEBUG
    CHECKSAFETY();
    CHECKLIVENESS();
#endif

    while (state.writing > 0 || state.reading > 0)
      okToWrite.wait(mutex);

    state.writerswaiting--;
  }

  state.writing++;
}
/* It's safe (and necessary) to simply convert this writer to a reader without
   blocking. Runs with `mutex` already held (from write_lock()) and releases
   it on exit; queued readers are woken so they can join this one. */
void RWLock_local::downgrade_to_read()
{
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  state.writing--;

  if (state.readerswaiting > 0)
    okToRead.notify_all();

  state.reading++;
#ifdef DEBUG
  CHECKSAFETY();
  CHECKLIVENESS();
#endif
  mutex.unlock();
}
/* White-box test hooks (see "These are for white box testing only" in the
 * header): lock()/unlock() expose the internal mutex so a test can freeze
 * the counters while asserting on them. */
void RWLock_local::lock()
{
  mutex.lock();
}

void RWLock_local::unlock()
{
  mutex.unlock();
}

// Raw counter accessors; call lock() first for a consistent snapshot.
int RWLock_local::getWriting()
{
  return state.writing;
}

int RWLock_local::getReading()
{
  return state.reading;
}

int RWLock_local::getWritersWaiting()
{
  return state.writerswaiting;
}

int RWLock_local::getReadersWaiting()
{
  return state.readerswaiting;
}
/* Scoped holder: remembers the target lock and mode, then acquires
 * immediately. `locked` starts false and lock() flips it to true. */
ScopedRWLock_local::ScopedRWLock_local(RWLock_local* l, rwlock_mode m)
 : thelock(l), mode(m), locked(false)
{
  // Only the two public modes are legal.
  assert(m == R || m == W);
  lock();
}
// Release on scope exit, but only if still held — the caller may have
// already dropped it via unlock().
ScopedRWLock_local::~ScopedRWLock_local()
{
  if (locked)
    unlock();
}
/* Acquire the underlying lock in the mode fixed at construction and
 * record that we now hold it. */
void ScopedRWLock_local::lock()
{
  if (mode != R)
    thelock->write_lock();
  else
    thelock->read_lock();

  locked = true;
}
/* Release the underlying lock in the mode fixed at construction and
 * record that we no longer hold it. */
void ScopedRWLock_local::unlock()
{
  if (mode != R)
    thelock->write_unlock();
  else
    thelock->read_unlock();

  locked = false;
}
} // namespace rwlock

192
utils/rwlock/rwlock_local.h Normal file
View File

@ -0,0 +1,192 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/******************************************************************************
* $Id$
*
*****************************************************************************/
/** @file
* class RWLock_local interface
*/
#pragma once
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
#define EXPORT
namespace rwlock
{
/** @brief Reader/writer lock for use between threads of one process.
 *
 * In-process counterpart of the keyed shared-memory RWLock: there is no
 * key, no shared memory, and no state that outlives the object, so each
 * instance is an independent lock.
 *
 * Summary of operation:
 *  - readers can work concurrently
 *  - writers get exclusive access
 *  - writers have priority (new readers block while a writer is queued)
 */
class RWLock_local
{
 public:
  /* NOTE(review): not_excl and wouldblock mirror the exception interface of
   * the keyed shm RWLock; nothing declared in this header documents throwing
   * them from RWLock_local itself — confirm before catching them. */
  class not_excl : public std::exception
  {
   public:
    virtual const char* what() const throw()
    {
      return "not_excl";
    }
  };
  class wouldblock : public std::exception
  {
   public:
    virtual const char* what() const throw()
    {
      return "wouldblock";
    }
  };

  /** @brief Default constructor.
   *
   * Creates an unlocked, independent in-process lock. Unlike the shm
   * RWLock there is no key and no exclusive-creation handshake.
   */
  EXPORT RWLock_local();

  EXPORT ~RWLock_local();

  /** @brief Grab a read lock
   *
   * Grab a read lock. This will block iff writers are waiting or
   * a writer is active.
   */
  EXPORT void read_lock();

  /** @brief Release a read lock.
   *
   * Release a read lock.
   */
  EXPORT void read_unlock();

  /** @brief Grab a write lock
   *
   * Grab a write lock. This will block while another writer or reader is
   * active and will have exclusive access on waking.
   */
  EXPORT void write_lock();

  /** @brief Release a write lock.
   *
   * Release a write lock.
   */
  EXPORT void write_unlock();

  /** @brief Upgrade a read lock to a write lock
   *
   * Upgrade a read lock to a write lock. It may have to block
   * if there are other readers currently reading. No guarantees of atomicity.
   */
  EXPORT void upgrade_to_write();

  /** @brief Downgrade a write lock to a read lock
   *
   * Downgrade a write lock to a read lock. The conversion happens
   * atomically.
   */
  EXPORT void downgrade_to_read();

  /* These are for white box testing only: lock()/unlock() expose the
   * internal mutex, and the getters read the raw counters. */
  EXPORT void lock();
  EXPORT void unlock();
  EXPORT int getWriting();
  EXPORT int getReading();
  EXPORT int getWritersWaiting();
  EXPORT int getReadersWaiting();

 private:
  // Not copyable
  RWLock_local(const RWLock_local& rwl);
  RWLock_local& operator=(const RWLock_local& rwl);

  /// Lock bookkeeping: active and queued readers/writers.
  struct State
  {
    int writerswaiting, writing, readerswaiting, reading;
  } state;

  boost::mutex mutex;
  boost::condition okToRead;   // signalled when queued readers may proceed
  boost::condition okToWrite;  // signalled when one queued writer may proceed
};
/// Mode selector for ScopedRWLock_local: R = shared/read, W = exclusive/write.
enum rwlock_mode
{
  R,
  W
};

/** @brief Scoped (RAII-style) holder for an RWLock_local.
 *
 * Acquires the lock in the requested mode on construction and releases it
 * from the destructor unless unlock() was already called.
 */
class ScopedRWLock_local
{
 public:
  ScopedRWLock_local(RWLock_local*, rwlock_mode);
  ~ScopedRWLock_local();

  void lock();
  void unlock();

 private:
  // Default-construction, copying and assignment are private: instances are
  // only created through the public two-argument constructor.
  explicit ScopedRWLock_local()
  {
  }
  explicit ScopedRWLock_local(const ScopedRWLock_local&)
  {
  }
  ScopedRWLock_local& operator=(const ScopedRWLock_local&)
  {
    return *this;
  }

  RWLock_local* thelock;  // the lock being held (not owned)
  rwlock_mode mode;       // mode requested at construction
  bool locked;            // true while this object holds the lock
};
#undef EXPORT
} // namespace rwlock

296
utils/rwlock/tdriver-rw.cpp Normal file
View File

@ -0,0 +1,296 @@
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/*****************************************************************************
* $Id$
*
****************************************************************************/
/** @file
* Brief description of the file contents
*
* More detailed description
*/
#include <iostream>
#include <sys/types.h>
#include <stdexcept>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <pthread.h>
#include <cppunit/extensions/HelperMacros.h>
#include "rwlock.h"
#include "rwlock_local.h"
using namespace std;
using namespace rwlock;
int threadStop;
/* Stress-test thread body for the shared-memory RWLock.
 * `arg` smuggles the lock key (an int) through the pthread void* argument.
 * Loops until the global `threadStop` flips, randomly choosing per
 * iteration: read + maybe upgrade (ops 0-7), write + maybe downgrade
 * (op 8), or delete/recreate the lock object (op 9), asserting the
 * reader/writer counters stay consistent throughout. */
static void* RWRunner(void* arg)
{
  struct timeval tv;
  int op, op2, interval;
  RWLock* rwlock;

  // tv_usec doubles as the rand_r() seed for this thread.
  gettimeofday(&tv, NULL);

  rwlock = new RWLock(reinterpret_cast<int64_t>(arg));

  while (!threadStop)
  {
    op = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 10;

    if (op < 8)  // read
    {
      interval = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 100000;
      rwlock->read_lock();
      // lock()/unlock() freeze the counters while we assert on them.
      rwlock->lock();
      CPPUNIT_ASSERT(rwlock->getReading() > 0);
      CPPUNIT_ASSERT(rwlock->getWriting() == 0);
      rwlock->unlock();
      usleep(interval);
      op2 = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 2;

      if (op2)
      {
        // Exercise the read->write upgrade path, then release.
        rwlock->upgrade_to_write();
        rwlock->lock();
        CPPUNIT_ASSERT(rwlock->getReading() == 0);
        CPPUNIT_ASSERT(rwlock->getWriting() == 1);
        rwlock->unlock();
        usleep(interval);
        rwlock->write_unlock();
      }
      else
      {
        /* For testing the lock recovery code in the BRM workernodes */
        /*
        int crash = rand_r((uint32_t *) &tv.tv_usec) % 100;
        if (crash > 0)   // 1% chance of crashing
        rwlock->read_unlock();
        */
        // NOTE(review): with the block above commented out, this branch never
        // calls read_unlock(), deliberately abandoning the read lock so the
        // BRM lock-recovery path has something to clean up — confirm intent.
      }
    }
    else if (op < 9)  // write
    {
      interval = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 100000;
      rwlock->write_lock();
      rwlock->lock();
      CPPUNIT_ASSERT(rwlock->getReading() == 0);
      CPPUNIT_ASSERT(rwlock->getWriting() == 1);
      rwlock->unlock();
      usleep(interval);
      op2 = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 2;

      if (op2)
      {
        // Exercise the write->read downgrade path, then release.
        rwlock->downgrade_to_read();
        rwlock->lock();
        CPPUNIT_ASSERT(rwlock->getReading() > 0);
        CPPUNIT_ASSERT(rwlock->getWriting() == 0);
        rwlock->unlock();
        usleep(interval);
        rwlock->read_unlock();
      }
      else
        rwlock->write_unlock();
    }
    else if (op == 9)  // delete
    {
      // Recreate the handle; the underlying keyed lock state persists.
      delete rwlock;
      rwlock = new RWLock(reinterpret_cast<int64_t>(arg));
    }
  }

  delete rwlock;
  pthread_exit(0);
}
/// Thread body for the in-process RWLock_local stress test.
/// 'arg' is a pointer to the single RWLock_local shared by every worker.
/// Loops until the file-global threadStop flag is raised, randomly acting
/// as a reader (cases 0-8, with a 50% chance of upgrading to write) or a
/// writer (case 9, with a 50% chance of downgrading to read), asserting
/// the lock's reader/writer bookkeeping throughout.
static void* RWRunner_local(void* arg)
{
  struct timeval tv;
  int op, op2, interval;
  RWLock_local* rwlock = reinterpret_cast<RWLock_local*>(arg);
  gettimeofday(&tv, NULL);
  while (!threadStop)
  {
    op = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 10;
    // cout << "doing op " << op << endl;
    switch (op)
    {
      case 0: // read
      case 1:
      case 2:
      case 3:
      case 4:
      case 5:
      case 6:
      case 7:
      case 8:
      {
        interval = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 100000;
        rwlock->read_lock();
        rwlock->lock();
        CPPUNIT_ASSERT(rwlock->getReading() > 0);
        CPPUNIT_ASSERT(rwlock->getWriting() == 0);
        rwlock->unlock();
        usleep(interval);
        op2 = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 2;
        if (op2)
        {
          // Promote to a write lock; write_unlock() releases our hold.
          rwlock->upgrade_to_write();
          // rwlock->lock();
          CPPUNIT_ASSERT(rwlock->getReading() == 0);
          CPPUNIT_ASSERT(rwlock->getWriting() == 1);
          // rwlock->unlock();
          usleep(interval);
          rwlock->write_unlock();
        }
        else
          // BUGFIX: when the coin flip skipped the upgrade branch the read
          // lock was never released, so the reader count only ever grew
          // and writers starved. Pair read_lock() with read_unlock().
          rwlock->read_unlock();
        break;
      }
      case 9: // write
      {
        interval = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 100000;
        rwlock->write_lock();
        // rwlock->lock();
        CPPUNIT_ASSERT(rwlock->getReading() == 0);
        CPPUNIT_ASSERT(rwlock->getWriting() == 1);
        // rwlock->unlock();
        usleep(interval);
        op2 = rand_r(reinterpret_cast<uint32_t*>(&tv.tv_usec)) % 2;
        if (op2)
        {
          // Demote to a read lock, re-check the counters, then release it.
          rwlock->downgrade_to_read();
          rwlock->lock();
          CPPUNIT_ASSERT(rwlock->getReading() > 0);
          CPPUNIT_ASSERT(rwlock->getWriting() == 0);
          rwlock->unlock();
          usleep(interval);
          rwlock->read_unlock();
        }
        else
          rwlock->write_unlock();
        break;
      }
      default: break;
    }
  }
  pthread_exit(0);
}
/// CppUnit fixture driving the multithreaded RWLock stress tests.
/// Only the shared-memory (ipc) variant is enabled in the suite; the
/// in-process RWLock_local test is kept but commented out of the suite.
class RWLockTest : public CppUnit::TestFixture
{
  CPPUNIT_TEST_SUITE(RWLockTest);
  CPPUNIT_TEST(LongRWTest_1);
  // CPPUNIT_TEST(LongRWLocalTest_1);
  CPPUNIT_TEST_SUITE_END();

 private:
 public:
  /// Spawns 30 RWRunner threads all attached to the same ipc lock key,
  /// lets them hammer the lock for one hour, then signals them to stop
  /// and joins each one. CPPUNIT_ASSERT failures surface from the workers.
  void LongRWTest_1()
  {
    int key = 0x20000;  // the extentmap key
    const int threadCount = 30;
    int i;
    pthread_t threads[threadCount];
    cerr << endl
         << "Multithreaded RWLock test.  "
            "This runs for 60 minutes."
         << endl;
    threadStop = 0;

    for (i = 0; i < threadCount; i++)
    {
      // BUGFIX: pthread_create() returns 0 on success and a positive errno
      // value on failure — never a negative number — so the old "< 0"
      // check could not fire and creation failures went unnoticed.
      if (pthread_create(&threads[i], NULL, RWRunner, reinterpret_cast<void*>(key)) != 0)
        throw logic_error("Error creating threads for the ipc test");
    }

    sleep(3600);
    threadStop = 1;

    for (i = 0; i < threadCount; i++)
    {
      cerr << "Waiting for thread #" << i << endl;
      pthread_join(threads[i], NULL);
    }
  }

  /// Spawns 40 RWRunner_local threads sharing one stack-allocated
  /// RWLock_local, runs them for 30 seconds, then stops and joins them.
  void LongRWLocalTest_1()
  {
    const int threadCount = 40;
    int i;
    pthread_t threads[threadCount];
    RWLock_local rwlock;
    cerr << endl
         << "Multithreaded RWLock_local test.  "
            "This runs for 30-60 seconds."
         << endl;
    threadStop = 0;

    for (i = 0; i < threadCount; i++)
    {
      // Same pthread_create() return-value fix as above.
      if (pthread_create(&threads[i], NULL, RWRunner_local, reinterpret_cast<void*>(&rwlock)) != 0)
        throw logic_error("Error creating threads for the local test");
    }

    sleep(30);
    threadStop = 1;

    for (i = 0; i < threadCount; i++)
    {
      cerr << "Waiting for thread #" << i << endl;
      pthread_join(threads[i], NULL);
    }
  }
};
CPPUNIT_TEST_SUITE_REGISTRATION(RWLockTest);
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>
/// Entry point: runs every suite registered via
/// CPPUNIT_TEST_SUITE_REGISTRATION and maps the overall result to the
/// process exit status (0 = all tests passed, 1 = at least one failure).
int main(int argc, char** argv)
{
  CppUnit::TextUi::TestRunner runner;
  runner.addTest(CppUnit::TestFactoryRegistry::getRegistry().makeTest());
  const bool allPassed = runner.run("", false);
  return allPassed ? 0 : 1;
}

View File

@ -24,9 +24,8 @@
#include <string>
using namespace std;
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
using namespace boost;
#include "installdir.h"
#include "configcpp.h"
@ -40,14 +39,14 @@ using namespace std;
namespace startup
{
/* static */
std::mutex StartUp::fTmpDirLock;
boost::mutex StartUp::fTmpDirLock;
/* static */
string* StartUp::fTmpDirp = 0;
/* static */
const string StartUp::tmpDir()
{
std::unique_lock lk(fTmpDirLock);
boost::mutex::scoped_lock lk(fTmpDirLock);
if (fTmpDirp)
return *fTmpDirp;

View File

@ -25,8 +25,7 @@
#pragma once
#include <string>
#include <map>
#include <mutex>
#include <boost/thread/mutex.hpp>
namespace startup
{
@ -47,7 +46,7 @@ class StartUp
StartUp(const StartUp& rhs);
StartUp& operator=(const StartUp& rhs);
static std::mutex fTmpDirLock;
static boost::mutex fTmpDirLock;
static std::string* fTmpDirp;
};

View File

@ -24,7 +24,7 @@
***************************************************************************/
#include "blockrequestprocessor.h"
#include "rwlock_local.h"
#include "dbrm.h"
#include <sys/time.h>
#include <pthread.h>

View File

@ -66,7 +66,7 @@ FileBufferMgr::~FileBufferMgr()
void FileBufferMgr::flushCache()
{
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
fbList.clear();
fbSet.clear();
fFBPool.clear();
@ -83,7 +83,7 @@ bool FileBufferMgr::exists(const BRM::LBID_t& lbid, const BRM::VER_t& ver) const
FileBuffer* FileBufferMgr::findPtr(const HashObject_t& keyFb)
{
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
filebuffer_uset_iter_t it = fbSet.find(keyFb);
if (fbSet.end() != it)
@ -101,7 +101,7 @@ bool FileBufferMgr::find(const HashObject_t& keyFb, FileBuffer& fb)
{
bool ret = false;
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
filebuffer_uset_iter_t it = fbSet.find(keyFb);
if (fbSet.end() != it)
@ -123,7 +123,7 @@ bool FileBufferMgr::find(const HashObject_t& keyFb, void* bufferPtr)
if (gPMProfOn && gPMStatsPtr)
gPMStatsPtr->markEvent(keyFb.lbid, pthread_self(), gSession, 'L');
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
if (gPMProfOn && gPMStatsPtr)
gPMStatsPtr->markEvent(keyFb.lbid, pthread_self(), gSession, 'M');
@ -150,7 +150,7 @@ bool FileBufferMgr::find(const HashObject_t& keyFb, void* bufferPtr)
bool FileBufferMgr::exists(const HashObject_t& fb) const
{
bool find_bool = false;
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
filebuffer_uset_iter_t it = fbSet.find(fb);
if (it != fbSet.end())
@ -175,7 +175,7 @@ const int FileBufferMgr::insert(const BRM::LBID_t lbid, const BRM::VER_t ver, co
if (gPMProfOn && gPMStatsPtr)
gPMStatsPtr->markEvent(lbid, pthread_self(), gSession, 'I');
std::scoped_lock lk(fWLock);
mutex::scoped_lock lk(fWLock);
HashObject_t fbIndex = {lbid, ver, 0};
filebuffer_pair_t pr = fbSet.insert(fbIndex);

View File

@ -26,7 +26,7 @@
#include <pthread.h>
#include "blocksize.h"
#include "filebuffer.h"
#include "rwlock_local.h"
#include <tr1/unordered_set>
#include <boost/thread.hpp>
@ -176,7 +176,7 @@ class FileBufferMgr
uint32_t fMaxNumBlocks; // the max number of blockSz blocks to keep in the Cache list
uint32_t fBlockSz; // size in bytes size of a data block - probably 8
mutable std::mutex fWLock;
mutable boost::mutex fWLock;
mutable filebuffer_uset_t fbSet;
mutable filebuffer_list_t fbList; // rename this

View File

@ -55,7 +55,7 @@ class Logger
typedef std::map<logging::Message::MessageID, logging::Message> MsgMap;
MsgMap fMsgMap;
std::mutex fLogLock;
boost::mutex fLogLock;
logging::MessageLog fMl1;
};

View File

@ -180,7 +180,7 @@ class StatMon
void operator()() const
{
// struct timespec ts = { 60 * 1, 0 };
std::scoped_lock lk(traceFileMapMutex);
mutex::scoped_lock lk(traceFileMapMutex);
TraceFileMap_t::iterator iter;
TraceFileMap_t::iterator end;
@ -239,7 +239,7 @@ void Stats::touchedLBID(uint64_t lbid, pthread_t thdid, uint32_t session)
if (lbid < 0 || session == 0)
return;
std::scoped_lock lk(traceFileMapMutex);
mutex::scoped_lock lk(traceFileMapMutex);
TraceFileMap_t::iterator iter = traceFileMap.find(session);
if (iter == traceFileMap.end())
@ -257,7 +257,7 @@ void Stats::markEvent(const uint64_t lbid, const pthread_t thdid, const uint32_t
if (lbid < 0 || session == 0)
return;
std::scoped_lock lk(traceFileMapMutex);
mutex::scoped_lock lk(traceFileMapMutex);
TraceFileMap_t::iterator iter = traceFileMap.find(session);
if (iter == traceFileMap.end())

View File

@ -24,9 +24,8 @@
#include <sstream>
#include <stdexcept>
#include <boost/thread/thread.hpp>
#include <map>
#include <mutex>
#include <condition_variable>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include <atomic>

View File

@ -72,7 +72,7 @@ PriorityThreadPool::~PriorityThreadPool()
void PriorityThreadPool::addJob(const Job& job, bool useLock)
{
boost::thread* newThread;
std::unique_lock lk(mutex, std::defer_lock);
boost::mutex::scoped_lock lk(mutex, boost::defer_lock_t());
if (useLock)
lk.lock();
@ -129,7 +129,7 @@ void PriorityThreadPool::removeJobs(uint32_t id)
{
list<Job>::iterator it;
std::unique_lock lk(mutex);
boost::mutex::scoped_lock lk(mutex);
for (uint32_t i = 0; i < _COUNT; i++)
for (it = jobQueues[i].begin(); it != jobQueues[i].end();)
@ -169,7 +169,7 @@ void PriorityThreadPool::threadFcn(const Priority preferredQueue) throw()
{
while (!_stop)
{
std::unique_lock lk(mutex);
boost::mutex::scoped_lock lk(mutex);
queue = pickAQueue(preferredQueue);

View File

@ -30,9 +30,8 @@
#include <sstream>
#include <stdexcept>
#include <boost/thread/thread.hpp>
#include <map>
#include <mutex>
#include <condition_variable>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include <atomic>
@ -114,7 +113,7 @@ class PriorityThreadPool
{
return blockedThreads;
}
protected:
private:
struct ThreadHelper
@ -141,8 +140,8 @@ class PriorityThreadPool
std::list<Job> jobQueues[3]; // higher indexes = higher priority
uint32_t threadCounts[3];
uint32_t defaultThreadCounts[3];
std::mutex mutex;
std::condition_variable newJob;
boost::mutex mutex;
boost::condition newJob;
boost::thread_group threads;
bool _stop;
uint32_t weightPerRun;

View File

@ -34,7 +34,7 @@ using namespace std;
#include "threadpool.h"
int thecount = 0;
std::mutex mutex;
boost::mutex mutex;
class ThreadPoolTestSuite : public CppUnit::TestFixture
{
@ -56,7 +56,7 @@ class ThreadPoolTestSuite : public CppUnit::TestFixture
fData++;
}
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
std::cout << "count = " << ++thecount << ' ' << fData << std::endl;
}

View File

@ -32,7 +32,6 @@ using namespace logging;
#include "threadnaming.h"
#include <iomanip>
#include <sstream>
#include <chrono>
#include "boost/date_time/posix_time/posix_time_types.hpp"
#include "mcsconfig.h"
@ -53,7 +52,7 @@ ThreadPool::~ThreadPool() throw()
{
try
{
std::unique_lock initLock(fInitMutex);
boost::mutex::scoped_lock initLock(fInitMutex);
stop();
}
catch (...)
@ -63,7 +62,7 @@ ThreadPool::~ThreadPool() throw()
void ThreadPool::init()
{
std::unique_lock initLock(fInitMutex);
boost::mutex::scoped_lock initLock(fInitMutex);
fThreadCount = 0;
fGeneralErrors = 0;
fFunctorErrors = 0;
@ -78,20 +77,20 @@ void ThreadPool::init()
void ThreadPool::setQueueSize(size_t queueSize)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fQueueSize = queueSize;
}
void ThreadPool::pruneThread()
{
utils::setThreadName("pruneThread");
std::unique_lock<std::mutex> lock2(fPruneMutex);
boost::unique_lock<boost::mutex> lock2(fPruneMutex);
while (true)
{
if (fStop)
return;
if (fPruneThreadEnd.wait_for(lock2, std::chrono::minutes{1}) == std::cv_status::timeout)
if (fPruneThreadEnd.wait_for(lock2, boost::chrono::minutes{1}) == boost::cv_status::timeout)
{
while (!fPruneThreads.empty())
{
@ -120,13 +119,13 @@ void ThreadPool::pruneThread()
void ThreadPool::setMaxThreads(size_t maxThreads)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fMaxThreads = maxThreads;
}
void ThreadPool::stop()
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
if (fStop)
return; // Was stopped earlier
fStop = true;
@ -141,7 +140,7 @@ void ThreadPool::stop()
void ThreadPool::wait()
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
while (waitingFunctorsSize > 0)
{
@ -152,7 +151,7 @@ void ThreadPool::wait()
void ThreadPool::join(uint64_t thrHandle)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
while (waitingFunctorsSize > 0)
{
@ -182,7 +181,7 @@ void ThreadPool::join(uint64_t thrHandle)
void ThreadPool::join(std::vector<uint64_t>& thrHandle)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
while (waitingFunctorsSize > 0)
{
@ -223,7 +222,7 @@ void ThreadPool::join(std::vector<uint64_t>& thrHandle)
uint64_t ThreadPool::invoke(const Functor_T& threadfunc)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
uint64_t thrHandle = 0;
for (;;)
@ -320,7 +319,7 @@ void ThreadPool::beginThread() throw()
utils::setThreadName("Idle");
try
{
std::unique_lock<std::mutex> lock1(fMutex);
boost::unique_lock<boost::mutex> lock1(fMutex);
for (;;)
{
@ -339,11 +338,11 @@ void ThreadPool::beginThread() throw()
else
{
// Wait no more than 10 minutes
if (fNeedThread.wait_for(lock1, std::chrono::minutes{10}) == std::cv_status::timeout)
if (fNeedThread.wait_for(lock1, boost::chrono::minutes{10}) == boost::cv_status::timeout)
{
if (fThreadCount > fMaxThreads)
{
std::unique_lock lock2(fPruneMutex);
boost::mutex::scoped_lock lock2(fPruneMutex);
fPruneThreads.push(boost::this_thread::get_id());
--fThreadCount;
return;

View File

@ -37,9 +37,8 @@
#include <stack>
#include <stdint.h>
#include <boost/thread/thread.hpp>
#include <map>
#include <mutex>
#include <condition_variable>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/locks.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
@ -346,9 +345,9 @@ class ThreadPool
Container_T::iterator fNextFunctor;
uint32_t fIssued;
std::mutex fMutex;
std::condition_variable fThreadAvailable; // triggered when a thread is available
std::condition_variable fNeedThread; // triggered when a thread is needed
boost::mutex fMutex;
boost::condition_variable fThreadAvailable; // triggered when a thread is available
boost::condition_variable fNeedThread; // triggered when a thread is needed
ThreadPoolGroup fThreads;
bool fStop;
@ -359,9 +358,9 @@ class ThreadPool
std::string fName; // Optional to add a name to the pool for debugging.
bool fDebug;
std::mutex fInitMutex;
std::mutex fPruneMutex;
std::condition_variable fPruneThreadEnd;
boost::mutex fInitMutex;
boost::mutex fPruneMutex;
boost::condition_variable fPruneThreadEnd;
boost::thread* fPruneThread;
std::stack<boost::thread::id> fPruneThreads; // A list of stale thread IDs to be joined
};

View File

@ -30,7 +30,7 @@ using namespace std;
#include "threadpool.h"
int64_t thecount = 0;
std::mutex mutex;
boost::mutex mutex;
const string timeNow()
{
@ -68,7 +68,7 @@ struct foo
// simulate some work
fData++;
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
std::cout << "foo thd = " << fThd << " start " << start << " fin " << timeNow() << std::endl;
}
@ -123,7 +123,7 @@ int main(int argc, char** argv)
}
}
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
}
// Wait until all of the queued up and in-progress work has finished

View File

@ -74,19 +74,19 @@ void WeightedThreadPool::init()
void WeightedThreadPool::setQueueSize(size_t queueSize)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fQueueSize = queueSize;
}
void WeightedThreadPool::setMaxThreads(size_t maxThreads)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fMaxThreads = maxThreads;
}
void WeightedThreadPool::setMaxThreadWeight(size_t maxWeight)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fMaxThreadWeight = maxWeight;
}
@ -97,7 +97,7 @@ void WeightedThreadPool::setThreadCreatedListener(const Functor_T& f)
void WeightedThreadPool::stop()
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
fStop = true;
lock1.unlock();
@ -107,7 +107,7 @@ void WeightedThreadPool::stop()
void WeightedThreadPool::wait()
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
while (fWaitingFunctorsSize > 0)
{
@ -119,7 +119,7 @@ void WeightedThreadPool::wait()
void WeightedThreadPool::removeJobs(uint32_t id)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
Container_T::iterator it;
it = fNextFunctor;
@ -146,7 +146,7 @@ void WeightedThreadPool::removeJobs(uint32_t id)
void WeightedThreadPool::invoke(const Functor_T& threadfunc, uint32_t functor_weight, uint32_t id)
{
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
for (;;)
{
@ -216,7 +216,7 @@ void WeightedThreadPool::beginThread() throw()
try
{
// fThreadCreated();
std::unique_lock lock1(fMutex);
boost::mutex::scoped_lock lock1(fMutex);
for (;;)
{

View File

@ -30,9 +30,8 @@
#include <sstream>
#include <stdexcept>
#include <boost/thread/thread.hpp>
#include <map>
#include <mutex>
#include <condition_variable>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
@ -222,9 +221,9 @@ class WeightedThreadPool
Container_T::iterator fNextFunctor;
uint32_t issued;
std::mutex fMutex;
std::condition_variable fThreadAvailable; // triggered when a thread is available
std::condition_variable fNeedThread; // triggered when a thread is needed
boost::mutex fMutex;
boost::condition fThreadAvailable; // triggered when a thread is available
boost::condition fNeedThread; // triggered when a thread is needed
boost::thread_group fThreads;
bool fStop;

View File

@ -31,7 +31,7 @@ using namespace std;
#include "weightedthreadpool.h"
int thecount = 0;
std::mutex mutex;
boost::mutex mutex;
// Functor class
struct foo
@ -42,7 +42,7 @@ struct foo
// simulate some work
fData++;
// std::unique_lock lock(mutex);
// boost::mutex::scoped_lock lock(mutex);
// std::cout << "foo count = " << ++thecount << " " << fData << std::endl;
}
@ -70,7 +70,7 @@ int main(int argc, char** argv)
pool.invoke(bar, 25);
}
std::unique_lock lock(mutex);
boost::mutex::scoped_lock lock(mutex);
std::cout << "count = " << ++thecount << std::endl;
// Wait until all of the queued up and in-progress work has finished