1
0
Mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git, synced 2025-07-29 08:21:15 +03:00.

MCOL-5499 Enable ControlFlow for same node communication processing path to avoid DEC queue overloading (#2847)

This commit is contained in:
Roman Nozdrin
2023-06-07 13:42:47 +01:00
committed by GitHub
parent 23a969dbe2
commit 9e1eac448a
9 changed files with 193 additions and 202 deletions

View File

@@ -26,6 +26,7 @@
#include <mutex>
#include "bppsendthread.h"
#include "resourcemanager.h"
#include "serviceexemgr.h"
namespace primitiveprocessor
{
@@ -33,32 +34,9 @@ extern uint32_t connectionsPerUM;
extern uint32_t BPPCount;
BPPSendThread::BPPSendThread()
: die(false)
, gotException(false)
, mainThreadWaiting(false)
, sizeThreshold(100)
, msgsLeft(-1)
, waiting(false)
, sawAllConnections(false)
, fcEnabled(false)
, currentByteSize(0)
{
maxByteSize = joblist::ResourceManager::instance()->getMaxBPPSendQueue();
runner = boost::thread(Runner_t(this));
}
BPPSendThread::BPPSendThread(uint32_t initMsgsLeft)
: die(false)
, gotException(false)
, mainThreadWaiting(false)
, sizeThreshold(100)
, msgsLeft(initMsgsLeft)
, waiting(false)
, sawAllConnections(false)
, fcEnabled(false)
, currentByteSize(0)
{
maxByteSize = joblist::ResourceManager::instance()->getMaxBPPSendQueue();
queueBytesThresh = joblist::ResourceManager::instance()->getBPPSendThreadBytesThresh();
queueMsgThresh = joblist::ResourceManager::instance()->getBPPSendThreadMsgThresh();
runner = boost::thread(Runner_t(this));
}
@@ -74,7 +52,7 @@ void BPPSendThread::sendResult(const Msg_t& msg, bool newConnection)
if (sizeTooBig())
{
std::unique_lock<std::mutex> sl1(respondLock);
while (currentByteSize >= maxByteSize && msgQueue.size() > 3 && !die)
while (currentByteSize >= queueBytesThresh && msgQueue.size() > 3 && !die)
{
fProcessorPool->incBlockedThreads();
okToRespond.wait(sl1);
@@ -119,7 +97,7 @@ void BPPSendThread::sendResults(const vector<Msg_t>& msgs, bool newConnection)
if (sizeTooBig())
{
std::unique_lock<std::mutex> sl1(respondLock);
while (currentByteSize >= maxByteSize && msgQueue.size() > 3 && !die)
while (currentByteSize >= queueBytesThresh && msgQueue.size() > 3 && !die)
{
fProcessorPool->incBlockedThreads();
okToRespond.wait(sl1);
@@ -166,7 +144,6 @@ void BPPSendThread::sendMore(int num)
{
std::unique_lock<std::mutex> sl(ackLock);
// cout << "got an ACK for " << num << " msgsLeft=" << msgsLeft << endl;
if (num == -1)
fcEnabled = false;
else if (num == 0)
@@ -256,18 +233,27 @@ void BPPSendThread::mainLoop()
bsSize = msg[msgsSent].msg->lengthWithHdrOverhead();
try
// Same node processing path
if (!sock)
{
boost::mutex::scoped_lock sl2(*lock);
sock->write(*msg[msgsSent].msg);
// cout << "sent 1 msg\n";
auto* exeMgrDecPtr = exemgr::globServiceExeMgr->getDec();
assert(exeMgrDecPtr);
exeMgrDecPtr->addDataToOutput(msg[msgsSent].msg);
}
catch (std::exception& e)
else
{
sl.lock();
exceptionString = e.what();
gotException = true;
return;
try
{
boost::mutex::scoped_lock sl2(*lock);
sock->write(*msg[msgsSent].msg);
}
catch (std::exception& e)
{
sl.lock();
exceptionString = e.what();
gotException = true;
return;
}
}
(void)atomicops::atomicDec(&msgsLeft);
@@ -275,7 +261,7 @@ void BPPSendThread::mainLoop()
msg[msgsSent].msg.reset();
}
if (fProcessorPool->blockedThreadCount() > 0 && currentByteSize < maxByteSize)
if (fProcessorPool->blockedThreadCount() > 0 && currentByteSize < queueBytesThresh)
{
okToRespond.notify_one();
}