1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-01 06:46:55 +03:00

MCOL-2244 Columnstore execution threads now have names describing

the threads' operation. This should simplify troubleshooting of CPU bottlenecks.
This commit is contained in:
Roman Nozdrin
2019-03-13 10:19:43 +03:00
parent 2509d833fc
commit a0b3424603
18 changed files with 109 additions and 87 deletions

View File

@ -28,6 +28,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "primitivestep.h" #include "primitivestep.h"
#include "threadnaming.h"
using namespace std; using namespace std;
@ -192,6 +193,7 @@ protected:
Runner(CrossEngineStep* step) : fStep(step) { } Runner(CrossEngineStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("CESRunner");
fStep->execute(); fStep->execute();
} }

View File

@ -1,4 +1,5 @@
/* Copyright (C) 2014 InfiniDB, Inc. /* Copyright (C) 2014 InfiniDB, Inc.
Copyright (C) 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License modify it under the terms of the GNU General Public License
@ -18,6 +19,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "tuplehashjoin.h" #include "tuplehashjoin.h"
#include "joinpartition.h" #include "joinpartition.h"
#include "threadnaming.h"
#include "../../utils/threadpool/prioritythreadpool.h" #include "../../utils/threadpool/prioritythreadpool.h"
#ifndef DISKJOINSTEP_H #ifndef DISKJOINSTEP_H
@ -65,6 +67,7 @@ private:
Runner(DiskJoinStep* d) : djs(d) { } Runner(DiskJoinStep* d) : djs(d) { }
void operator()() void operator()()
{ {
utils::setThreadName("DJSMainRunner");
djs->mainRunner(); djs->mainRunner();
} }
DiskJoinStep* djs; DiskJoinStep* djs;
@ -92,6 +95,7 @@ private:
Loader(DiskJoinStep* d) : djs(d) { } Loader(DiskJoinStep* d) : djs(d) { }
void operator()() void operator()()
{ {
utils::setThreadName("DJSLoader");
djs->loadFcn(); djs->loadFcn();
} }
DiskJoinStep* djs; DiskJoinStep* djs;
@ -114,6 +118,7 @@ private:
Builder(DiskJoinStep* d) : djs(d) { } Builder(DiskJoinStep* d) : djs(d) { }
void operator()() void operator()()
{ {
utils::setThreadName("DJSBuilder");
djs->buildFcn(); djs->buildFcn();
} }
DiskJoinStep* djs; DiskJoinStep* djs;
@ -126,6 +131,7 @@ private:
Joiner(DiskJoinStep* d) : djs(d) { } Joiner(DiskJoinStep* d) : djs(d) { }
void operator()() void operator()()
{ {
utils::setThreadName("DJSJoiner");
djs->joinFcn(); djs->joinFcn();
} }
DiskJoinStep* djs; DiskJoinStep* djs;

View File

@ -63,6 +63,8 @@ using namespace rowgroup;
#include "querytele.h" #include "querytele.h"
using namespace querytele; using namespace querytele;
#include "threadnaming.h"
namespace joblist namespace joblist
{ {
@ -75,6 +77,7 @@ struct pDictionaryScanPrimitive
{ {
try try
{ {
utils::setThreadName("DSSScan");
fPDictScan->sendPrimitiveMessages(); fPDictScan->sendPrimitiveMessages();
} }
catch (runtime_error& re) catch (runtime_error& re)
@ -99,6 +102,7 @@ struct pDictionaryScanAggregator
{ {
try try
{ {
utils::setThreadName("DSSAgg");
fPDictScan->receivePrimitiveMessages(); fPDictScan->receivePrimitiveMessages();
} }
catch (runtime_error& re) catch (runtime_error& re)

View File

@ -31,6 +31,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "joblist.h" #include "joblist.h"
#include "funcexpwrapper.h" #include "funcexpwrapper.h"
#include "threadnaming.h"
namespace joblist namespace joblist
{ {
@ -264,6 +265,7 @@ protected:
Runner(SubAdapterStep* step) : fStep(step) { } Runner(SubAdapterStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("SQSRunner");
fStep->execute(); fStep->execute();
} }

View File

@ -70,6 +70,8 @@ using namespace BRM;
#include "rowgroup.h" #include "rowgroup.h"
using namespace rowgroup; using namespace rowgroup;
#include "threadnaming.h"
#include "querytele.h" #include "querytele.h"
using namespace querytele; using namespace querytele;
@ -106,6 +108,7 @@ struct TupleBPSPrimitive
{ {
try try
{ {
utils::setThreadName("BPSPrimitive");
fBatchPrimitiveStep->sendPrimitiveMessages(); fBatchPrimitiveStep->sendPrimitiveMessages();
} }
catch (std::exception& re) catch (std::exception& re)
@ -135,6 +138,7 @@ struct TupleBPSAggregators
{ {
try try
{ {
utils::setThreadName("BPSAggregator");
fBatchPrimitiveStepCols->receiveMultiPrimitiveMessages(fThreadId); fBatchPrimitiveStepCols->receiveMultiPrimitiveMessages(fThreadId);
} }
catch (std::exception& re) catch (std::exception& re)
@ -276,7 +280,6 @@ TupleBPS::TupleBPS(const pColStep& rhs, const JobInfo& jobInfo) :
hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter = hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter =
hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false; hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false;
// cout << "TBPSCount = " << ++TBPSCount << endl;
} }
TupleBPS::TupleBPS(const pColScanStep& rhs, const JobInfo& jobInfo) : TupleBPS::TupleBPS(const pColScanStep& rhs, const JobInfo& jobInfo) :
@ -336,8 +339,6 @@ TupleBPS::TupleBPS(const pColScanStep& rhs, const JobInfo& jobInfo) :
fBPP->setTxnID(fTxnId); fBPP->setTxnID(fTxnId);
fTraceFlags = rhs.fTraceFlags; fTraceFlags = rhs.fTraceFlags;
fBPP->setTraceFlags(fTraceFlags); fBPP->setTraceFlags(fTraceFlags);
// if (fOid>=3000)
// cout << "BPS:initalized from pColScanStep. fSessionId=" << fSessionId << endl;
fBPP->setStepID(fStepId); fBPP->setStepID(fStepId);
fBPP->setOutputType(ROW_GROUP); fBPP->setOutputType(ROW_GROUP);
fPhysicalIO = 0; fPhysicalIO = 0;
@ -352,9 +353,6 @@ TupleBPS::TupleBPS(const pColScanStep& rhs, const JobInfo& jobInfo) :
hasUMJoin = false; hasUMJoin = false;
fRunExecuted = false; fRunExecuted = false;
smallOuterJoiner = -1; smallOuterJoiner = -1;
// @1098 initialize scanFlags to be true
//scanFlags.assign(numExtents, true);
//runtimeCPFlags.assign(numExtents, true);
bop = BOP_AND; bop = BOP_AND;
runRan = joinRan = false; runRan = joinRan = false;
@ -405,9 +403,6 @@ TupleBPS::TupleBPS(const PassThruStep& rhs, const JobInfo& jobInfo) :
fTraceFlags = rhs.fTraceFlags; fTraceFlags = rhs.fTraceFlags;
fBPP->setTraceFlags(fTraceFlags); fBPP->setTraceFlags(fTraceFlags);
fBPP->setOutputType(ROW_GROUP); fBPP->setOutputType(ROW_GROUP);
// if (fOid>=3000)
// cout << "BPS:initalized from PassThruStep. fSessionId=" << fSessionId << endl;
finishedSending = sendWaiting = false; finishedSending = sendWaiting = false;
fSwallowRows = false; fSwallowRows = false;
fNumBlksSkipped = 0; fNumBlksSkipped = 0;
@ -437,7 +432,6 @@ TupleBPS::TupleBPS(const PassThruStep& rhs, const JobInfo& jobInfo) :
hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter = hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter =
hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false; hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false;
// cout << "TBPSCount = " << ++TBPSCount << endl;
} }
TupleBPS::TupleBPS(const pDictionaryStep& rhs, const JobInfo& jobInfo) : TupleBPS::TupleBPS(const pDictionaryStep& rhs, const JobInfo& jobInfo) :
@ -463,7 +457,6 @@ TupleBPS::TupleBPS(const pDictionaryStep& rhs, const JobInfo& jobInfo) :
fStepCount = 1; fStepCount = 1;
fCPEvaluated = false; fCPEvaluated = false;
fEstimatedRows = 0; fEstimatedRows = 0;
//fColType = rhs.colType();
alias(rhs.alias()); alias(rhs.alias());
view(rhs.view()); view(rhs.view());
name(rhs.name()); name(rhs.name());
@ -472,8 +465,6 @@ TupleBPS::TupleBPS(const pDictionaryStep& rhs, const JobInfo& jobInfo) :
fBPP.reset(new BatchPrimitiveProcessorJL(fRm)); fBPP.reset(new BatchPrimitiveProcessorJL(fRm));
initializeConfigParms(); initializeConfigParms();
fBPP->setSessionID(fSessionId); fBPP->setSessionID(fSessionId);
// if (fOid>=3000)
// cout << "BPS:initalized from DictionaryStep. fSessionId=" << fSessionId << endl;
fBPP->setStepID(fStepId); fBPP->setStepID(fStepId);
fBPP->setQueryContext(fVerId); fBPP->setQueryContext(fVerId);
fBPP->setTxnID(fTxnId); fBPP->setTxnID(fTxnId);
@ -506,7 +497,6 @@ TupleBPS::TupleBPS(const pDictionaryStep& rhs, const JobInfo& jobInfo) :
hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter = hasPCFilter = hasPMFilter = hasRIDFilter = hasSegmentFilter = hasDBRootFilter = hasSegmentDirFilter =
hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false; hasPartitionFilter = hasMaxFilter = hasMinFilter = hasLBIDFilter = hasExtentIDFilter = false;
// cout << "TBPSCount = " << ++TBPSCount << endl;
} }
TupleBPS::~TupleBPS() TupleBPS::~TupleBPS()
@ -541,7 +531,6 @@ TupleBPS::~TupleBPS()
fDec->removeQueue(uniqueID); fDec->removeQueue(uniqueID);
} }
// cout << "~TBPSCount = " << --TBPSCount << endl;
} }
void TupleBPS::setBPP(JobStep* jobStep) void TupleBPS::setBPP(JobStep* jobStep)
@ -558,7 +547,6 @@ void TupleBPS::setBPP(JobStep* jobStep)
if (pseudo) if (pseudo)
{ {
//cout << "adding a pseudo col filter" << endl;
fBPP->addFilterStep(*pseudo); fBPP->addFilterStep(*pseudo);
if (pseudo->filterCount() > 0) if (pseudo->filterCount() > 0)
@ -690,8 +678,6 @@ void TupleBPS::setProjectBPP(JobStep* jobStep1, JobStep* jobStep2)
colWidth = (pcsp->colType()).colWidth; colWidth = (pcsp->colType()).colWidth;
projectOids.push_back(jobStep1->oid()); projectOids.push_back(jobStep1->oid());
// if (fOid>=3000)
// cout << "Adding project step pColStep and pDictionaryStep to BPS" << endl;
} }
else else
{ {
@ -708,8 +694,6 @@ void TupleBPS::setProjectBPP(JobStep* jobStep1, JobStep* jobStep2)
projectOids.push_back(jobStep1->oid()); projectOids.push_back(jobStep1->oid());
colWidth = (psth->colType()).colWidth; colWidth = (psth->colType()).colWidth;
// if (fOid>=3000)
// cout << "Adding project step PassThruStep and pDictionaryStep to BPS" << endl;
} }
} }
} }
@ -723,7 +707,6 @@ void TupleBPS::setProjectBPP(JobStep* jobStep1, JobStep* jobStep2)
if (pseudo) if (pseudo)
{ {
//cout << "adding a pseudo col projection" << endl;
fBPP->addProjectStep(*pseudo); fBPP->addProjectStep(*pseudo);
} }
else else
@ -835,7 +818,6 @@ void TupleBPS::storeCasualPartitionInfo(const bool estimateRowCounts)
} }
} }
//cout << "cp column number=" << cpColVec.size() << " 1st col extents size= " << scanFlags.size() << endl;
if (cpColVec.size() == 0) if (cpColVec.size() == 0)
return; return;
@ -900,12 +882,8 @@ void TupleBPS::startAggregationThread()
fProducerThreads.push_back(jobstepThreadPool.invoke(TupleBPSAggregators(this, fNumThreads - 1))); fProducerThreads.push_back(jobstepThreadPool.invoke(TupleBPSAggregators(this, fNumThreads - 1)));
} }
//#include "boost/date_time/posix_time/posix_time.hpp"
void TupleBPS::serializeJoiner() void TupleBPS::serializeJoiner()
{ {
// boost::posix_time::ptime start, stop;
// start = boost::posix_time::microsec_clock::local_time();
ByteStream bs; ByteStream bs;
bool more = true; bool more = true;
@ -925,8 +903,6 @@ void TupleBPS::serializeJoiner()
bs.restart(); bs.restart();
} }
// stop = boost::posix_time::microsec_clock::local_time();
// cout << "serializing took " << stop-start << endl;
} }
void TupleBPS::serializeJoiner(uint32_t conn) void TupleBPS::serializeJoiner(uint32_t conn)
@ -957,8 +933,6 @@ void TupleBPS::prepCasualPartitioning()
{ {
if (fOid >= 3000) if (fOid >= 3000)
{ {
//if (scanFlags[i] && !runtimeCPFlags[i])
// cout << "runtime flags eliminated an extent!\n";
scanFlags[i] = scanFlags[i] && runtimeCPFlags[i]; scanFlags[i] = scanFlags[i] && runtimeCPFlags[i];
if (scanFlags[i] && lbidList->CasualPartitionDataType(fColType.colDataType, if (scanFlags[i] && lbidList->CasualPartitionDataType(fColType.colDataType,
@ -1209,22 +1183,10 @@ void TupleBPS::run()
if (fe2) if (fe2)
{ {
//if (fDelivery) {
// fe2Data.reinit(fe2Output);
// fe2Output.setData(&fe2Data);
//}
primRowGroup.initRow(&fe2InRow); primRowGroup.initRow(&fe2InRow);
fe2Output.initRow(&fe2OutRow); fe2Output.initRow(&fe2OutRow);
} }
/*
if (doJoin) {
for (uint32_t z = 0; z < smallSideCount; z++)
cout << "join #" << z << " " << "0x" << hex << tjoiners[z]->getJoinType()
<< std::dec << " typeless: " << (uint32_t) tjoiners[z]->isTypelessJoin() << endl;
}
*/
try try
{ {
fDec->addDECEventListener(this); fDec->addDECEventListener(this);
@ -1330,7 +1292,6 @@ void TupleBPS::sendError(uint16_t status)
} }
fBPP->reset(); fBPP->reset();
// msgsSent++; // not expecting a response from this msg
finishedSending = true; finishedSending = true;
condvar.notify_all(); condvar.notify_all();
condvarWakeupProducer.notify_all(); condvarWakeupProducer.notify_all();
@ -1441,7 +1402,6 @@ void TupleBPS::sendJobs(const vector<Job>& jobs)
for (i = 0; i < jobs.size() && !cancelled(); i++) for (i = 0; i < jobs.size() && !cancelled(); i++)
{ {
//cout << "sending a job for dbroot " << jobs[i].dbroot << ", PM " << jobs[i].connectionNum << endl;
fDec->write(uniqueID, *(jobs[i].msg)); fDec->write(uniqueID, *(jobs[i].msg));
tplLock.lock(); tplLock.lock();
msgsSent += jobs[i].expectedResponses; msgsSent += jobs[i].expectedResponses;
@ -1785,20 +1745,17 @@ void TupleBPS::makeJobs(vector<Job>* jobs)
if (!inBounds) if (!inBounds)
{ {
//cout << "out of bounds" << endl;
continue; continue;
} }
if (!scanFlags[i]) if (!scanFlags[i])
{ {
//cout << "CP elimination" << endl;
fNumBlksSkipped += lbidsToScan; fNumBlksSkipped += lbidsToScan;
continue; continue;
} }
if (!processPseudoColFilters(i, dbRootPMMap)) if (!processPseudoColFilters(i, dbRootPMMap))
{ {
//cout << "Skipping an extent due to pseudo-column filter elimination" << endl;
fNumBlksSkipped += lbidsToScan; fNumBlksSkipped += lbidsToScan;
continue; continue;
} }
@ -1840,8 +1797,6 @@ void TupleBPS::makeJobs(vector<Job>* jobs)
} }
} }
// cout << " session " << fSessionId << " idx = " << i << " HWM = " << scannedExtents[i].HWM
// << " ... will scan " << lbidsToScan << " lbids\n";
// the # of logical blocks in this extent // the # of logical blocks in this extent
if (lbidsToScan % fColType.colWidth) if (lbidsToScan % fColType.colWidth)
@ -1857,22 +1812,17 @@ void TupleBPS::makeJobs(vector<Job>* jobs)
#else #else
blocksPerJob = max(blocksToScan / fProcessorThreadsPerScan, 16U); blocksPerJob = max(blocksToScan / fProcessorThreadsPerScan, 16U);
#endif #endif
//cout << "blocks to scan = " << blocksToScan << " blocks per job = " << blocksPerJob <<
// " HWM == " << scannedExtents[i].HWM << endl;
startingLBID = scannedExtents[i].range.start; startingLBID = scannedExtents[i].range.start;
while (blocksToScan > 0) while (blocksToScan > 0)
{ {
uint32_t blocksThisJob = min(blocksToScan, blocksPerJob); uint32_t blocksThisJob = min(blocksToScan, blocksPerJob);
//cout << "starting LBID = " << startingLBID << " count = " << blocksThisJob <<
// " dbroot = " << scannedExtents[i].dbRoot << endl;
fBPP->setLBID(startingLBID, scannedExtents[i]); fBPP->setLBID(startingLBID, scannedExtents[i]);
fBPP->setCount(blocksThisJob); fBPP->setCount(blocksThisJob);
bs.reset(new ByteStream()); bs.reset(new ByteStream());
fBPP->runBPP(*bs, (*dbRootConnectionMap)[scannedExtents[i].dbRoot]); fBPP->runBPP(*bs, (*dbRootConnectionMap)[scannedExtents[i].dbRoot]);
//cout << "making job for connection # " << (*dbRootConnectionMap)[scannedExtents[i].dbRoot] << endl;
jobs->push_back(Job(scannedExtents[i].dbRoot, (*dbRootConnectionMap)[scannedExtents[i].dbRoot], jobs->push_back(Job(scannedExtents[i].dbRoot, (*dbRootConnectionMap)[scannedExtents[i].dbRoot],
blocksThisJob, bs)); blocksThisJob, bs));
blocksToScan -= blocksThisJob; blocksToScan -= blocksThisJob;
@ -1881,7 +1831,6 @@ void TupleBPS::makeJobs(vector<Job>* jobs)
} }
} }
// cout << "session " << fSessionId << " sees " << extentCounter << " extents" << endl;
} }
void TupleBPS::sendPrimitiveMessages() void TupleBPS::sendPrimitiveMessages()
@ -1901,19 +1850,16 @@ void TupleBPS::sendPrimitiveMessages()
} }
catch (const IDBExcept& e) catch (const IDBExcept& e)
{ {
//cout << "Caught IDBExcept" << e.what() << e.errorCode() << endl;
sendError(e.errorCode()); sendError(e.errorCode());
processError(e.what(), e.errorCode(), "TupleBPS::sendPrimitiveMessages()"); processError(e.what(), e.errorCode(), "TupleBPS::sendPrimitiveMessages()");
} }
catch (const std::exception& ex) catch (const std::exception& ex)
{ {
//cout << "Caught exception" << endl;
sendError(ERR_TUPLE_BPS); sendError(ERR_TUPLE_BPS);
processError(ex.what(), ERR_TUPLE_BPS, "TupleBPS::sendPrimitiveMessages()"); processError(ex.what(), ERR_TUPLE_BPS, "TupleBPS::sendPrimitiveMessages()");
} }
catch (...) catch (...)
{ {
//cout << "Caught ..." << endl;
sendError(ERR_TUPLE_BPS); sendError(ERR_TUPLE_BPS);
processError("unknown", ERR_TUPLE_BPS, "TupleBPS::sendPrimitiveMessages()"); processError("unknown", ERR_TUPLE_BPS, "TupleBPS::sendPrimitiveMessages()");
} }
@ -2189,7 +2135,6 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
tplLock.unlock(); tplLock.unlock();
// cout << "thread " << threadID << " has " << size << " Bytestreams\n";
for (i = 0; i < size && !cancelled(); i++) for (i = 0; i < size && !cancelled(); i++)
{ {
ByteStream* bs = bsv[i].get(); ByteStream* bs = bsv[i].get();
@ -2244,18 +2189,16 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
local_outputRG.resetRowGroup(local_primRG.getBaseRid()); local_outputRG.resetRowGroup(local_primRG.getBaseRid());
local_outputRG.setDBRoot(local_primRG.getDBRoot()); local_outputRG.setDBRoot(local_primRG.getDBRoot());
local_primRG.getRow(0, &largeSideRow); local_primRG.getRow(0, &largeSideRow);
//cout << "large-side raw data: " << local_primRG.toString() << endl;
//cout << "jointype = " << tjoiners[0]->getJoinType() << endl;
for (k = 0; k < local_primRG.getRowCount() && !cancelled(); k++, largeSideRow.nextRow()) for (k = 0; k < local_primRG.getRowCount() && !cancelled(); k++, largeSideRow.nextRow())
{ {
//cout << "TBPS: Large side row: " << largeSideRow.toString() << endl;
matchCount = 0; matchCount = 0;
for (j = 0; j < smallSideCount; j++) for (j = 0; j < smallSideCount; j++)
{ {
tjoiners[j]->match(largeSideRow, k, threadID, &joinerOutput[j]); tjoiners[j]->match(largeSideRow, k, threadID, &joinerOutput[j]);
/* Debugging code to print the matches #ifdef JLF_DEBUG
// Debugging code to print the matches
Row r; Row r;
joinerMatchesRGs[j].initRow(&r); joinerMatchesRGs[j].initRow(&r);
cout << joinerOutput[j].size() << " matches: \n"; cout << joinerOutput[j].size() << " matches: \n";
@ -2263,7 +2206,7 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
r.setPointer(joinerOutput[j][z]); r.setPointer(joinerOutput[j][z]);
cout << " " << r.toString() << endl; cout << " " << r.toString() << endl;
} }
*/ #endif
matchCount = joinerOutput[j].size(); matchCount = joinerOutput[j].size();
if (tjoiners[j]->inUM()) if (tjoiners[j]->inUM())
@ -2271,7 +2214,6 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
/* Count the # of rows that pass the join filter */ /* Count the # of rows that pass the join filter */
if (tjoiners[j]->hasFEFilter() && matchCount > 0) if (tjoiners[j]->hasFEFilter() && matchCount > 0)
{ {
//cout << "doing FE filter" << endl;
vector<Row::Pointer> newJoinerOutput; vector<Row::Pointer> newJoinerOutput;
applyMapping(fergMappings[smallSideCount], largeSideRow, &joinFERow); applyMapping(fergMappings[smallSideCount], largeSideRow, &joinFERow);
@ -2311,10 +2253,6 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
if (tjoiners[j]->antiJoin()) if (tjoiners[j]->antiJoin())
{ {
matchCount = (matchCount ? 0 : 1); matchCount = (matchCount ? 0 : 1);
// if (matchCount)
// cout << "in the result\n";
// else
// cout << "not in the result\n";
} }
if (matchCount == 0) if (matchCount == 0)
@ -2380,7 +2318,6 @@ void TupleBPS::receiveMultiPrimitiveMessages(uint32_t threadID)
} }
else else
{ {
// cout << "TBPS: sending unjoined data\n";
rgDatav.push_back(rgData); rgDatav.push_back(rgData);
} }
@ -2488,7 +2425,6 @@ out:
{ {
smallSideRows[i].setPointer(unmatched[j]); smallSideRows[i].setPointer(unmatched[j]);
// cout << "small side Row: " << smallSideRows[i].toString() << endl;
for (k = 0; k < smallSideCount; k++) for (k = 0; k < smallSideCount; k++)
{ {
if (i == k) if (i == k)
@ -2499,8 +2435,6 @@ out:
applyMapping(largeMapping, largeNull, &joinedBaseRow); applyMapping(largeMapping, largeNull, &joinedBaseRow);
joinedBaseRow.setRid(0); joinedBaseRow.setRid(0);
// cout << "outer row is " << joinedBaseRow.toString() << endl;
// joinedBaseRow.setRid(largeSideRow.getRelRid());
joinedBaseRow.nextRow(); joinedBaseRow.nextRow();
local_outputRG.incRowCount(); local_outputRG.incRowCount();
@ -2785,8 +2719,6 @@ inline bool TupleBPS::scanit(uint64_t rid)
fbo = rid >> rpbShift; fbo = rid >> rpbShift;
extentIndex = fbo >> divShift; extentIndex = fbo >> divShift;
//if (scanFlags[extentIndex] && !runtimeCPFlags[extentIndex])
// cout << "HJ feedback eliminated an extent!\n";
return scanFlags[extentIndex] && runtimeCPFlags[extentIndex]; return scanFlags[extentIndex] && runtimeCPFlags[extentIndex];
} }
@ -2908,7 +2840,6 @@ void TupleBPS::generateJoinResultSet(const vector<vector<Row::Pointer> >& joiner
{ {
smallRow.setPointer(joinerOutput[depth][i]); smallRow.setPointer(joinerOutput[depth][i]);
applyMapping(mappings[depth], smallRow, &baseRow); applyMapping(mappings[depth], smallRow, &baseRow);
// cout << "depth " << depth << ", size " << joinerOutput[depth].size() << ", row " << i << ": " << smallRow.toString() << endl;
generateJoinResultSet(joinerOutput, baseRow, mappings, depth + 1, generateJoinResultSet(joinerOutput, baseRow, mappings, depth + 1,
outputRG, rgData, outputData, smallRows, joinedRow); outputRG, rgData, outputData, smallRows, joinedRow);
} }
@ -2926,7 +2857,6 @@ void TupleBPS::generateJoinResultSet(const vector<vector<Row::Pointer> >& joiner
{ {
uint32_t dbRoot = outputRG.getDBRoot(); uint32_t dbRoot = outputRG.getDBRoot();
uint64_t baseRid = outputRG.getBaseRid(); uint64_t baseRid = outputRG.getBaseRid();
// cout << "GJRS adding data\n";
outputData->push_back(rgData); outputData->push_back(rgData);
rgData = RGData(outputRG); rgData = RGData(outputRG);
outputRG.setData(&rgData); outputRG.setData(&rgData);
@ -2935,11 +2865,8 @@ void TupleBPS::generateJoinResultSet(const vector<vector<Row::Pointer> >& joiner
outputRG.getRow(0, &joinedRow); outputRG.getRow(0, &joinedRow);
} }
// cout << "depth " << depth << ", size " << joinerOutput[depth].size() << ", row " << i << ": " << smallRow.toString() << endl;
applyMapping(mappings[depth], smallRow, &baseRow); applyMapping(mappings[depth], smallRow, &baseRow);
copyRow(baseRow, &joinedRow); copyRow(baseRow, &joinedRow);
//memcpy(joinedRow.getData(), baseRow.getData(), joinedRow.getSize());
//cout << "(step " << fStepId << ") fully joined row is: " << joinedRow.toString() << endl;
} }
} }
} }
@ -3104,7 +3031,6 @@ void TupleBPS::processFE2_oneRG(RowGroup& input, RowGroup& output, Row& inRow,
if (ret) if (ret)
{ {
applyMapping(fe2Mapping, inRow, &outRow); applyMapping(fe2Mapping, inRow, &outRow);
//cout << "fe2 passed row: " << outRow.toString() << endl;
outRow.setRid(inRow.getRelRid()); outRow.setRid(inRow.getRelRid());
output.incRowCount(); output.incRowCount();
outRow.nextRow(); outRow.nextRow();
@ -3153,7 +3079,6 @@ void TupleBPS::processFE2(RowGroup& input, RowGroup& output, Row& inRow, Row& ou
output.getBaseRid() != input.getBaseRid() output.getBaseRid() != input.getBaseRid()
) )
{ {
// cout << "FE2 produced a full RG\n";
results.push_back(result); results.push_back(result);
result = RGData(output); result = RGData(output);
output.setData(&result); output.setData(&result);
@ -3167,12 +3092,9 @@ void TupleBPS::processFE2(RowGroup& input, RowGroup& output, Row& inRow, Row& ou
if (output.getRowCount() > 0) if (output.getRowCount() > 0)
{ {
// cout << "FE2 produced " << output.getRowCount() << " rows\n";
results.push_back(result); results.push_back(result);
} }
// else
// cout << "no rows from FE2\n";
rgData->swap(results); rgData->swap(results);
} }

View File

@ -1,4 +1,5 @@
/* Copyright (C) 2014 InfiniDB, Inc. /* Copyright (C) 2014 InfiniDB, Inc.
Copyright (C) 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License modify it under the terms of the GNU General Public License
@ -23,6 +24,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "rowaggregation.h" #include "rowaggregation.h"
#include "threadnaming.h"
#include <boost/thread.hpp> #include <boost/thread.hpp>
@ -138,6 +140,7 @@ private:
Aggregator(TupleAggregateStep* step) : fStep(step) { } Aggregator(TupleAggregateStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("TASAggr");
fStep->doAggregate(); fStep->doAggregate();
} }
@ -153,6 +156,7 @@ private:
{} {}
void operator()() void operator()()
{ {
utils::setThreadName("TASThrAggr");
fStep->threadedAggregateRowGroups(fThreadID); fStep->threadedAggregateRowGroups(fThreadID);
} }
@ -171,6 +175,7 @@ private:
} }
void operator()() void operator()()
{ {
utils::setThreadName("TASThr2ndPAggr");
for (uint32_t i = 0; i < bucketCount; i++) for (uint32_t i = 0; i < bucketCount; i++)
fStep->doThreadedSecondPhaseAggregate(fThreadID + i); fStep->doThreadedSecondPhaseAggregate(fThreadID + i);
} }

View File

@ -51,6 +51,7 @@ using namespace rowgroup;
#include "hasher.h" #include "hasher.h"
#include "stlpoolallocator.h" #include "stlpoolallocator.h"
#include "threadnaming.h"
using namespace utils; using namespace utils;
#include "querytele.h" #include "querytele.h"
@ -314,6 +315,7 @@ void TupleAnnexStep::execute()
void TupleAnnexStep::executeNoOrderBy() void TupleAnnexStep::executeNoOrderBy()
{ {
utils::setThreadName("TASwoOrd");
RGData rgDataIn; RGData rgDataIn;
RGData rgDataOut; RGData rgDataOut;
bool more = false; bool more = false;
@ -399,6 +401,7 @@ void TupleAnnexStep::executeNoOrderBy()
void TupleAnnexStep::executeNoOrderByWithDistinct() void TupleAnnexStep::executeNoOrderByWithDistinct()
{ {
utils::setThreadName("TASwoOrdDist");
scoped_ptr<DistinctMap_t> distinctMap(new DistinctMap_t(10, TAHasher(this), TAEq(this))); scoped_ptr<DistinctMap_t> distinctMap(new DistinctMap_t(10, TAHasher(this), TAEq(this)));
vector<RGData> dataVec; vector<RGData> dataVec;
RGData rgDataIn; RGData rgDataIn;
@ -500,6 +503,7 @@ void TupleAnnexStep::executeNoOrderByWithDistinct()
void TupleAnnexStep::executeWithOrderBy() void TupleAnnexStep::executeWithOrderBy()
{ {
utils::setThreadName("TASwOrd");
RGData rgDataIn; RGData rgDataIn;
RGData rgDataOut; RGData rgDataOut;
bool more = false; bool more = false;

View File

@ -22,6 +22,7 @@
#define JOBLIST_TUPLECONSTANTSTEP_H #define JOBLIST_TUPLECONSTANTSTEP_H
#include "jobstep.h" #include "jobstep.h"
#include "threadnaming.h"
namespace joblist namespace joblist
{ {
@ -98,6 +99,7 @@ protected:
Runner(TupleConstantStep* step) : fStep(step) { } Runner(TupleConstantStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("TCSRunner");
fStep->execute(); fStep->execute();
} }

View File

@ -1,4 +1,5 @@
/* Copyright (C) 2014 InfiniDB, Inc. /* Copyright (C) 2014 InfiniDB, Inc.
Copyright (C) 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License modify it under the terms of the GNU General Public License
@ -25,6 +26,7 @@
#include "calpontsystemcatalog.h" #include "calpontsystemcatalog.h"
#include "hasher.h" #include "hasher.h"
#include "tuplejoiner.h" #include "tuplejoiner.h"
#include "threadnaming.h"
#include <boost/shared_ptr.hpp> #include <boost/shared_ptr.hpp>
#include <map> #include <map>
#include <string> #include <string>
@ -450,6 +452,7 @@ private:
HJRunner(TupleHashJoinStep* hj) : HJ(hj) { } HJRunner(TupleHashJoinStep* hj) : HJ(hj) { }
void operator()() void operator()()
{ {
utils::setThreadName("HJSBigSide");
HJ->hjRunner(); HJ->hjRunner();
} }
TupleHashJoinStep* HJ; TupleHashJoinStep* HJ;
@ -459,6 +462,7 @@ private:
SmallRunner(TupleHashJoinStep* hj, uint32_t i) : HJ(hj), index(i) { } SmallRunner(TupleHashJoinStep* hj, uint32_t i) : HJ(hj), index(i) { }
void operator()() void operator()()
{ {
utils::setThreadName("HJSSmallSide");
HJ->smallRunnerFcn(index); HJ->smallRunnerFcn(index);
} }
TupleHashJoinStep* HJ; TupleHashJoinStep* HJ;

View File

@ -23,6 +23,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "expressionstep.h" #include "expressionstep.h"
#include "threadnaming.h"
// forward reference // forward reference
namespace fucexp namespace fucexp
@ -97,6 +98,7 @@ protected:
Runner(TupleHavingStep* step) : fStep(step) { } Runner(TupleHavingStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("HVSRunner");
fStep->execute(); fStep->execute();
} }

View File

@ -35,6 +35,7 @@
#endif #endif
#include "stlpoolallocator.h" #include "stlpoolallocator.h"
#include "threadnaming.h"
#ifndef TUPLEUNION2_H_ #ifndef TUPLEUNION2_H_
#define TUPLEUNION2_H_ #define TUPLEUNION2_H_
@ -155,6 +156,7 @@ private:
Runner(TupleUnion* t, uint32_t in) : tu(t), index(in) { } Runner(TupleUnion* t, uint32_t in) : tu(t), index(in) { }
void operator()() void operator()()
{ {
utils::setThreadName("TUSRunner");
tu->readInput(index); tu->readInput(index);
} }
}; };

View File

@ -25,6 +25,7 @@
#include "jobstep.h" #include "jobstep.h"
#include "rowgroup.h" #include "rowgroup.h"
#include "windowfunctioncolumn.h" #include "windowfunctioncolumn.h"
#include "threadnaming.h"
namespace execplan namespace execplan
{ {
@ -153,6 +154,7 @@ private:
Runner(WindowFunctionStep* step) : fStep(step) { } Runner(WindowFunctionStep* step) : fStep(step) { }
void operator()() void operator()()
{ {
utils::setThreadName("WFSRunner");
fStep->execute(); fStep->execute();
} }

View File

@ -52,6 +52,7 @@ using namespace boost;
#include "fixedallocator.h" #include "fixedallocator.h"
#include "blockcacheclient.h" #include "blockcacheclient.h"
#include "MonitorProcMem.h" #include "MonitorProcMem.h"
#include "threadnaming.h"
#define MAX64 0x7fffffffffffffffLL #define MAX64 0x7fffffffffffffffLL
#define MIN64 0x8000000000000000LL #define MIN64 0x8000000000000000LL
@ -156,7 +157,6 @@ BatchPrimitiveProcessor::BatchPrimitiveProcessor(ByteStream& b, double prefetch,
sendThread = bppst; sendThread = bppst;
pthread_mutex_init(&objLock, NULL); pthread_mutex_init(&objLock, NULL);
initBPP(b); initBPP(b);
// cerr << "made a BPP\n";
} }
#if 0 #if 0
@ -1961,6 +1961,7 @@ void BatchPrimitiveProcessor::makeResponse()
int BatchPrimitiveProcessor::operator()() int BatchPrimitiveProcessor::operator()()
{ {
utils::setThreadName("PPBatchPrimProc");
if (currentBlockOffset == 0) if (currentBlockOffset == 0)
{ {
#ifdef PRIMPROC_STOPWATCH // TODO: needs to be brought up-to-date #ifdef PRIMPROC_STOPWATCH // TODO: needs to be brought up-to-date

View File

@ -142,6 +142,7 @@ int BPPSeeder::operator()()
pthread_t tid = 0; pthread_t tid = 0;
boost::mutex::scoped_lock scoped(bppLock, boost::defer_lock_t()); boost::mutex::scoped_lock scoped(bppLock, boost::defer_lock_t());
try try
{ {
if (firstRun) if (firstRun)

View File

@ -91,6 +91,8 @@ using namespace idbdatafile;
using namespace threadpool; using namespace threadpool;
#include "threadnaming.h"
#include "atomicops.h" #include "atomicops.h"
#ifndef O_BINARY #ifndef O_BINARY
@ -925,6 +927,7 @@ struct AsynchLoader
void operator()() void operator()()
{ {
utils::setThreadName("PPAsyncLoader");
bool cached = false; bool cached = false;
uint32_t rCount = 0; uint32_t rCount = 0;
char buf[BLOCK_SIZE]; char buf[BLOCK_SIZE];
@ -1159,6 +1162,7 @@ void DictScanJob::write(const ByteStream& bs)
int DictScanJob::operator()() int DictScanJob::operator()()
{ {
utils::setThreadName("PPDictScanJob");
uint8_t data[DATA_BLOCK_SIZE]; uint8_t data[DATA_BLOCK_SIZE];
uint32_t output_buf_size = MAX_BUFFER_SIZE; uint32_t output_buf_size = MAX_BUFFER_SIZE;
uint32_t session; uint32_t session;
@ -1338,6 +1342,7 @@ struct BPPHandler
LastJoiner(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { } LastJoiner(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { }
int operator()() int operator()()
{ {
utils::setThreadName("PPHandLastJoiner");
return rt->lastJoinerMsg(*bs, dieTime); return rt->lastJoinerMsg(*bs, dieTime);
} }
}; };
@ -1347,6 +1352,7 @@ struct BPPHandler
Create(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { } Create(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { }
int operator()() int operator()()
{ {
utils::setThreadName("PPHandCreate");
rt->createBPP(*bs); rt->createBPP(*bs);
return 0; return 0;
} }
@ -1357,6 +1363,7 @@ struct BPPHandler
Destroy(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { } Destroy(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { }
int operator()() int operator()()
{ {
utils::setThreadName("PPHandDestroy");
return rt->destroyBPP(*bs, dieTime); return rt->destroyBPP(*bs, dieTime);
} }
}; };
@ -1366,6 +1373,7 @@ struct BPPHandler
AddJoiner(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { } AddJoiner(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { }
int operator()() int operator()()
{ {
utils::setThreadName("PPHandAddJoiner");
return rt->addJoinerToBPP(*bs, dieTime); return rt->addJoinerToBPP(*bs, dieTime);
} }
}; };
@ -1375,6 +1383,7 @@ struct BPPHandler
Abort(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { } Abort(boost::shared_ptr<BPPHandler> r, SBS b) : BPPHandlerFunctor(r, b) { }
int operator()() int operator()()
{ {
utils::setThreadName("PPHandAbort");
return rt->doAbort(*bs, dieTime); return rt->doAbort(*bs, dieTime);
} }
}; };
@ -1751,6 +1760,7 @@ public:
virtual int execute() = 0; virtual int execute() = 0;
int operator()() int operator()()
{ {
utils::setThreadName("PPDictOp");
int ret; int ret;
ret = execute(); ret = execute();
@ -1967,6 +1977,7 @@ struct ReadThread
void operator()() void operator()()
{ {
utils::setThreadName("PPReadThread");
boost::shared_ptr<threadpool::PriorityThreadPool> procPoolPtr = boost::shared_ptr<threadpool::PriorityThreadPool> procPoolPtr =
fPrimitiveServerPtr->getProcessorThreadPool(); fPrimitiveServerPtr->getProcessorThreadPool();
SBS bs; SBS bs;
@ -2376,6 +2387,7 @@ struct ServerThread
void operator()() void operator()()
{ {
utils::setThreadName("PPServerThr");
IOSocket ios; IOSocket ios;
try try

View File

@ -8,7 +8,8 @@ set(common_LIB_SRCS
poolallocator.cpp poolallocator.cpp
cgroupconfigurator.cpp cgroupconfigurator.cpp
MonitorProcMem.cpp MonitorProcMem.cpp
nullvaluemanip.cpp) nullvaluemanip.cpp
threadnaming.cpp)
add_library(common SHARED ${common_LIB_SRCS}) add_library(common SHARED ${common_LIB_SRCS})

View File

@ -0,0 +1,26 @@
/* Copyright (C) 2019 MariaDB Corporaton
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <sys/prctl.h>
namespace utils
{
void setThreadName(const char *threadName)
{
prctl(PR_SET_NAME, threadName, 0, 0, 0);
}
} // end of namespace

View File

@ -0,0 +1,24 @@
/* Copyright (C) 2019 MariaDB Corporaton
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#ifndef H_SETTHREADNAME
#define H_SETTHREADNAME
namespace utils
{
void setThreadName(const char *threadName);
} // end of namespace
#endif