fix(PrimProc): MCOL-5651 Add a workaround to avoid choosing an incorrect TupleHashJoinStep as a joiner (#3320)

parent 42be2cb7e0
commit 842a3c8a40
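The core of the workaround, visible in the hunks below, is to stop treating a TupleHashJoinStep whose two tuple IDs coincide as a real join when picking a joiner. A minimal, self-contained sketch of that guard follows; MockTHJS and usableAsJoiner are hypothetical illustration names, not part of the commit:

// Sketch only: mock of the relevant TupleHashJoinStep surface.
#include <cassert>
#include <cstdint>

struct MockTHJS  // stand-in for joblist::TupleHashJoinStep
{
  uint64_t id1, id2;
  uint64_t tupleId1() const { return id1; }
  uint64_t tupleId2() const { return id2; }
};

// Equal tuple IDs mean the step relates a tuple to itself (e.g. a token
// join on a dictionary column), not a table-to-table join, so it must not
// be chosen as the joiner; a fresh TupleHashJoinStep is created instead
// (see the @@ -3267 hunk below).
bool usableAsJoiner(const MockTHJS* thjs)
{
  return thjs != nullptr && thjs->tupleId1() != thjs->tupleId2();
}

int main()
{
  MockTHJS realJoin{1, 2};   // distinct tuple IDs: a genuine join
  MockTHJS tokenJoin{7, 7};  // same tuple ID on both sides: not a join
  assert(usableAsJoiner(&realJoin));
  assert(!usableAsJoiner(&tokenJoin));
  return 0;
}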
@@ -1478,7 +1478,7 @@ const JobStepVector doSimpleFilter(SimpleFilter* sf, JobInfo& jobInfo)
 {
   JobStepVector jsv;

-  if (sf == 0)
+  if (sf == nullptr)
     return jsv;

   // cout << "doSimpleFilter " << endl;
@@ -3078,7 +3078,7 @@ void createPostJoinFilters(const JobInfo& jobInfo, TableInfoMap& tableInfoMap,

   if (jobInfo.trace)
   {
-    if (postJoinFilters.size())
+    if (!postJoinFilters.empty())
     {
       cout << "Post join filters created." << endl;
       for (auto* filter : postJoinFilters)
@@ -3100,37 +3100,37 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
   set<uint32_t>& tableSet = tableInfoMap[large].fJoinedTables;
   vector<uint32_t>& adjList = tableInfoMap[large].fAdjacentList;
   uint32_t prevLarge = (uint32_t)getPrevLarge(large, tableInfoMap);
-  bool root = (prevLarge == (uint32_t)-1) ? true : false;
+  bool root = (prevLarge == (uint32_t)-1);
   uint32_t link = large;
   uint32_t cId = -1;

   // Get small sides ready.
-  for (vector<uint32_t>::iterator i = adjList.begin(); i != adjList.end(); i++)
+  for (unsigned int& adj : adjList)
   {
-    if (tableInfoMap[*i].fVisited == false)
+    if (!tableInfoMap[adj].fVisited)
     {
-      cId = *i;
-      smallSides.push_back(joinToLargeTable(*i, tableInfoMap, jobInfo, joinOrder, joinEdgesToRestore));
+      cId = adj;
+      smallSides.push_back(joinToLargeTable(adj, tableInfoMap, jobInfo, joinOrder, joinEdgesToRestore));

-      tableSet.insert(tableInfoMap[*i].fJoinedTables.begin(), tableInfoMap[*i].fJoinedTables.end());
+      tableSet.insert(tableInfoMap[adj].fJoinedTables.begin(), tableInfoMap[adj].fJoinedTables.end());
     }
   }

   // Join with its small sides, if not a leaf node.
-  if (smallSides.size() > 0)
+  if (!smallSides.empty())
   {
     // non-leaf node, need a join
     SJSTEP spjs = tableInfoMap[large].fQuerySteps.back();
-    BatchPrimitive* bps = dynamic_cast<BatchPrimitive*>(spjs.get());
-    SubAdapterStep* tsas = dynamic_cast<SubAdapterStep*>(spjs.get());
-    TupleHashJoinStep* thjs = dynamic_cast<TupleHashJoinStep*>(spjs.get());
+    auto* bps = dynamic_cast<BatchPrimitive*>(spjs.get());
+    auto* tsas = dynamic_cast<SubAdapterStep*>(spjs.get());
+    auto* thjs = dynamic_cast<TupleHashJoinStep*>(spjs.get());

     // @bug6158, try to put BPS on large side if possible
     if (tsas && smallSides.size() == 1)
     {
       SJSTEP sspjs = tableInfoMap[cId].fQuerySteps.back();
-      BatchPrimitive* sbps = dynamic_cast<BatchPrimitive*>(sspjs.get());
-      TupleHashJoinStep* sthjs = dynamic_cast<TupleHashJoinStep*>(sspjs.get());
+      auto* sbps = dynamic_cast<BatchPrimitive*>(sspjs.get());
+      auto* sthjs = dynamic_cast<TupleHashJoinStep*>(sspjs.get());

       if (sbps || (sthjs && sthjs->tokenJoin() == cId))
       {
@@ -3143,7 +3143,7 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
         largeJoinInfo->fDl = tableInfoMap[large].fDl;
         largeJoinInfo->fRowGroup = tableInfoMap[large].fRowGroup;

-        TableJoinMap::iterator mit = jobInfo.tableJoinMap.find(make_pair(large, cId));
+        auto mit = jobInfo.tableJoinMap.find(make_pair(large, cId));

         if (mit == jobInfo.tableJoinMap.end())
           throw runtime_error("Join step not found.");
@@ -3158,7 +3158,7 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo

         bps = sbps;
         thjs = sthjs;
-        tsas = NULL;
+        tsas = nullptr;
       }
     }

@@ -3173,7 +3173,7 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
     size_t dcf = 0;  // for dictionary column filters, 0 if thjs is null.
     RowGroup largeSideRG = tableInfoMap[large].fRowGroup;

-    if (thjs && thjs->tokenJoin() == large)
+    if (thjs && thjs->tokenJoin() == large && thjs->tupleId1() != thjs->tupleId2())
     {
       dcf = thjs->getLargeKeys().size();
       largeSideRG = thjs->getLargeRowGroup();
@@ -3195,9 +3195,9 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
     vector<vector<uint32_t>> smallKeyIndices;
     vector<vector<uint32_t>> largeKeyIndices;

-    for (vector<SP_JoinInfo>::iterator i = smallSides.begin(); i != smallSides.end(); i++)
+    for (auto& smallSide : smallSides)
     {
-      JoinInfo* info = i->get();
+      JoinInfo* info = smallSide.get();
       smallSideDLs.push_back(info->fDl);
       smallSideRGs.push_back(info->fRowGroup);
       jointypes.push_back(info->fJoinData.fTypes[0]);
@@ -3207,8 +3207,8 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
       vector<uint32_t> largeIndices;
       const vector<uint32_t>& keys1 = info->fJoinData.fLeftKeys;
       const vector<uint32_t>& keys2 = info->fJoinData.fRightKeys;
-      vector<uint32_t>::const_iterator k1 = keys1.begin();
-      vector<uint32_t>::const_iterator k2 = keys2.begin();
+      auto k1 = keys1.begin();
+      auto k2 = keys2.begin();
       uint32_t stid = getTableKey(jobInfo, *k1);
       tableNames.push_back(jobInfo.keyInfo->tupleKeyVec[stid].fTable);

@@ -3267,7 +3267,8 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
         traces.push_back(oss.str());
       }

-    if (bps || tsas)
+    // If the tupleIDs are the same it's not a join, so a new TupleHashJoinStep must be created
+    if (bps || tsas || (thjs && thjs->tupleId1() == thjs->tupleId2()))
     {
       thjs = new TupleHashJoinStep(jobInfo);
       thjs->tableOid1(smallSides[0]->fTableOid);
@@ -4379,11 +4380,15 @@ inline void joinTables(JobStepVector& joinSteps, TableInfoMap& tableInfoMap, Job
 {
   uint32_t largestTable = getLargestTable(jobInfo, tableInfoMap, overrideLargeSideEstimate);

-  if (jobInfo.outerOnTable.size() == 0)
+  if (jobInfo.outerOnTable.empty())
+  {
     joinToLargeTable(largestTable, tableInfoMap, jobInfo, joinOrder, jobInfo.joinEdgesToRestore);
+  }
   else
+  {
     joinTablesInOrder(largestTable, joinSteps, tableInfoMap, jobInfo, joinOrder);
+  }
 }

 void makeNoTableJobStep(JobStepVector& querySteps, JobStepVector& projectSteps,
                         DeliveredTableMap& deliverySteps, JobInfo& jobInfo)
@@ -4407,14 +4412,14 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
     const boost::shared_ptr<TupleKeyInfo>& keyInfo = jobInfo.keyInfo;
     cout << "query steps:" << endl;

-    for (JobStepVector::iterator i = querySteps.begin(); i != querySteps.end(); ++i)
+    for (const auto& step: querySteps)
     {
-      TupleHashJoinStep* thjs = dynamic_cast<TupleHashJoinStep*>(i->get());
+      auto* thjs = dynamic_cast<TupleHashJoinStep*>(step.get());

-      if (thjs == NULL)
+      if (thjs == nullptr)
       {
-        int64_t id = ((*i)->tupleId() != (uint64_t)-1) ? (*i)->tupleId() : -1;
-        cout << typeid(*(i->get())).name() << ": " << (*i)->oid() << " " << id << " "
+        int64_t id = (step->tupleId() != (uint64_t)-1) ? step->tupleId() : -1;
+        cout << typeid(step.get()).name() << ": " << step->oid() << " " << id << " "
              << (int)((id != -1) ? getTableKey(jobInfo, id) : -1) << endl;
       }
       else
@@ -4430,16 +4435,18 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

     cout << "project steps:" << endl;

-    for (JobStepVector::iterator i = projectSteps.begin(); i != projectSteps.end(); ++i)
+    for (const auto& prStep: projectSteps)
     {
-      cout << typeid(*(i->get())).name() << ": " << (*i)->oid() << " " << (*i)->tupleId() << " "
-           << getTableKey(jobInfo, (*i)->tupleId()) << endl;
+      cout << typeid(prStep.get()).name() << ": " << prStep->oid() << " " << prStep->tupleId() << " "
+           << getTableKey(jobInfo, prStep->tupleId()) << endl;
     }

     cout << "delivery steps:" << endl;

-    for (DeliveredTableMap::iterator i = deliverySteps.begin(); i != deliverySteps.end(); ++i)
-      cout << typeid(*(i->second.get())).name() << endl;
+    for (const auto& [_, value]: deliverySteps)
+    {
+      cout << typeid(value.get()).name() << endl;
+    }

     cout << "\nTable Info: (key oid name alias view sub)" << endl;

@@ -4452,7 +4459,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
       CalpontSystemCatalog::OID oid = keyInfo->tupleKeyVec[i].fId;
       string alias = keyInfo->tupleKeyVec[i].fTable;

-      if (alias.length() < 1)
+      if (alias.empty())
        alias = "N/A";

       string name = keyInfo->keyName[i];
@@ -4462,10 +4469,10 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

       string view = keyInfo->tupleKeyVec[i].fView;

-      if (view.length() < 1)
+      if (view.empty())
        view = "N/A";

-      int sid = keyInfo->tupleKeyVec[i].fSubId;
+      auto sid = keyInfo->tupleKeyVec[i].fSubId;
       cout << i << "\t" << oid << "\t" << name << "\t" << alias << "\t" << view << "\t" << hex << sid << dec
            << endl;
     }
@@ -4479,7 +4486,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
       int64_t tid = jobInfo.keyInfo->colKeyToTblKey[i];
       string alias = keyInfo->tupleKeyVec[i].fTable;

-      if (alias.length() < 1)
+      if (alias.empty())
        alias = "N/A";

       // Expression IDs are borrowed from systemcatalog IDs, which are not used in tuple.
@@ -4502,10 +4509,10 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

       string view = keyInfo->tupleKeyVec[i].fView;

-      if (view.length() < 1)
+      if (view.empty())
        view = "N/A";

-      int sid = keyInfo->tupleKeyVec[i].fSubId;
+      auto sid = keyInfo->tupleKeyVec[i].fSubId;
       cout << i << "\t" << oid << "\t" << tid << "\t" << name << "\t" << alias << "\t" << view << "\t" << hex
            << sid << dec << endl;
     }
@@ -4514,7 +4521,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   }

   // @bug 2771, handle no table select query
-  if (jobInfo.tableList.size() < 1)
+  if (jobInfo.tableList.empty())
   {
     makeNoTableJobStep(querySteps, projectSteps, deliverySteps, jobInfo);
     return;
@@ -4537,33 +4544,33 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   }

   // Set of the columns being projected.
-  for (TupleInfoVector::iterator i = jobInfo.pjColList.begin(); i != jobInfo.pjColList.end(); i++)
+  for (auto i = jobInfo.pjColList.begin(); i != jobInfo.pjColList.end(); i++)
     jobInfo.returnColSet.insert(i->key);

   // Strip constantbooleanquerySteps
   for (uint64_t i = 0; i < querySteps.size();)
   {
-    TupleConstantBooleanStep* bs = dynamic_cast<TupleConstantBooleanStep*>(querySteps[i].get());
-    ExpressionStep* es = dynamic_cast<ExpressionStep*>(querySteps[i].get());
+    auto* bs = dynamic_cast<TupleConstantBooleanStep*>(querySteps[i].get());
+    auto* es = dynamic_cast<ExpressionStep*>(querySteps[i].get());

-    if (bs != NULL)
+    if (bs != nullptr)
    {
       // cosntant step
-      if (bs->boolValue() == false)
+      if (!bs->boolValue())
         jobInfo.constantFalse = true;

       querySteps.erase(querySteps.begin() + i);
     }
-    else if (es != NULL && es->tableKeys().size() == 0)
+    else if (es != nullptr && es->tableKeys().empty())
     {
       // constant expression
       ParseTree* p = es->expressionFilter();  // filter

-      if (p != NULL)
+      if (p != nullptr)
       {
         Row r;  // dummy row

-        if (funcexp::FuncExp::instance()->evaluate(r, p) == false)
+        if (!funcexp::FuncExp::instance()->evaluate(r, p))
           jobInfo.constantFalse = true;

         querySteps.erase(querySteps.begin() + i);
@@ -4582,7 +4589,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   {
     bool exist = false;

-    for (JobStepVector::iterator j = steps.begin(); j != steps.end() && !exist; ++j)
+    for (auto j = steps.begin(); j != steps.end() && !exist; ++j)
     {
       if (jobInfo.functionJoins[i] == j->get())
         exist = true;
@@ -4597,37 +4604,37 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

   // Make sure each query step has an output DL
   // This is necessary for toString() method on most steps
-  for (JobStepVector::iterator it = steps.begin(); it != steps.end(); ++it)
+  for (auto& step: steps)
   {
     // if (dynamic_cast<OrDelimiter*>(it->get()))
     //   continue;

-    if (it->get()->outputAssociation().outSize() == 0)
+    if (step->outputAssociation().outSize() == 0)
     {
       JobStepAssociation jsa;
       AnyDataListSPtr adl(new AnyDataList());
-      RowGroupDL* dl = new RowGroupDL(1, jobInfo.fifoSize);
-      dl->OID(it->get()->oid());
+      auto* dl = new RowGroupDL(1, jobInfo.fifoSize);
+      dl->OID(step->oid());
       adl->rowGroupDL(dl);
       jsa.outAdd(adl);
-      it->get()->outputAssociation(jsa);
+      step->outputAssociation(jsa);
     }
   }

   // Populate the TableInfo map with the job steps keyed by table ID.
   JobStepVector joinSteps;
   JobStepVector& expSteps = jobInfo.crossTableExpressions;
-  JobStepVector::iterator it = querySteps.begin();
-  JobStepVector::iterator end = querySteps.end();
+  auto it = querySteps.begin();
+  auto end = querySteps.end();

   while (it != end)
   {
     // Separate table joins from other predicates.
-    TupleHashJoinStep* thjs = dynamic_cast<TupleHashJoinStep*>(it->get());
-    ExpressionStep* exps = dynamic_cast<ExpressionStep*>(it->get());
-    SubAdapterStep* subs = dynamic_cast<SubAdapterStep*>(it->get());
+    auto* thjs = dynamic_cast<TupleHashJoinStep*>(it->get());
+    auto* exps = dynamic_cast<ExpressionStep*>(it->get());
+    auto* subs = dynamic_cast<SubAdapterStep*>(it->get());

-    if (thjs != NULL && thjs->tupleId1() != thjs->tupleId2())
+    if (thjs && thjs->tupleId1() != thjs->tupleId2())
     {
       // simple column and constant column semi join
       if (thjs->tableOid2() == 0 && thjs->schema2().empty())
@@ -4685,8 +4692,8 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

       // keep a join map
       pair<uint32_t, uint32_t> tablePair(tid1, tid2);
-      TableJoinMap::iterator m1 = jobInfo.tableJoinMap.find(tablePair);
-      TableJoinMap::iterator m2 = jobInfo.tableJoinMap.end();
+      auto m1 = jobInfo.tableJoinMap.find(tablePair);
+      auto m2 = jobInfo.tableJoinMap.end();

       if (m1 == jobInfo.tableJoinMap.end())
       {
@@ -4782,17 +4789,17 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
       m1->second.fJoinId = m2->second.fJoinId = thjs->joinId();
     }
     // Separate the expressions
-    else if (exps != NULL && subs == NULL)
+    else if (exps && !subs)
     {
       const vector<uint32_t>& tables = exps->tableKeys();
       const vector<uint32_t>& columns = exps->columnKeys();
       bool tableInOuterQuery = false;
       set<uint32_t> tableSet;  // involved unique tables

-      for (uint64_t i = 0; i < tables.size(); ++i)
+      for (unsigned int table: tables)
       {
-        if (find(jobInfo.tableList.begin(), jobInfo.tableList.end(), tables[i]) != jobInfo.tableList.end())
-          tableSet.insert(tables[i]);
+        if (find(jobInfo.tableList.begin(), jobInfo.tableList.end(), table) != jobInfo.tableList.end())
+          tableSet.insert(table);
         else
           tableInOuterQuery = true;
       }
@@ -4814,10 +4821,10 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
           tableInfoMap[tables[i]].fProjectCols.push_back(c);
           jobInfo.pjColList.push_back(getTupleInfo(c, jobInfo));
           jobInfo.returnColSet.insert(c);
-          const SimpleColumn* sc = dynamic_cast<const SimpleColumn*>(exps->columns()[i]);
+          const auto* sc = dynamic_cast<const SimpleColumn*>(exps->columns()[i]);

-          if (sc != NULL)
-            jobInfo.deliveredCols.push_back(SRCP(sc->clone()));
+          if (sc)
+            jobInfo.deliveredCols.emplace_back(sc->clone());
         }
       }

@@ -4831,8 +4838,8 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
         // single table and not in join on clause
         uint32_t tid = tables[0];

-        for (uint64_t i = 0; i < columns.size(); ++i)
-          tableInfoMap[tid].fColsInExp1.push_back(columns[i]);
+        for (unsigned int column : columns)
+          tableInfoMap[tid].fColsInExp1.push_back(column);

         tableInfoMap[tid].fOneTableExpSteps.push_back(*it);
       }
@@ -4848,9 +4855,8 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
         }

         // resolve after join: cross table or on clause conditions
-        for (uint64_t i = 0; i < columns.size(); ++i)
+        for (unsigned int cid : columns)
         {
-          uint32_t cid = columns[i];
           uint32_t tid = getTableKey(jobInfo, cid);
           tableInfoMap[tid].fColsInExp2.push_back(cid);
         }
@@ -4887,7 +4893,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   outjoinPredicateAdjust(tableInfoMap, jobInfo);

   // @bug4021, make sure there is real column to scan
-  for (TableInfoMap::iterator it = tableInfoMap.begin(); it != tableInfoMap.end(); it++)
+  for (auto it = tableInfoMap.begin(); it != tableInfoMap.end(); it++)
   {
     uint32_t tableUid = it->first;

@@ -4895,8 +4901,8 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
       continue;

     JobStepVector& steps = tableInfoMap[tableUid].fQuerySteps;
-    JobStepVector::iterator s = steps.begin();
-    JobStepVector::iterator p = steps.end();
+    auto s = steps.begin();
+    auto p = steps.end();

     for (; s != steps.end(); s++)
     {
@@ -4910,7 +4916,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

     if (s == steps.end())
     {
-      map<uint64_t, SRCP>::iterator t = jobInfo.tableColMap.find(tableUid);
+      auto t = jobInfo.tableColMap.find(tableUid);

       if (t == jobInfo.tableColMap.end())
       {
@@ -4919,7 +4925,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
         throw runtime_error(msg);
       }

-      SimpleColumn* sc = dynamic_cast<SimpleColumn*>(t->second.get());
+      auto* sc = dynamic_cast<SimpleColumn*>(t->second.get());
       CalpontSystemCatalog::OID oid = sc->oid();
       CalpontSystemCatalog::OID tblOid = tableOid(sc, jobInfo.csc);
       CalpontSystemCatalog::ColType ct = sc->colType();
@@ -4946,30 +4952,30 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   }

   // @bug3767, error out scalar subquery with aggregation and correlated additional comparison.
-  if (jobInfo.hasAggregation && (jobInfo.correlateSteps.size() > 0))
+  if (jobInfo.hasAggregation && (!jobInfo.correlateSteps.empty()))
   {
     // expression filter
-    ExpressionStep* exp = NULL;
+    ExpressionStep* exp = nullptr;

     for (it = jobInfo.correlateSteps.begin(); it != jobInfo.correlateSteps.end(); it++)
     {
-      if (((exp = dynamic_cast<ExpressionStep*>(it->get())) != NULL) && (!exp->functionJoin()))
+      if (((exp = dynamic_cast<ExpressionStep*>(it->get())) != nullptr) && (!exp->functionJoin()))
         break;

-      exp = NULL;
+      exp = nullptr;
     }

     // correlated join step
-    TupleHashJoinStep* thjs = NULL;
+    TupleHashJoinStep* thjs = nullptr;

     for (it = jobInfo.correlateSteps.begin(); it != jobInfo.correlateSteps.end(); it++)
     {
-      if ((thjs = dynamic_cast<TupleHashJoinStep*>(it->get())) != NULL)
+      if ((thjs = dynamic_cast<TupleHashJoinStep*>(it->get())) != nullptr)
         break;
     }

     // @bug5202, error out not equal correlation and aggregation in subquery.
-    if ((exp != NULL) && (thjs != NULL) && (thjs->getJoinType() & CORRELATED))
+    if (exp && thjs && (thjs->getJoinType() & CORRELATED))
       throw IDBExcept(IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORT_NEQ_AGG_SUB),
                       ERR_NON_SUPPORT_NEQ_AGG_SUB);
   }
@@ -4985,7 +4991,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
       it++;
   }

-  for (TupleInfoVector::iterator j = jobInfo.pjColList.begin(); j != jobInfo.pjColList.end(); j++)
+  for (auto j = jobInfo.pjColList.begin(); j != jobInfo.pjColList.end(); j++)
   {
     if (jobInfo.keyInfo->tupleKeyVec[j->tkey].fId == CNX_EXP_TABLE_ID)
       continue;
@@ -5000,9 +5006,9 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte

   for (it = retExp.begin(); it != retExp.end(); ++it)
   {
-    ExpressionStep* exp = dynamic_cast<ExpressionStep*>(it->get());
+    auto* exp = dynamic_cast<ExpressionStep*>(it->get());

-    if (exp == NULL)
+    if (exp == nullptr)
       throw runtime_error("Not an expression.");

     for (uint64_t i = 0; i < exp->columnKeys().size(); ++i)
@@ -5023,7 +5029,7 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   TableInfoMap::iterator mit;

   for (mit = tableInfoMap.begin(); mit != tableInfoMap.end(); mit++)
-    if (combineJobStepsByTable(mit, jobInfo) == false)
+    if (!combineJobStepsByTable(mit, jobInfo))
       throw runtime_error("combineJobStepsByTable failed.");

   // 2. join the combined steps together to form the spanning tree
@@ -5031,9 +5037,9 @@ void associateTupleJobSteps(JobStepVector& querySteps, JobStepVector& projectSte
   joinTables(joinSteps, tableInfoMap, jobInfo, joinOrder, overrideLargeSideEstimate);

   // 3. put the steps together
-  for (vector<uint32_t>::iterator i = joinOrder.begin(); i != joinOrder.end(); ++i)
-    querySteps.insert(querySteps.end(), tableInfoMap[*i].fQuerySteps.begin(),
-                      tableInfoMap[*i].fQuerySteps.end());
+  for (uint32_t i: joinOrder)
+    querySteps.insert(querySteps.end(), tableInfoMap[i].fQuerySteps.begin(),
+                      tableInfoMap[i].fQuerySteps.end());

   adjustLastStep(querySteps, deliverySteps, jobInfo);  // to match the select clause
 }
@@ -97,10 +97,10 @@ TupleHashJoinStep::TupleHashJoinStep(const JobInfo& jobInfo)
   fExtendedInfo = "THJS: ";
   joinType = INIT;
   joinThreadCount = resourceManager->getJlNumScanReceiveThreads();
-  largeBPS = NULL;
+  largeBPS = nullptr;
   moreInput = true;
   fQtc.stepParms().stepType = StepTeleStats::T_HJS;
-  outputDL = NULL;
+  outputDL = nullptr;
   ownsOutputDL = false;
   djsSmallUsage = jobInfo.smallSideUsage;
   djsSmallLimit = jobInfo.smallSideLimit;
mysql-test/columnstore/bugfixes/mcol-5651.result (new file, 63 lines)
@@ -0,0 +1,63 @@
+DROP DATABASE IF EXISTS `mcol_5651`;
+CREATE DATABASE `mcol_5651`;
+USE `mcol_5651`;
+CREATE TABLE mcs (id INTEGER NOT NULL, type VARCHAR(10) NOT NULL, sn VARCHAR(30), descr VARCHAR(100), sdate DATETIME) ENGINE=Columnstore;
+INSERT INTO mcs VALUES
+(1,'a','1',NULL,'2024-01-11 18:36:01'),
+(2,'a','1',NULL,'2024-01-11 18:36:03'),
+(3,'a','1',NULL,'2024-01-11 18:36:04'),
+(4,'a','1',NULL,'2024-01-11 18:36:06'),
+(5,'a','1',NULL,'2024-01-11 18:36:07'),
+(6,'a','1',NULL,'2024-01-12 13:04:15'),
+(7,'a','1',NULL,'2024-01-12 13:04:17'),
+(8,'a','1',NULL,'2024-01-12 13:04:18'),
+(9,'a','1',NULL,'2024-01-12 13:04:20'),
+(10,'a','1',NULL,'2024-01-11 18:35:58'),
+(11,'a','1',NULL,'2024-01-11 18:30:00'),
+(12,'a','1',NULL,'2024-01-11 18:30:00'),
+(13,'a','1',NULL,'2024-01-11 18:30:03'),
+(14,'a','1',NULL,'2024-01-11 18:30:03'),
+(15,'a','1','a','2024-01-11 18:30:02'),
+(16,'a','1',NULL,'2024-01-11 18:30:03'),
+(17,'a','1',NULL,'2024-01-11 18:30:03'),
+(18,'a','1',NULL,'2024-01-11 18:30:03'),
+(19,'a','1',NULL,'2024-01-12 18:53:02'),
+(20,'a','1',NULL,'2024-01-12 18:53:02'),
+(21,'a','1',NULL,'2024-01-12 19:27:47'),
+(22,'a','1',NULL,'2024-01-12 19:27:48'),
+(23,'a','1',NULL,'2024-01-13 01:47:26'),
+(24,'a','1',NULL,'2024-01-13 01:47:26'),
+(25,'a','1',NULL,'2024-01-13 01:47:26'),
+(26,'a','1',NULL,'2024-01-13 01:47:26'),
+(27,'a','1',NULL,'2024-01-13 01:47:26'),
+(28,'a','1',NULL,'2024-01-13 01:47:26');
+SELECT s1.id, count(*)
+FROM mcs AS s1
+WHERE s1.type = 'a'
+AND s1.sdate BETWEEN '2026-01-05 16:21:00' - INTERVAL 24 MONTH AND '2026-01-05 16:21:00'
+AND EXISTS (
+SELECT s.SN, s.sdate
+FROM mcs AS s
+WHERE s.type = 'a'
+AND s.sdate BETWEEN '2026-01-05 16:21:00' - INTERVAL 24 MONTH AND '2026-01-05 16:21:00'
+AND s.descr = 'a'
+AND s.SN = s1.sn
+AND s1.sdate BETWEEN s.sdate - INTERVAL 10 HOUR AND s.sdate + INTERVAL 10 HOUR
+)
+GROUP BY 1 ORDER BY 2,1;
+id	count(*)
+1	1
+2	1
+3	1
+4	1
+5	1
+10	1
+11	1
+12	1
+13	1
+14	1
+15	1
+16	1
+17	1
+18	1
+DROP DATABASE `mcol_5651`;
mysql-test/columnstore/bugfixes/mcol-5651.test (new file, 56 lines)
@@ -0,0 +1,56 @@
+--source ../include/have_columnstore.inc
+--disable_warnings
+DROP DATABASE IF EXISTS `mcol_5651`;
+--enable_warnings
+CREATE DATABASE `mcol_5651`;
+USE `mcol_5651`;
+
+CREATE TABLE mcs (id INTEGER NOT NULL, type VARCHAR(10) NOT NULL, sn VARCHAR(30), descr VARCHAR(100), sdate DATETIME) ENGINE=Columnstore;
+
+INSERT INTO mcs VALUES
+(1,'a','1',NULL,'2024-01-11 18:36:01'),
+(2,'a','1',NULL,'2024-01-11 18:36:03'),
+(3,'a','1',NULL,'2024-01-11 18:36:04'),
+(4,'a','1',NULL,'2024-01-11 18:36:06'),
+(5,'a','1',NULL,'2024-01-11 18:36:07'),
+(6,'a','1',NULL,'2024-01-12 13:04:15'),
+(7,'a','1',NULL,'2024-01-12 13:04:17'),
+(8,'a','1',NULL,'2024-01-12 13:04:18'),
+(9,'a','1',NULL,'2024-01-12 13:04:20'),
+(10,'a','1',NULL,'2024-01-11 18:35:58'),
+(11,'a','1',NULL,'2024-01-11 18:30:00'),
+(12,'a','1',NULL,'2024-01-11 18:30:00'),
+(13,'a','1',NULL,'2024-01-11 18:30:03'),
+(14,'a','1',NULL,'2024-01-11 18:30:03'),
+(15,'a','1','a','2024-01-11 18:30:02'),
+(16,'a','1',NULL,'2024-01-11 18:30:03'),
+(17,'a','1',NULL,'2024-01-11 18:30:03'),
+(18,'a','1',NULL,'2024-01-11 18:30:03'),
+(19,'a','1',NULL,'2024-01-12 18:53:02'),
+(20,'a','1',NULL,'2024-01-12 18:53:02'),
+(21,'a','1',NULL,'2024-01-12 19:27:47'),
+(22,'a','1',NULL,'2024-01-12 19:27:48'),
+(23,'a','1',NULL,'2024-01-13 01:47:26'),
+(24,'a','1',NULL,'2024-01-13 01:47:26'),
+(25,'a','1',NULL,'2024-01-13 01:47:26'),
+(26,'a','1',NULL,'2024-01-13 01:47:26'),
+(27,'a','1',NULL,'2024-01-13 01:47:26'),
+(28,'a','1',NULL,'2024-01-13 01:47:26');
+
+SELECT s1.id, count(*)
+FROM mcs AS s1
+WHERE s1.type = 'a'
+AND s1.sdate BETWEEN '2026-01-05 16:21:00' - INTERVAL 24 MONTH AND '2026-01-05 16:21:00'
+AND EXISTS (
+SELECT s.SN, s.sdate
+FROM mcs AS s
+WHERE s.type = 'a'
+AND s.sdate BETWEEN '2026-01-05 16:21:00' - INTERVAL 24 MONTH AND '2026-01-05 16:21:00'
+AND s.descr = 'a'
+AND s.SN = s1.sn
+AND s1.sdate BETWEEN s.sdate - INTERVAL 10 HOUR AND s.sdate + INTERVAL 10 HOUR
+)
+GROUP BY 1 ORDER BY 2,1;
+
+# cleanup
+DROP DATABASE `mcol_5651`;
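Note: the EXISTS subquery above joins mcs back to itself through s.SN = s1.sn, which is the self-join shape that, per the commit title, could previously cause an incorrect TupleHashJoinStep to be chosen as the joiner; the .result file above pins the corrected row counts.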
@@ -39,7 +39,7 @@
 #include <sstream>
 #include <set>
 #include "serviceexemgr.h"
-#include <stdlib.h>
+#include <cstdlib>
 using namespace std;

 #include <boost/thread.hpp>
@@ -148,7 +148,7 @@ BatchPrimitiveProcessor::BatchPrimitiveProcessor()
   pp.setLogicalBlockMode(true);
   pp.setBlockPtr((int*)blockData);
   pp.setBlockPtrAux((int*)blockDataAux);
-  pthread_mutex_init(&objLock, NULL);
+  pthread_mutex_init(&objLock, nullptr);
 }

 BatchPrimitiveProcessor::BatchPrimitiveProcessor(ByteStream& b, double prefetch,
@@ -209,7 +209,7 @@ BatchPrimitiveProcessor::BatchPrimitiveProcessor(ByteStream& b, double prefetch,
   pp.setBlockPtr((int*)blockData);
   pp.setBlockPtrAux((int*)blockDataAux);
   sendThread = bppst;
-  pthread_mutex_init(&objLock, NULL);
+  pthread_mutex_init(&objLock, nullptr);
   initBPP(b);
 }

@@ -417,7 +417,6 @@ void BatchPrimitiveProcessor::initBPP(ByteStream& bs)
     for (i = 0; i < joinerCount; i++)
     {
       smallSideRowLengths[i] = smallSideRGs[i].getRowSize();
-      ;
       smallSideRowData[i] = RGData(smallSideRGs[i], tJoinerSizes[i]);
       smallSideRGs[i].setData(&smallSideRowData[i]);
       smallSideRGs[i].resetRowGroup(0);
@@ -467,7 +466,7 @@ void BatchPrimitiveProcessor::initBPP(ByteStream& bs)
       {
         hasFilterStep = true;

-        if (dynamic_cast<StrFilterCmd*>(filterSteps[i].get()) != NULL)
+        if (dynamic_cast<StrFilterCmd*>(filterSteps[i].get()) != nullptr)
           filtOnString = true;
       }
       else if (type == Command::DICT_STEP || type == Command::RID_TO_STRING)
@@ -498,10 +497,9 @@ void BatchPrimitiveProcessor::initBPP(ByteStream& bs)
       bs >> *(fAggregator.get());

       // If there's UDAF involved, set up for PM processing
-      for (uint64_t i = 0; i < fAggregator->getAggFunctions().size(); i++)
+      for (const auto & pcol : fAggregator->getAggFunctions())
       {
-        RowUDAFFunctionCol* rowUDAF =
-            dynamic_cast<RowUDAFFunctionCol*>(fAggregator->getAggFunctions()[i].get());
+        auto* rowUDAF = dynamic_cast<RowUDAFFunctionCol*>(pcol.get());

         if (rowUDAF)
         {
@@ -553,10 +551,10 @@ void BatchPrimitiveProcessor::resetBPP(ByteStream& bs, const SP_UM_MUTEX& w, con
     ridMap = 0;
     baseRid = absRids[0] & 0xffffffffffffe000ULL;

-    for (uint32_t i = 0; i < ridCount; i++)
+    for (uint32_t j = 0; j < ridCount; j++)
     {
-      relRids[i] = absRids[i] - baseRid;
-      ridMap |= 1 << (relRids[i] >> 9);
+      relRids[j] = absRids[j] - baseRid;
+      ridMap |= 1 << (relRids[j] >> 9);
     }
   }
   else
@@ -583,7 +581,7 @@ void BatchPrimitiveProcessor::resetBPP(ByteStream& bs, const SP_UM_MUTEX& w, con
     projectSteps[i]->resetCommand(bs);
   }

-  idbassert(bs.length() == 0);
+  idbassert(bs.empty());

   /* init vars not part of the BS */
   currentBlockOffset = 0;
@@ -1098,7 +1096,7 @@ void BatchPrimitiveProcessor::initProcessor()
     }
   }

-  if (fAggregator.get() != NULL)
+  if (fAggregator.get() != nullptr)
   {
     fAggRowGroupData.reinit(fAggregateRG);
     fAggregateRG.setData(&fAggRowGroupData);
@@ -1164,7 +1162,6 @@ uint32_t BatchPrimitiveProcessor::executeTupleJoin(uint32_t startRid, RowGroup&
     for (j = 0; j < joinerCount; j++)
     {
       bool found;
-
       if (UNLIKELY(joinTypes[j] & ANTI))
       {
         if (joinTypes[j] & WITHFCNEXP)
@@ -1184,7 +1181,7 @@ uint32_t BatchPrimitiveProcessor::executeTupleJoin(uint32_t startRid, RowGroup&
           largeKey = oldRow.getIntField(colIndex);
           uint bucket = bucketPicker((char*)&largeKey, 8, bpSeed) & ptMask;

-          bool joinerIsEmpty = tJoiners[j][bucket]->empty() ? true : false;
+          bool joinerIsEmpty = tJoiners[j][bucket]->empty();

           found = (tJoiners[j][bucket]->find(largeKey) != tJoiners[j][bucket]->end());
           isNull = oldRow.isNullValue(colIndex);
@@ -1221,8 +1218,8 @@ uint32_t BatchPrimitiveProcessor::executeTupleJoin(uint32_t startRid, RowGroup&
         {
           bool hasNull = false;

-          for (uint32_t z = 0; z < tlLargeSideKeyColumns[j].size(); z++)
-            if (oldRow.isNullValue(tlLargeSideKeyColumns[j][z]))
+          for (unsigned int column: tlLargeSideKeyColumns[j])
+            if (oldRow.isNullValue(column))
             {
               hasNull = true;
               break;
@@ -1396,7 +1393,7 @@ void BatchPrimitiveProcessor::execute()
     {
       ColumnCommand* col = dynamic_cast<ColumnCommand*>(filterSteps[0].get());

-      if ((col != NULL) && (col->getFilterCount() == 0) && (col->getLBID() != 0))
+      if ((col != nullptr) && (col->getFilterCount() == 0) && (col->getLBID() != 0))
       {
         // stored in last pos in relLBID[] and asyncLoaded[]
         uint64_t p = projectCount;
@@ -2452,7 +2449,7 @@ SBPP BatchPrimitiveProcessor::duplicate()
   for (i = 0; i < projectCount; ++i)
     bpp->projectSteps[i] = projectSteps[i]->duplicate();

-  if (fAggregator.get() != NULL)
+  if (fAggregator.get() != nullptr)
   {
     bpp->fAggregateRG = fAggregateRG;
     bpp->fAggregator.reset(new RowAggregation(fAggregator->getGroupByCols(), fAggregator->getAggFunctions()));
@@ -2551,7 +2548,7 @@ void BatchPrimitiveProcessor::asyncLoadProjectColumns()
     // only care about column commands
     ColumnCommand* col = dynamic_cast<ColumnCommand*>(projectSteps[i].get());

-    if (col != NULL)
+    if (col != nullptr)
     {
       asyncLoaded[i] = asyncLoaded[i] && (relLBID[i] % blocksReadAhead != 0);
       relLBID[i] += col->getWidth();
@@ -2706,12 +2703,14 @@ inline void BatchPrimitiveProcessor::getJoinResults(const Row& r, uint32_t jInde
   {
     bool hasNullValue = false;

-    for (uint32_t i = 0; i < tlLargeSideKeyColumns[jIndex].size(); i++)
-      if (r.isNullValue(tlLargeSideKeyColumns[jIndex][i]))
+    for (unsigned int column: tlLargeSideKeyColumns[jIndex])
+    {
+      if (r.isNullValue(column))
       {
         hasNullValue = true;
         break;
       }
+    }

     if (hasNullValue)
     {
@@ -599,7 +599,7 @@ void TupleJoiner::match(rowgroup::Row& largeSideRow, uint32_t largeRowIndex, uin

   if (UNLIKELY(inUM() && (joinType & MATCHNULLS) && !isNull && !typelessJoin))
   {
-    if (largeRG.getColType(largeKeyColumns[0]) == CalpontSystemCatalog::LONGDOUBLE)
+    if (largeRG.getColType(largeKeyColumns[0]) == CalpontSystemCatalog::LONGDOUBLE && ld)
     {
       uint bucket = bucketPicker((char*)&(joblist::LONGDOUBLENULL), sizeof(joblist::LONGDOUBLENULL), bpSeed) &
                     bucketMask;
@@ -608,7 +608,7 @@ void TupleJoiner::match(rowgroup::Row& largeSideRow, uint32_t largeRowIndex, uin
       for (; range.first != range.second; ++range.first)
         matches->push_back(range.first->second);
     }
-    else if (!largeRG.usesStringTable())
+    else if (!smallRG.usesStringTable())
     {
       auto nullVal = getJoinNullValue();
       uint bucket = bucketPicker((char*)&nullVal, sizeof(nullVal), bpSeed) & bucketMask;