mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-01 06:46:55 +03:00

MCOL-1201 manual rebase with develop. Obsoletes branch MCOL-1201

David Hall
2018-05-11 09:50:10 -05:00
parent c8c3b23e32
commit ec3a3846c3
3 changed files with 707 additions and 509 deletions
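
Most of the diff below is one recurring adjustment applied across the prep*Aggregate functions (prep1PhaseAggregate, prep1PhaseDistinctAggregate, prep2PhasesAggregate, prep2PhasesDistinctAggregate): extra parameters of a multi-parameter UDAF (ROWAGG_MULTI_PARM) no longer occupy their own slots in the output row group, so output and aux column positions are tracked with separate counters (outIdx, multiParms) instead of the loop index, and a constant UDAF parameter is detected via dynamic_cast<ConstantColumn*> and remembered on the function column as fpConstCol. The sketch below shows only the indexing idea, with simplified stand-in types; Entry, Func and buildFunctions are illustrative names, not ColumnStore APIs.

#include <cstdint>
#include <vector>

// Illustrative stand-ins; the real code walks returnedColVec and builds
// RowAggFunctionCol / RowUDAFFunctionCol objects.
struct Entry { bool multiParm; bool constParm; };
struct Func  { uint64_t outputColumnIndex; bool hasConstParm; };

std::vector<Func> buildFunctions(const std::vector<Entry>& returned)
{
    std::vector<Func> funcs;
    uint64_t outIdx = 0;                        // output slot; advanced independently of i

    for (uint64_t i = 0; i < returned.size(); i++)
    {
        if (returned[i].multiParm)              // extra UDAF parameter: gets no output slot
            continue;

        Func f;
        f.outputColumnIndex = outIdx;           // use outIdx, not the loop index i
        f.hasConstParm = returned[i].constParm; // analogous to remembering fpConstCol
        funcs.push_back(f);

        ++outIdx;                               // only non-multi-parm entries advance
    }

    return funcs;
}

The point of the pattern is that skipping ROWAGG_MULTI_PARM rows shifts every later output position down, which is also why the real code subtracts multiParms when it copies fAuxColumnIndex into the sub-aggregators.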


@ -1097,7 +1097,8 @@ void TupleAggregateStep::prep1PhaseAggregate(
uint32_t bigIntWidth = sizeof(int64_t);
uint32_t bigUintWidth = sizeof(uint64_t);
// For UDAF
uint32_t projColsUDAFIndex = 0;
uint32_t projColsUDAFIdx = 0;
uint32_t udafcParamIdx = 0;
UDAFColumn* udafc = NULL;
mcsv1sdk::mcsv1_UDAF* pUDAFFunc = NULL;
// for count column of average function
@ -1139,6 +1140,7 @@ void TupleAggregateStep::prep1PhaseAggregate(
// populate the aggregate rowgroup
AGG_MAP aggFuncMap;
uint64_t outIdx = 0;
for (uint64_t i = 0; i < returnedColVec.size(); i++)
{
@ -1156,8 +1158,9 @@ void TupleAggregateStep::prep1PhaseAggregate(
typeAgg.push_back(ti.dtype);
widthAgg.push_back(ti.width);
SP_ROWAGG_FUNC_t funct(new RowAggFunctionCol(
aggOp, stats, 0, i, jobInfo.cntStarPos));
aggOp, stats, 0, outIdx, jobInfo.cntStarPos));
functionVec.push_back(funct);
++outIdx;
continue;
}
@ -1173,9 +1176,10 @@ void TupleAggregateStep::prep1PhaseAggregate(
typeAgg.push_back(ti.dtype);
widthAgg.push_back(width);
SP_ROWAGG_FUNC_t funct(new RowAggFunctionCol(
aggOp, stats, 0, i, -1));
aggOp, stats, 0, outIdx, -1));
functionVec.push_back(funct);
++outIdx;
continue;
}
@ -1221,16 +1225,17 @@ void TupleAggregateStep::prep1PhaseAggregate(
widthAgg.push_back(width[colProj]);
if (groupBy[it->second]->fOutputColumnIndex == (uint32_t) - 1)
groupBy[it->second]->fOutputColumnIndex = i;
groupBy[it->second]->fOutputColumnIndex = outIdx;
else
functionVec.push_back(SP_ROWAGG_FUNC_t(
new RowAggFunctionCol(
ROWAGG_DUP_FUNCT,
ROWAGG_FUNCT_UNDEFINE,
-1,
i,
outIdx,
groupBy[it->second]->fOutputColumnIndex)));
++outIdx;
continue;
}
else if (find(jobInfo.expressionVec.begin(), jobInfo.expressionVec.end(), key) !=
@ -1243,6 +1248,7 @@ void TupleAggregateStep::prep1PhaseAggregate(
precisionAgg.push_back(ti.precision);
typeAgg.push_back(ti.dtype);
widthAgg.push_back(ti.width);
++outIdx;
continue;
}
else if (jobInfo.groupConcatInfo.columns().find(key) !=
@ -1255,6 +1261,7 @@ void TupleAggregateStep::prep1PhaseAggregate(
precisionAgg.push_back(precisionProj[colProj]);
typeAgg.push_back(typeProj[colProj]);
widthAgg.push_back(width[colProj]);
++outIdx;
continue;
}
else if (jobInfo.windowSet.find(key) != jobInfo.windowSet.end())
@ -1266,6 +1273,7 @@ void TupleAggregateStep::prep1PhaseAggregate(
precisionAgg.push_back(precisionProj[colProj]);
typeAgg.push_back(typeProj[colProj]);
widthAgg.push_back(width[colProj]);
++outIdx;
continue;
}
else
@ -1286,16 +1294,16 @@ void TupleAggregateStep::prep1PhaseAggregate(
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
// Create a RowAggFunctionCol (UDAF subtype) with the context.
funct.reset(new RowUDAFFunctionCol(udafc->getContext(), colProj, i));
funct.reset(new RowUDAFFunctionCol(udafc->getContext(), colProj, outIdx));
break;
}
}
@ -1306,7 +1314,7 @@ void TupleAggregateStep::prep1PhaseAggregate(
}
else
{
funct.reset(new RowAggFunctionCol(aggOp, stats, colProj, i));
funct.reset(new RowAggFunctionCol(aggOp, stats, colProj, outIdx));
}
functionVec.push_back(funct);
@ -1477,6 +1485,14 @@ void TupleAggregateStep::prep1PhaseAggregate(
precisionAgg.push_back(udafFuncCol->fUDAFContext.getPrecision());
typeAgg.push_back(udafFuncCol->fUDAFContext.getResultType());
widthAgg.push_back(udafFuncCol->fUDAFContext.getColWidth());
// If the first param is const
udafcParamIdx = 0;
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
break;
}
@ -1488,6 +1504,13 @@ void TupleAggregateStep::prep1PhaseAggregate(
precisionAgg.push_back(precisionProj[colProj]);
typeAgg.push_back(typeProj[colProj]);
widthAgg.push_back(width[colProj]);
// If the param is const
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
}
break;
@ -1520,6 +1543,11 @@ void TupleAggregateStep::prep1PhaseAggregate(
{
aggFuncMap.insert(make_pair(boost::make_tuple(key, aggOp, pUDAFFunc), funct->fOutputColumnIndex));
}
if (aggOp != ROWAGG_MULTI_PARM)
{
++outIdx;
}
}
// now fix the AVG function, locate the count(column) position
@ -1671,12 +1699,14 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
uint32_t bigIntWidth = sizeof(int64_t);
// map key = column key, operation (enum), and UDAF pointer if UDAF.
AGG_MAP aggFuncMap;
set<uint32_t> avgSet;
// set<uint32_t> avgSet;
list<uint32_t> multiParmIndexes;
// fOR udaf
UDAFColumn* udafc = NULL;
mcsv1sdk::mcsv1_UDAF* pUDAFFunc = NULL;
uint32_t projColsUDAFIndex = 0;
uint32_t projColsUDAFIdx = 0;
uint32_t udafcParamIdx = 0;
// for count column of average function
map<uint32_t, SP_ROWAGG_FUNC_t> avgFuncMap, avgDistFuncMap;
@ -1825,9 +1855,9 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
}
// skip sum / count(column) if avg is also selected
if ((aggOp == ROWAGG_SUM || aggOp == ROWAGG_COUNT_COL_NAME) &&
(avgSet.find(aggKey) != avgSet.end()))
continue;
// if ((aggOp == ROWAGG_SUM || aggOp == ROWAGG_COUNT_COL_NAME) &&
// (avgSet.find(aggKey) != avgSet.end()))
// continue;
if (aggOp == ROWAGG_DISTINCT_SUM ||
aggOp == ROWAGG_DISTINCT_AVG ||
@ -1840,12 +1870,12 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
@ -2063,7 +2093,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
typeAgg.push_back(udafFuncCol->fUDAFContext.getResultType());
widthAgg.push_back(udafFuncCol->fUDAFContext.getColWidth());
++colAgg;
// UDAF Dummy holder for UserData struct
// Column for index of UDAF UserData struct
oidsAgg.push_back(oidsProj[colProj]);
keysAgg.push_back(aggKey);
scaleAgg.push_back(0);
@ -2071,6 +2101,14 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
typeAgg.push_back(CalpontSystemCatalog::UBIGINT);
widthAgg.push_back(sizeof(uint64_t));
funct->fAuxColumnIndex = colAgg++;
// If the first param is const
udafcParamIdx = 0;
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
break;
}
@ -2082,7 +2120,15 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
precisionAgg.push_back(precisionProj[colProj]);
typeAgg.push_back(typeProj[colProj]);
widthAgg.push_back(widthProj[colProj]);
multiParmIndexes.push_back(colAgg);
++colAgg;
// If the param is const
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
}
break;
@ -2122,7 +2168,8 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
// check if the count column for AVG is also a returned column,
// if so, replace the "-1" to actual position in returned vec.
AGG_MAP aggDupFuncMap;
pUDAFFunc = NULL;
projColsUDAFIdx = 0;
int64_t multiParms = 0;
// copy over the groupby vector
// update the outputColumnIndex if returned
@ -2133,8 +2180,8 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
aggFuncMap.insert(make_pair(boost::make_tuple(keysAgg[i], 0, pUDAFFunc), i));
}
projColsUDAFIndex = 0;
// locate the return column position in aggregated rowgroup
uint64_t outIdx = 0;
for (uint64_t i = 0; i < returnedColVec.size(); i++)
{
udafc = NULL;
@ -2144,23 +2191,11 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
RowAggFunctionType stats = statsFuncIdMap(returnedColVec[i].second);
int colAgg = -1;
if (aggOp == ROWAGG_UDAF)
if (aggOp == ROWAGG_MULTI_PARM)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
break;
}
}
if (it == jobInfo.projectionCols.end())
{
throw logic_error("(1)prep1PhaseDistinctAggregate: A UDAF function is called but there\'s not enough UDAFColumns");
}
// Skip on final agg.: Extra parms for an aggregate have no work there.
++multiParms;
continue;
}
if (find(jobInfo.distinctColVec.begin(), jobInfo.distinctColVec.end(), retKey) !=
@ -2188,6 +2223,25 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
}
}
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
break;
}
}
if (it == jobInfo.projectionCols.end())
{
throw logic_error("(1)prep1PhaseDistinctAggregate: A UDAF function is called but there\'s not enough UDAFColumns");
}
}
switch (aggOp)
{
case ROWAGG_DISTINCT_AVG:
@ -2438,7 +2492,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
if (returnColMissing)
{
Message::Args args;
args.add(keyName(i, retKey, jobInfo));
args.add(keyName(outIdx, retKey, jobInfo));
string emsg = IDBErrorInfo::instance()->
errorMsg(ERR_NOT_GROUPBY_EXPRESSION, args);
cerr << "prep1PhaseDistinctAggregate: " << emsg << " oid="
@ -2462,7 +2516,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
if (jobInfo.groupByColVec[j] == retKey)
{
if (groupByNoDist[j]->fOutputColumnIndex == (uint32_t) - 1)
groupByNoDist[j]->fOutputColumnIndex = i;
groupByNoDist[j]->fOutputColumnIndex = outIdx;
else
dupGroupbyIndex = groupByNoDist[j]->fOutputColumnIndex;
}
@ -2472,7 +2526,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
if (dupGroupbyIndex != -1)
functionVec2.push_back(SP_ROWAGG_FUNC_t(
new RowAggFunctionCol(
ROWAGG_DUP_FUNCT, ROWAGG_FUNCT_UNDEFINE, -1, i, dupGroupbyIndex)));
ROWAGG_DUP_FUNCT, ROWAGG_FUNCT_UNDEFINE, -1, outIdx, dupGroupbyIndex)));
}
else
{
@ -2480,11 +2534,11 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
SP_ROWAGG_FUNC_t funct;
if (aggOp == ROWAGG_UDAF)
{
funct.reset(new RowUDAFFunctionCol(udafc->getContext(), colAgg, i));
funct.reset(new RowUDAFFunctionCol(udafc->getContext(), colAgg, outIdx));
}
else
{
funct.reset(new RowAggFunctionCol(aggOp, stats, colAgg, i));
funct.reset(new RowAggFunctionCol(aggOp, stats, colAgg, outIdx));
}
if (aggOp == ROWAGG_COUNT_NO_OP)
@ -2521,6 +2575,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
else if (returnedColVec[i].second == AggregateColumn::DISTINCT_AVG)
avgDistFuncMap.insert(make_pair(returnedColVec[i].first, funct));
}
++outIdx;
} // for (i
// now fix the AVG function, locate the count(column) position
@ -2538,7 +2593,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
}
// there is avg(k), but no count(k) in the select list
uint64_t lastCol = returnedColVec.size();
uint64_t lastCol = outIdx;
for (map<uint32_t, SP_ROWAGG_FUNC_t>::iterator k = avgFuncMap.begin(); k != avgFuncMap.end(); k++)
{
@ -2753,6 +2808,11 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
SP_ROWAGG_GRPBY_t groupby(new RowAggGroupByCol(j, k));
groupBySub.push_back(groupby);
// Keep a count of the parms after the first for any aggregate.
// These will be skipped and the count needs to be subtracted
// from where the aux column will be.
int64_t multiParms = 0;
// tricky part : 2 function vectors
// -- dummy function vector for sub-aggregator, which does distinct only
// -- aggregate function on this distinct column for rowAggDist
@ -2760,6 +2820,11 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
for (uint64_t k = 0; k < returnedColVec.size(); k++)
{
if (functionIdMap(returnedColVec[i].second) == ROWAGG_MULTI_PARM)
{
++multiParms;
continue;
}
if (returnedColVec[k].first != distinctColKey)
continue;
@ -2780,7 +2845,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
f->fStatsFunction,
groupBySub.size() - 1,
f->fOutputColumnIndex,
f->fAuxColumnIndex));
f->fAuxColumnIndex-multiParms));
functionSub2.push_back(funct);
}
}
@ -2799,9 +2864,15 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
{
vector<SP_ROWAGG_FUNC_t> functionSub1 = functionNoDistVec;
vector<SP_ROWAGG_FUNC_t> functionSub2;
int64_t multiParms = 0;
for (uint64_t k = 0; k < returnedColVec.size(); k++)
{
if (functionIdMap(returnedColVec[k].second) == ROWAGG_MULTI_PARM)
{
++multiParms;
continue;
}
// search non-distinct functions in functionVec
vector<SP_ROWAGG_FUNC_t>::iterator it = functionVec2.begin();
@ -2817,7 +2888,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
udafFuncCol->fUDAFContext,
udafFuncCol->fInputColumnIndex,
udafFuncCol->fOutputColumnIndex,
udafFuncCol->fAuxColumnIndex));
udafFuncCol->fAuxColumnIndex-multiParms));
functionSub2.push_back(funct);
}
else if ((f->fOutputColumnIndex == k) &&
@ -2839,7 +2910,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(
f->fStatsFunction,
f->fInputColumnIndex,
f->fOutputColumnIndex,
f->fAuxColumnIndex));
f->fAuxColumnIndex-multiParms));
functionSub2.push_back(funct);
}
}
@ -2893,7 +2964,8 @@ void TupleAggregateStep::prep2PhasesAggregate(
set<uint32_t> avgSet;
vector<std::pair<uint32_t, int> >& returnedColVec = jobInfo.returnedColVec;
// For UDAF
uint32_t projColsUDAFIndex = 0;
uint32_t projColsUDAFIdx = 0;
uint32_t udafcParamIdx = 0;
UDAFColumn* udafc = NULL;
mcsv1sdk::mcsv1_UDAF* pUDAFFunc = NULL;
@ -3073,11 +3145,11 @@ void TupleAggregateStep::prep2PhasesAggregate(
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
@ -3305,6 +3377,14 @@ void TupleAggregateStep::prep2PhasesAggregate(
typeAggPm.push_back(CalpontSystemCatalog::UBIGINT);
widthAggPm.push_back(bigUintWidth);
funct->fAuxColumnIndex = colAggPm++;
// If the first param is const
udafcParamIdx = 0;
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
break;
}
@ -3317,6 +3397,13 @@ void TupleAggregateStep::prep2PhasesAggregate(
typeAggPm.push_back(typeProj[colProj]);
widthAggPm.push_back(width[colProj]);
colAggPm++;
// If the param is const
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
}
break;
@ -3342,7 +3429,7 @@ void TupleAggregateStep::prep2PhasesAggregate(
map<uint32_t, SP_ROWAGG_FUNC_t> avgFuncMap;
AGG_MAP aggDupFuncMap;
projColsUDAFIndex = 0;
projColsUDAFIdx = 0;
// copy over the groupby vector
// update the outputColumnIndex if returned
for (uint64_t i = 0; i < groupByPm.size(); i++)
@ -3372,12 +3459,12 @@ void TupleAggregateStep::prep2PhasesAggregate(
udafc = NULL;
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
@ -3703,7 +3790,8 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
set<uint32_t> avgSet, avgDistSet;
vector<std::pair<uint32_t, int> >& returnedColVec = jobInfo.returnedColVec;
// For UDAF
uint32_t projColsUDAFIndex = 0;
uint32_t projColsUDAFIdx = 0;
uint32_t udafcParamIdx = 0;
UDAFColumn* udafc = NULL;
mcsv1sdk::mcsv1_UDAF* pUDAFFunc = NULL;
@ -3919,11 +4007,11 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
@ -4147,6 +4235,14 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
typeAggPm.push_back(CalpontSystemCatalog::UBIGINT);
widthAggPm.push_back(sizeof(uint64_t));
funct->fAuxColumnIndex = colAggPm++;
// If the first param is const
udafcParamIdx = 0;
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
break;
}
@ -4160,6 +4256,13 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
widthAggPm.push_back(width[colProj]);
multiParmIndexes.push_back(colAggPm);
colAggPm++;
// If the param is const
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(udafc->aggParms()[udafcParamIdx].get());
if (cc)
{
funct->fpConstCol = udafc->aggParms()[udafcParamIdx];
}
++udafcParamIdx;
}
break;
@ -4208,9 +4311,10 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
funct.reset(new RowUDAFFunctionCol(
udafFuncCol->fUDAFContext,
udafFuncCol->fOutputColumnIndex,
udafFuncCol->fOutputColumnIndex,
udafFuncCol->fOutputColumnIndex-multiParms,
udafFuncCol->fAuxColumnIndex-multiParms));
functionNoDistVec.push_back(funct);
pUDAFFunc = udafFuncCol->fUDAFContext.getFunction();
}
else
{
@ -4218,9 +4322,10 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
funcPm->fAggFunction,
funcPm->fStatsFunction,
funcPm->fOutputColumnIndex,
funcPm->fOutputColumnIndex,
funcPm->fOutputColumnIndex-multiParms,
funcPm->fAuxColumnIndex-multiParms));
functionNoDistVec.push_back(funct);
pUDAFFunc = NULL;
}
}
@ -4251,7 +4356,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
// These will be skipped and the count needs to be subtracted
// from where the aux column will be.
int64_t multiParms = 0;
projColsUDAFIndex = 0;
projColsUDAFIdx = 0;
// check if the count column for AVG is also a returned column,
// if so, replace the "-1" to actual position in returned vec.
map<uint32_t, SP_ROWAGG_FUNC_t> avgFuncMap, avgDistFuncMap;
@ -4286,11 +4391,11 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
if (aggOp == ROWAGG_UDAF)
{
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIndex;
std::vector<SRCP>::iterator it = jobInfo.projectionCols.begin() + projColsUDAFIdx;
for (; it != jobInfo.projectionCols.end(); it++)
{
udafc = dynamic_cast<UDAFColumn*>((*it).get());
projColsUDAFIndex++;
projColsUDAFIdx++;
if (udafc)
{
pUDAFFunc = udafc->getContext().getFunction();
@ -4436,6 +4541,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
precisionAggDist.push_back(precisionAggUm[colUm]);
typeAggDist.push_back(typeAggUm[colUm]);
widthAggDist.push_back(widthAggUm[colUm]);
colUm -= multiParms;
}
// not a direct hit -- a returned column is not already in the RG from PMs
@ -4472,8 +4578,16 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(
oidsAggDist.push_back(oidsAggUm[colUm]);
keysAggDist.push_back(retKey);
scaleAggDist.push_back(0);
precisionAggDist.push_back(19);
typeAggDist.push_back(CalpontSystemCatalog::BIGINT);
if (isUnsigned(typeAggUm[colUm]))
{
precisionAggDist.push_back(20);
typeAggDist.push_back(CalpontSystemCatalog::UBIGINT);
}
else
{
precisionAggDist.push_back(19);
typeAggDist.push_back(CalpontSystemCatalog::BIGINT);
}
widthAggDist.push_back(bigIntWidth);
}
}
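
The final hunk also widens the distinct-aggregate result when the source column is unsigned: precision 20 with UBIGINT instead of the former fixed precision 19 with BIGINT, since unsigned 64-bit values need up to 20 decimal digits. A minimal sketch of that choice only; pickDistinctResultType and ResultType are illustrative names, not ColumnStore types.

#include <utility>

// Illustrative only: choose a 64-bit result type and its decimal precision
// for a distinct aggregate, based on the signedness of the input column.
enum class ResultType { BigInt, UBigInt };

std::pair<ResultType, unsigned> pickDistinctResultType(bool inputIsUnsigned)
{
    if (inputIsUnsigned)
        return { ResultType::UBigInt, 20 };  // uint64_t max = 18446744073709551615 (20 digits)

    return { ResultType::BigInt, 19 };       // int64_t max = 9223372036854775807 (19 digits)
}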