
MCOL-5493: First portion of UBSan fixes (#2842)

Multiple UB fixes
Author:       Leonid Fedorov
Date:         2023-06-02 17:02:09 +03:00
Committed by: GitHub
Parent:       0a2e9760ee
Commit:       8f93fc3623
31 changed files with 274 additions and 916 deletions
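Most of the C++ changes in this commit follow one pattern: data members that only some constructors initialized are given in-class default initializers, so every construction path produces well-defined values and UBSan's bool/enum checks stop seeing indeterminate loads. A minimal sketch of the idea, using a hypothetical PlanSketch class rather than engine code:

#include <cstdint>
#include <limits>

class PlanSketch
{
 public:
  PlanSketch() = default;  // no long member-initializer list needed

 private:
  // Each member carries its default here, so a constructor that forgets one
  // can no longer leave it indeterminate.
  bool fDistinct = false;
  uint64_t fLimitNum = static_cast<uint64_t>(-1);
  int64_t fUMMemLimit = std::numeric_limits<int64_t>::max();
};

int main()
{
  PlanSketch p;  // all members have well-defined values
  (void)p;
  return 0;
}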


@ -270,7 +270,6 @@ ENDIF()
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-copy" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL) MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-copy" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-declarations" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL) MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-declarations" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
MY_CHECK_AND_SET_COMPILER_FLAG("-Werror -Wall -Wextra") MY_CHECK_AND_SET_COMPILER_FLAG("-Werror -Wall -Wextra")
SET (ENGINE_LDFLAGS "-Wl,--no-as-needed -Wl,--add-needed") SET (ENGINE_LDFLAGS "-Wl,--no-as-needed -Wl,--add-needed")
SET (ENGINE_DT_LIB datatypes) SET (ENGINE_DT_LIB datatypes)


@ -40,9 +40,11 @@ optparse.define short=T long=tsan desc="Build with TSAN" variable=TSAN default=f
optparse.define short=U long=ubsan desc="Build with UBSAN" variable=UBSAN default=false value=true optparse.define short=U long=ubsan desc="Build with UBSAN" variable=UBSAN default=false value=true
optparse.define short=P long=report-path desc="Path for storing reports and profiles" variable=REPORT_PATH default="/core" optparse.define short=P long=report-path desc="Path for storing reports and profiles" variable=REPORT_PATH default="/core"
optparse.define short=N long=ninja desc="Build with ninja" variable=USE_NINJA default=false value=true optparse.define short=N long=ninja desc="Build with ninja" variable=USE_NINJA default=false value=true
optparse.define short=T long=draw-deps desc="Draw dependencies graph" variable=DRAW_DEPS default=false value=true optparse.define short=G long=draw-deps desc="Draw dependencies graph" variable=DRAW_DEPS default=false value=true
optparse.define short=M long=skip-smoke desc="Skip final smoke test" variable=SKIP_SMOKE default=false value=true optparse.define short=M long=skip-smoke desc="Skip final smoke test" variable=SKIP_SMOKE default=false value=true
optparse.define short=n long=no-clean-install desc="Do not perform a clean install (keep existing db files)" variable=NO_CLEAN default=false value=true optparse.define short=n long=no-clean-install desc="Do not perform a clean install (keep existing db files)" variable=NO_CLEAN default=false value=true
optparse.define short=j long=parallel desc="Number of paralles for build" variable=CPUS default=$(getconf _NPROCESSORS_ONLN)
optparse.define short=F long=show-build-flags desc="Print CMake flags, while build" variable=PRINT_CMAKE_FLAGS default=false
source $( optparse.build ) source $( optparse.build )
@ -140,9 +142,9 @@ stop_service()
check_service() check_service()
{ {
if systemctl is-active --quiet $1; then if systemctl is-active --quiet $1; then
message "$1 service started$color_green OK $color_normal" message "$1 $color_normal[$color_green OK $color_normal]"
else else
error "$1 service failed" message "$1 $color_normal[$color_red Fail $color_normal]"
service $1 status service $1 status
fi fi
} }
@ -154,8 +156,14 @@ start_service()
systemctl start mariadb-columnstore systemctl start mariadb-columnstore
systemctl start mariadb systemctl start mariadb
check_service mariadb-columnstore
check_service mariadb check_service mariadb
check_service mariadb-columnstore
check_service mcs-controllernode
check_service mcs-ddlproc
check_service mcs-dmlproc
check_service mcs-primproc
check_service mcs-workernode@1
check_service mcs-writeengineserver
} }
clean_old_installation() clean_old_installation()
@ -290,10 +298,11 @@ build()
MDB_CMAKE_FLAGS="${MDB_CMAKE_FLAGS} -DRPM=sles15" MDB_CMAKE_FLAGS="${MDB_CMAKE_FLAGS} -DRPM=sles15"
fi fi
if [[ $PRINT_CMAKE_FLAGS = true ]] ; then
message "Building with flags" message "Building with flags"
newline_array ${MDB_CMAKE_FLAGS[@]} newline_array ${MDB_CMAKE_FLAGS[@]}
fi
local CPUS=$(getconf _NPROCESSORS_ONLN)
message "Configuring cmake silently" message "Configuring cmake silently"
${CMAKE_BIN_NAME} -DCMAKE_BUILD_TYPE=$MCS_BUILD_TYPE $MDB_CMAKE_FLAGS . | spinner ${CMAKE_BIN_NAME} -DCMAKE_BUILD_TYPE=$MCS_BUILD_TYPE $MDB_CMAKE_FLAGS . | spinner
message_split message_split
@ -400,7 +409,7 @@ fix_config_files()
if grep -q UBSAN $MDB_SERVICE_FILE; then if grep -q UBSAN $MDB_SERVICE_FILE; then
warn "MDB Server has UBSAN options in $MDB_SERVICE_FILE, check it's compatibility" warn "MDB Server has UBSAN options in $MDB_SERVICE_FILE, check it's compatibility"
else else
echo Environment="'UBSAN_OPTIONS=abort_on_error=0,log_path=${REPORT_PATH}/ubsan.mariadb'" >> $MDB_SERVICE_FILE echo Environment="'UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true,log_path=${REPORT_PATH}/ubsan.mariadb'" >> $MDB_SERVICE_FILE
message "UBSAN options were added to $MDB_SERVICE_FILE" message "UBSAN options were added to $MDB_SERVICE_FILE"
fi fi
fi fi
@ -498,7 +507,7 @@ smoke()
message "Selecting magic numbers" message "Selecting magic numbers"
MAGIC=`mysql -N test < $MDB_SOURCE_PATH/storage/columnstore/columnstore/tests/scripts/smoke.sql` MAGIC=`mysql -N test < $MDB_SOURCE_PATH/storage/columnstore/columnstore/tests/scripts/smoke.sql`
if [[ $MAGIC == '42' ]] ; then if [[ $MAGIC == '42' ]] ; then
message "Great answer correct" message "Great answer correct!"
else else
warn "Smoke failed, answer is '$MAGIC'" warn "Smoke failed, answer is '$MAGIC'"
fi fi


@ -325,8 +325,7 @@ void ArithmeticColumn::serialize(messageqcpp::ByteStream& b) const
ObjectReader::writeParseTree(fExpression, b); ObjectReader::writeParseTree(fExpression, b);
b << fTableAlias; b << fTableAlias;
b << fData; b << fData;
const ByteStream::doublebyte tmp = fAsc; b << (uint8_t)fAsc;
b << tmp;
} }
void ArithmeticColumn::unserialize(messageqcpp::ByteStream& b) void ArithmeticColumn::unserialize(messageqcpp::ByteStream& b)
@ -340,9 +339,7 @@ void ArithmeticColumn::unserialize(messageqcpp::ByteStream& b)
fExpression = ObjectReader::createParseTree(b); fExpression = ObjectReader::createParseTree(b);
b >> fTableAlias; b >> fTableAlias;
b >> fData; b >> fData;
ByteStream::doublebyte tmp; b >> (uint8_t&)fAsc;
b >> tmp;
fAsc = (tmp);
fSimpleColumnList.clear(); fSimpleColumnList.clear();
fExpression->walk(getSimpleCols, &fSimpleColumnList); fExpression->walk(getSimpleCols, &fSimpleColumnList);
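The order-by flag is now written and read as a single uint8_t instead of going through a two-byte temporary; together with the bool fAsc = false default added in the header below, a flag that was never assigned can no longer push an indeterminate bool value through the stream. A minimal sketch of round-tripping a bool as one byte, using a plain std::vector buffer rather than the ByteStream API:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Write the flag as exactly one byte holding 0 or 1.
void writeBool(std::vector<uint8_t>& buf, bool v)
{
  buf.push_back(static_cast<uint8_t>(v));
}

// Read the byte back and normalize it to a valid bool.
bool readBool(const std::vector<uint8_t>& buf, size_t& pos)
{
  return buf[pos++] != 0;
}

int main()
{
  std::vector<uint8_t> buf;
  size_t pos = 0;
  writeBool(buf, true);
  std::printf("%d\n", readBool(buf, pos) ? 1 : 0);
  return 0;
}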


@ -187,7 +187,7 @@ class ArithmeticColumn : public ReturnedColumn
private: private:
std::string fTableAlias; // table alias for this column std::string fTableAlias; // table alias for this column
bool fAsc; // asc flag for order by column bool fAsc = false; // asc flag for order by column
std::string fData; std::string fData;
/** build expression tree /** build expression tree
@ -272,7 +272,7 @@ class ArithmeticColumn : public ReturnedColumn
} }
private: private:
ParseTree* fExpression; ParseTree* fExpression = nullptr;
using TreeNode::evaluate; using TreeNode::evaluate;
void evaluate(rowgroup::Row& row) void evaluate(rowgroup::Row& row)
{ {


@ -96,8 +96,7 @@ CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(
const ReturnedColumnList& returnedCols, ParseTree* filters, const SelectList& subSelects, const ReturnedColumnList& returnedCols, ParseTree* filters, const SelectList& subSelects,
const GroupByColumnList& groupByCols, ParseTree* having, const OrderByColumnList& orderByCols, const GroupByColumnList& groupByCols, ParseTree* having, const OrderByColumnList& orderByCols,
const string alias, const int location, const bool dependent) const string alias, const int location, const bool dependent)
: fLocalQuery(GLOBAL_QUERY) : fReturnedCols(returnedCols)
, fReturnedCols(returnedCols)
, fFilters(filters) , fFilters(filters)
, fSubSelects(subSelects) , fSubSelects(subSelects)
, fGroupByCols(groupByCols) , fGroupByCols(groupByCols)
@ -106,55 +105,15 @@ CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(
, fTableAlias(alias) , fTableAlias(alias)
, fLocation(location) , fLocation(location)
, fDependent(dependent) , fDependent(dependent)
, fTxnID(-1)
, fTraceFlags(TRACE_NONE)
, fStatementID(0)
, fDistinct(false)
, fOverrideLargeSideEstimate(false)
, fDistinctUnionNum(0)
, fSubType(MAIN_SELECT)
, fLimitStart(0)
, fLimitNum(-1)
, fHasOrderBy(false)
, fStringScanThreshold(ULONG_MAX)
, fQueryType(SELECT)
, fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL) , fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL)
, fStringTableThreshold(20)
, fOrderByThreads(1)
, fDJSSmallSideLimit(0)
, fDJSLargeSideLimit(0)
, fDJSPartitionSize(100 * 1024 * 1024)
, // 100MB mem usage for disk based join
fUMMemLimit(numeric_limits<int64_t>::max())
, fIsDML(false)
{ {
fUuid = QueryTeleClient::genUUID(); fUuid = QueryTeleClient::genUUID();
} }
CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(string data) CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(string data)
: fLocalQuery(GLOBAL_QUERY) : fData(data)
, fData(data)
, fTxnID(-1)
, fTraceFlags(TRACE_NONE)
, fStatementID(0)
, fDistinct(false)
, fOverrideLargeSideEstimate(false)
, fDistinctUnionNum(0)
, fSubType(MAIN_SELECT)
, fLimitStart(0)
, fLimitNum(-1)
, fHasOrderBy(false)
, fStringScanThreshold(ULONG_MAX)
, fQueryType(SELECT)
, fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL) , fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL)
, fStringTableThreshold(20)
, fOrderByThreads(1)
, fDJSSmallSideLimit(0)
, fDJSLargeSideLimit(0)
, fDJSPartitionSize(100 * 1024 * 1024)
, // 100MB mem usage for disk based join
fUMMemLimit(numeric_limits<int64_t>::max())
, fIsDML(false)
{ {
fUuid = QueryTeleClient::genUUID(); fUuid = QueryTeleClient::genUUID();
} }


@ -801,7 +801,7 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/** /**
* If set, then the local PM only option is turned on * If set, then the local PM only option is turned on
*/ */
uint32_t fLocalQuery; uint32_t fLocalQuery = GLOBAL_QUERY;
/** /**
* A list of ReturnedColumn objects * A list of ReturnedColumn objects
@ -822,7 +822,7 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/** /**
* A tree of Filter objects * A tree of Filter objects
*/ */
ParseTree* fFilters; ParseTree* fFilters = nullptr;
/** /**
* A list of CalpontExecutionPlan objects * A list of CalpontExecutionPlan objects
*/ */
@ -846,11 +846,11 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/** /**
* An enum indicating the location of this select statement in the enclosing select statement * An enum indicating the location of this select statement in the enclosing select statement
*/ */
int fLocation; int fLocation = 0;
/** /**
* A flag indicating if this sub-select is dependent on the enclosing query or is constant * A flag indicating if this sub-select is dependent on the enclosing query or is constant
*/ */
bool fDependent; bool fDependent = false;
/** /**
* SQL representation of this execution plan * SQL representation of this execution plan
@ -859,57 +859,57 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
static ColumnMap fColMap; // for getplan to use. class-wise map static ColumnMap fColMap; // for getplan to use. class-wise map
ColumnMap fColumnMap; // for ExeMgr to use. not shared between objects ColumnMap fColumnMap; // for ExeMgr to use. not shared between objects
uint32_t fSessionID; uint32_t fSessionID = 0;
int fTxnID; // SQLEngine only needs the ID value int fTxnID = -1; // SQLEngine only needs the ID value
BRM::QueryContext fVerID; BRM::QueryContext fVerID;
// @bug5316. remove static // @bug5316. remove static
std::string fSchemaName; std::string fSchemaName;
std::string fTableName; std::string fTableName;
uint32_t fTraceFlags; uint32_t fTraceFlags = TRACE_NONE;
/** /**
* One-up statementID number for this session (fSessionID) * One-up statementID number for this session (fSessionID)
*/ */
uint32_t fStatementID; uint32_t fStatementID = 0;
RMParmVec frmParms; RMParmVec frmParms;
TableList fTableList; TableList fTableList;
SelectList fDerivedTableList; SelectList fDerivedTableList;
bool fDistinct; bool fDistinct = false;
bool fOverrideLargeSideEstimate; bool fOverrideLargeSideEstimate = false;
// for union // for union
SelectList fUnionVec; SelectList fUnionVec;
uint8_t fDistinctUnionNum; uint8_t fDistinctUnionNum = 0;
// for subselect // for subselect
uint64_t fSubType; uint64_t fSubType = MAIN_SELECT;
std::string fDerivedTbAlias; std::string fDerivedTbAlias;
std::string fDerivedTbView; std::string fDerivedTbView;
// for limit // for limit
uint64_t fLimitStart; uint64_t fLimitStart = 0;
uint64_t fLimitNum; uint64_t fLimitNum = -1;
// for parent select order by // for parent select order by
bool fHasOrderBy; bool fHasOrderBy = false;
// for Select clause subquery // for Select clause subquery
SelectList fSelectSubList; SelectList fSelectSubList;
// @bug3321, for string scan blocks // @bug3321, for string scan blocks
uint64_t fStringScanThreshold; uint64_t fStringScanThreshold = ULONG_MAX;
// query type // query type
uint32_t fQueryType; uint32_t fQueryType = SELECT;
uint32_t fPriority; uint32_t fPriority;
uint32_t fStringTableThreshold; uint32_t fStringTableThreshold = 20;
// for specific handlers processing, e.g. GROUP BY // for specific handlers processing, e.g. GROUP BY
bool fSpecHandlerProcessed; bool fSpecHandlerProcessed = false;
uint32_t fOrderByThreads; uint32_t fOrderByThreads = 1;
// Derived table involved in the query. For derived table optimization // Derived table involved in the query. For derived table optimization
std::vector<SCSEP> fSubSelectList; std::vector<SCSEP> fSubSelectList;
@ -917,14 +917,12 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
boost::uuids::uuid fUuid; boost::uuids::uuid fUuid;
/* Disk-based join vars */ /* Disk-based join vars */
uint64_t fDJSSmallSideLimit; uint64_t fDJSSmallSideLimit = 0;
uint64_t fDJSLargeSideLimit; uint64_t fDJSLargeSideLimit = 0;
uint64_t fDJSPartitionSize; uint64_t fDJSPartitionSize = 100 * 1024 * 1024;
int64_t fUMMemLimit; int64_t fUMMemLimit = numeric_limits<int64_t>::max();
bool fIsDML; bool fIsDML = false;
long fTimeZone = 0;
long fTimeZone;
std::vector<execplan::ParseTree*> fDynamicParseTreeVec; std::vector<execplan::ParseTree*> fDynamicParseTreeVec;
}; };


@ -27,61 +27,11 @@ namespace joblist
// //
static const int showOidInDataList_Index = std::ios_base::xalloc(); static const int showOidInDataList_Index = std::ios_base::xalloc();
/*static*/
AnyDataList::DataListTypes AnyDataList::dlType(const DataList_t* dl)
{
if (dl == 0)
return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(BandedDataList)) return BANDED_DATALIST;
// if (typeid(*dl) == typeid(WorkingSetDataList)) return WORKING_SET_DATALIST;
if (typeid(*dl) == typeid(FifoDataList))
return FIFO_DATALIST;
// if (typeid(*dl) == typeid(BucketDataList)) return BUCKET_DATALIST;
// if (typeid(*dl) == typeid(ConstantDataList_t)) return CONSTANT_DATALIST;
// if (typeid(*dl) == typeid(SortedWSDL)) return SORTED_WORKING_SET_DATALIST;
// if (typeid(*dl) == typeid(ZonedDL)) return ZONED_DATALIST;
// if (typeid(*dl) == typeid(DeliveryWSDL)) return DELIVERYWSDL;
if (typeid(*dl) == typeid(RowGroupDL))
return ROWGROUP_DATALIST;
return UNKNOWN_DATALIST;
}
AnyDataList::DataListTypes AnyDataList::strDlType(const StrDataList* dl)
{
if (dl == 0)
return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(StringDataList)) return STRINGBANDED_DATALIST;
// if (typeid(*dl) == typeid(StringFifoDataList)) return STRINGFIFO_DATALIST;
// if (typeid(*dl) == typeid(StringBucketDataList)) return STRINGBUCKET_DATALIST;
if (typeid(*dl) == typeid(StrDataList))
return STRING_DATALIST;
// if (typeid(*dl) == typeid(StringConstantDataList_t)) return STRINGCONSTANT_DATALIST;
// if (typeid(*dl) == typeid(StringSortedWSDL)) return STRINGSORTED_WORKING_SET_DATALIST;
// if (typeid(*dl) == typeid(StringZonedDL)) return STRINGZONED_DATALIST;
return UNKNOWN_DATALIST;
}
// AnyDataList::DataListTypes AnyDataList::tupleDlType(const TupleDataList* dl)
//{
// if (dl == 0) return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(TupleBucketDataList)) return TUPLEBUCKET_DATALIST;
// return UNKNOWN_DATALIST;
//}
std::ostream& operator<<(std::ostream& oss, const AnyDataListSPtr& dl) std::ostream& operator<<(std::ostream& oss, const AnyDataListSPtr& dl)
{ {
DataList_t* dle = NULL;
StrDataList* dls = NULL;
// DoubleDataList * dld = NULL;
// TupleBucketDataList * dlt = NULL;
bool withOid = (oss.iword(showOidInDataList_Index) != 0); bool withOid = (oss.iword(showOidInDataList_Index) != 0);
if ((dle = dl->dataList()) != NULL) if (auto* dle = dl->rowGroupDL(); dle != nullptr)
{ {
if (withOid) if (withOid)
oss << dle->OID() << " "; oss << dle->OID() << " ";
@ -95,50 +45,8 @@ std::ostream& operator<<(std::ostream& oss, const AnyDataListSPtr& dl)
elemSizeStr << "(" << dle->getDiskElemSize1st() << "," << dle->getDiskElemSize2nd() << ")"; elemSizeStr << "(" << dle->getDiskElemSize1st() << "," << dle->getDiskElemSize2nd() << ")";
} }
oss << "(0x" << std::hex << (ptrdiff_t)dle << std::dec << "[" << AnyDataList::dlType(dle) << "]" oss << "(0x" << std::hex << (ptrdiff_t)dle << std::dec << elemSizeStr.str() << ")";
<< elemSizeStr.str() << ")";
} }
else if ((dls = dl->stringDataList()) != NULL)
{
if (withOid)
oss << dls->OID() << " ";
//...If this datalist is saved to disk, then include the saved
//...element size in the printed information.
std::ostringstream elemSizeStr;
if (dls->useDisk())
{
elemSizeStr << "(" << dls->getDiskElemSize1st() << "," << dls->getDiskElemSize2nd() << ")";
}
oss << "(0x" << std::hex << (ptrdiff_t)dls << std::dec << "[" << AnyDataList::strDlType(dls) << "]"
<< elemSizeStr.str() << ")";
}
// else if ((dld = dl->doubleDL()) != NULL)
// {
// if (withOid)
// oss << dld->OID() << " ";
//
// //...If this datalist is saved to disk, then include the saved
// //...element size in the printed information.
// std::ostringstream elemSizeStr;
// if ( dld->useDisk() )
// {
// elemSizeStr << "(" << dld->getDiskElemSize1st() << "," <<
// dld->getDiskElemSize2nd() << ")";
// }
//
// oss << "(0x"
// << std::hex << (ptrdiff_t)dld << std::dec << "[" <<
// AnyDataList::DOUBLE_DATALIST << "])";
// }
// else if ((dlt = dl->tupleBucketDL()) != NULL)
// {
// oss << dlt->OID() << " (0x";
// oss << std::hex << (ptrdiff_t)dlt << std::dec << "[" << AnyDataList::TUPLEBUCKET_DATALIST << "]),
//";
// }
else else
{ {
oss << "0 (0x0000 [0])"; oss << "0 (0x0000 [0])";


@ -346,187 +346,29 @@ namespace joblist
class AnyDataList class AnyDataList
{ {
public: public:
AnyDataList() : fDl3(0), fDl6(0), fDl9(0), fDisown(false) AnyDataList() = default;
{
}
~AnyDataList()
{
if (!fDisown)
{
delete fDl3;
delete fDl6;
delete fDl9;
}
}
// AnyDataList() : fDl1(0), fDl2(0), fDl3(0), fDl4(0), fDl5(0), fDl6(0), fDl7(0), fDl8(0), fDl9(0), ~AnyDataList() = default;
// fDl10(0), fDl11(0), fDl12(0), fDl13(0), fDl14(0), fDl15(0), fDl16(0), fDl17(0), fDl18(0),
// fDl19(0), fDl20(0), fDisown(false) { }
// ~AnyDataList() { if (!fDisown) { delete fDl1; delete fDl2; delete fDl3; delete fDl4;
// delete fDl5; delete fDl6; delete fDl7; delete fDl8; delete fDl9; delete fDl10; delete fDl11;
// delete fDl12; delete fDl13; delete fDl14; delete fDl15; delete fDl16; delete fDl17;
// delete fDl18; delete fDl19; delete fDl20; } }
// disown() fixes the problem of multiple ownership of a single DL,
// or one on the stack
// In the world of bad ideas these are at the top. The whole point of this class is to manage
// dynamically allocated data in an automatic way. These 2 methods circumvent this, and they
// are not necessary in any event, because you can safely share AnyDataList's via a AnyDataListSPtr.
inline void disown() __attribute__((deprecated))
{
fDisown = true;
}
inline void posess() __attribute__((deprecated))
{
fDisown = false;
}
// inline void bandedDL(BandedDataList* dl) { fDl1 = dl; }
// inline BandedDataList* bandedDL() { return fDl1; }
// inline const BandedDataList* bandedDL() const { return fDl1; }
//
// inline void workingSetDL(WorkingSetDataList* dl) { fDl2 = dl; }
// inline WorkingSetDataList* workingSetDL() { return fDl2; }
// inline const WorkingSetDataList* workingSetDL() const { return fDl2; }
//
inline void fifoDL(FifoDataList* dl)
{
fDl3 = dl;
}
inline FifoDataList* fifoDL()
{
return fDl3;
}
inline const FifoDataList* fifoDL() const
{
return fDl3;
}
//
// inline void bucketDL(BucketDataList* dl) { fDl4 = dl; }
// inline BucketDataList* bucketDL() { return fDl4; }
// inline const BucketDataList* bucketDL() const { return fDl4; }
//
// inline void constantDL(ConstantDataList_t* dl) { fDl5 = dl; }
// inline ConstantDataList_t* constantDL() { return fDl5; }
// inline const ConstantDataList_t* constantDL() const { return fDl5; }
//
// inline void sortedWSDL(SortedWSDL* dl) { fDl13 = dl; }
// inline SortedWSDL* sortedWSDL() { return fDl13; }
// inline const SortedWSDL* sortedWSDL() const { return fDl13; }
//
// inline void zonedDL(ZonedDL* dl) { fDl15 = dl; }
// inline ZonedDL* zonedDL() { return fDl15; }
// inline const ZonedDL* zonedDL() const { return fDl15; }
//
inline void stringDL(StringFifoDataList* dl)
{
fDl6 = dl;
}
inline StringFifoDataList* stringDL()
{
return fDl6;
}
inline const StringFifoDataList* stringDL() const
{
return fDl6;
}
//
// inline void stringBandedDL(StringDataList* dl) { fDl10 = dl; }
// inline StringDataList* stringBandedDL() { return fDl10; }
// inline const StringDataList* stringBandedDL() const { return fDl10; }
//
// inline void stringBucketDL(StringBucketDataList* dl) { fDl11 = dl; }
// inline StringBucketDataList* stringBucketDL() { return fDl11; }
// inline const StringBucketDataList* stringBucketDL() const { return fDl11; }
//
// inline void stringConstantDL(StringConstantDataList_t* dl) { fDl12 = dl; }
// inline StringConstantDataList_t* stringConstantDL() { return fDl12; }
// inline const StringConstantDataList_t* stringConstantDL() const { return fDl12; }
//
// inline void stringSortedWSDL(StringSortedWSDL* dl) { fDl14 = dl; }
// inline StringSortedWSDL* stringSortedWSDL() { return fDl14; }
// inline const StringSortedWSDL* stringSortedWSDL() const { return fDl14; }
//
// inline void stringZonedDL(StringZonedDL* dl) { fDl16 = dl; }
// inline StringZonedDL* stringZonedDL() { return fDl16; }
// inline const StringZonedDL* stringZonedDL() const { return fDl16; }
//
// inline void tupleBucketDL(TupleBucketDataList* dl) { fDl18 = dl; }
// inline TupleBucketDataList* tupleBucketDL() { return fDl18; }
// inline const TupleBucketDataList* tupleBucketDL() const { return fDl18; }
//
// inline void deliveryWSDL(DeliveryWSDL *dl) { fDl19 = dl; }
// inline DeliveryWSDL * deliveryWSDL() { return fDl19; }
// inline const DeliveryWSDL * deliveryWSDL() const { return fDl19; }
inline void rowGroupDL(boost::shared_ptr<RowGroupDL> dl) inline void rowGroupDL(boost::shared_ptr<RowGroupDL> dl)
{ {
fDl20 = dl; fDatalist = dl;
} }
inline void rowGroupDL(RowGroupDL* dl) inline void rowGroupDL(RowGroupDL* dl)
{ {
fDl20.reset(dl); fDatalist.reset(dl);
} }
inline RowGroupDL* rowGroupDL() inline RowGroupDL* rowGroupDL()
{ {
return fDl20.get(); return fDatalist.get();
} }
inline const RowGroupDL* rowGroupDL() const inline const RowGroupDL* rowGroupDL() const
{ {
return fDl20.get(); return fDatalist.get();
} }
DataList_t* dataList()
{
if (fDl3 != NULL)
return reinterpret_cast<DataList_t*>(fDl3);
else if (fDl9 != NULL)
return fDl9;
return reinterpret_cast<DataList_t*>(fDl20.get());
// if (fDl1 != NULL) return fDl1;
// else if (fDl2 != NULL) return fDl2;
// else if (fDl3 != NULL) return reinterpret_cast<DataList_t*>(fDl3);
// else if (fDl4 != NULL) return fDl4;
// else if (fDl9 != NULL) return fDl9;
// else if (fDl13 != NULL) return fDl13;
// else if (fDl15 != NULL) return fDl15;
// else if (fDl19 != NULL) return fDl19;
// else if (fDl20 != NULL) return reinterpret_cast<DataList_t*>(fDl20);
// else return fDl5;
}
//
StrDataList* stringDataList()
{
// if (fDl6 != NULL) return reinterpret_cast<StrDataList*>(fDl6);
// else if (fDl10 != NULL) return fDl10;
// else if (fDl11 != NULL) return fDl11;
// else if (fDl12 != NULL) return fDl12;
// else if (fDl14 != NULL) return fDl14;
// else if (fDl16 != NULL) return fDl16;
// return fDl8;
return reinterpret_cast<StrDataList*>(fDl6);
}
//
// TupleDataList* tupleDataList() {
// if (fDl18 != NULL) return fDl18;
// return fDl17;
// }
//
// /* fDl{7,8} store base class pointers. For consistency, maybe strDataList
// should consider fDl6 also. */
// inline StrDataList * strDataList()
// { return fDl8; }
//
// inline void strDataList(StrDataList *d)
// { fDl8 = d; }
//
// inline DoubleDataList * doubleDL()
// { return fDl7; }
//
// inline void doubleDL(DoubleDataList *d)
// { fDl7 = d; }
enum DataListTypes enum DataListTypes
{ {
@ -552,34 +394,10 @@ class AnyDataList
ROWGROUP_DATALIST ROWGROUP_DATALIST
}; };
static DataListTypes dlType(const DataList_t* dl);
static DataListTypes strDlType(const StrDataList* dl);
// static DataListTypes tupleDlType(const TupleDataList* dl);
uint32_t getNumConsumers() uint32_t getNumConsumers()
{ {
// if (fDl1 != NULL) return fDl1->getNumConsumers(); if (fDatalist)
// else if (fDl2 != NULL) return fDl2->getNumConsumers();
// else if (fDl3 != NULL) return fDl3->getNumConsumers();
// else if (fDl6 != NULL) return fDl6->getNumConsumers();
// else if (fDl10 != NULL) return fDl10->getNumConsumers();
// else if (fDl13 != NULL) return fDl13->getNumConsumers();
// else if (fDl14 != NULL) return fDl14->getNumConsumers();
// else if (fDl15 != NULL) return fDl15->getNumConsumers();
// else if (fDl16 != NULL) return fDl16->getNumConsumers();
// else if (fDl4 != NULL) return 1;
// else if (fDl11 != NULL) return 1;
// else if (fDl18 != NULL) return 1;
// else if (fDl19 != NULL) return fDl19->getNumConsumers();
// else if (fDl20 != NULL) return 1;
// else return 0;
if (fDl20)
return 1; return 1;
else if (fDl3 != NULL)
return fDl3->getNumConsumers();
else if (fDl6 != NULL)
return fDl6->getNumConsumers();
return 0; return 0;
} }
@ -592,27 +410,7 @@ class AnyDataList
private: private:
AnyDataList(const AnyDataList& rhs); AnyDataList(const AnyDataList& rhs);
AnyDataList& operator=(const AnyDataList& rhs); AnyDataList& operator=(const AnyDataList& rhs);
boost::shared_ptr<RowGroupDL> fDatalist;
// BandedDataList* fDl1;
// WorkingSetDataList* fDl2;
FifoDataList* fDl3;
// BucketDataList* fDl4;
// ConstantDataList_t* fDl5;
StringFifoDataList* fDl6;
// DoubleDataList* fDl7;
// StrDataList* fDl8;
DataList_t* fDl9;
// StringDataList* fDl10;
// StringBucketDataList* fDl11;
// StringConstantDataList_t* fDl12;
// SortedWSDL* fDl13;
// StringSortedWSDL* fDl14;
// ZonedDL* fDl15;
// StringZonedDL* fDl16;
// TupleDataList* fDl17;
// TupleBucketDataList *fDl18;
// DeliveryWSDL *fDl19;
boost::shared_ptr<RowGroupDL> fDl20;
bool fDisown; bool fDisown;
}; };
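The AnyDataList rewrite replaces a row of raw-pointer slots plus the deprecated disown()/posess() bookkeeping with a single smart-pointer member, so destruction is automatic and the double-delete and leak paths disappear. A minimal sketch of the resulting ownership model, using std::shared_ptr where the engine keeps boost::shared_ptr:

#include <memory>

struct RowGroupDL
{
};

class AnyDataListSketch
{
 public:
  void rowGroupDL(std::shared_ptr<RowGroupDL> dl)
  {
    fDatalist = std::move(dl);
  }
  void rowGroupDL(RowGroupDL* dl)
  {
    fDatalist.reset(dl);  // takes ownership; freed when the last owner goes away
  }
  RowGroupDL* rowGroupDL() const
  {
    return fDatalist.get();
  }

 private:
  std::shared_ptr<RowGroupDL> fDatalist;
};

int main()
{
  AnyDataListSketch adl;
  adl.rowGroupDL(new RowGroupDL);  // no manual delete and no disown flag needed
  return adl.rowGroupDL() != nullptr ? 0 : 1;
}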


@ -624,21 +624,15 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
{ {
// not strings, no need for dictionary steps, output fifo datalist // not strings, no need for dictionary steps, output fifo datalist
AnyDataListSPtr spdl1(new AnyDataList()); AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc1->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl1); outJs1.outAdd(spdl1);
pcs1->outputAssociation(outJs1); pcs1->outputAssociation(outJs1);
AnyDataListSPtr spdl2(new AnyDataList()); AnyDataListSPtr spdl2(new AnyDataList());
FifoDataList* dl2 = new FifoDataList(1, jobInfo.fifoSize);
spdl2->fifoDL(dl2);
dl2->OID(sc2->oid());
JobStepAssociation outJs2; JobStepAssociation outJs2;
outJs2.outAdd(spdl2); outJs2.outAdd(spdl2);
pcs2->outputAssociation(outJs2); pcs2->outputAssociation(outJs2);
pcs2->inputAssociation(outJs1); pcs2->inputAssociation(outJs1);
@ -682,9 +676,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 1 (pcolstep) output // data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList()); AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl11); outJs1.outAdd(spdl11);
@ -692,9 +683,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output // data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList()); AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
JobStepAssociation outJs2; JobStepAssociation outJs2;
outJs2.outAdd(spdl12); outJs2.outAdd(spdl12);
@ -715,9 +703,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output // data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList()); AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3; JobStepAssociation outJs3;
outJs3.outAdd(spdl21); outJs3.outAdd(spdl21);
@ -730,9 +715,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 2 (pdictionarystep) output // data list for column 2 step 2 (pdictionarystep) output
AnyDataListSPtr spdl22(new AnyDataList()); AnyDataListSPtr spdl22(new AnyDataList());
StringFifoDataList* dl22 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl22->stringDL(dl22);
dl22->OID(sc2->oid());
JobStepAssociation outJs4; JobStepAssociation outJs4;
outJs4.outAdd(spdl22); outJs4.outAdd(spdl22);
@ -789,9 +771,7 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 1 (pcolstep) output // data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList()); AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl11); outJs1.outAdd(spdl11);
@ -799,9 +779,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output // data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList()); AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
JobStepAssociation outJs2; JobStepAssociation outJs2;
outJs2.outAdd(spdl12); outJs2.outAdd(spdl12);
@ -814,9 +791,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output // data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList()); AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3; JobStepAssociation outJs3;
outJs3.outAdd(spdl21); outJs3.outAdd(spdl21);
@ -858,9 +832,7 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// extra steps for string column greater than eight bytes -- from token to string // extra steps for string column greater than eight bytes -- from token to string
// data list for column 1 step 1 (pcolstep) output // data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList()); AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl11); outJs1.outAdd(spdl11);
@ -868,9 +840,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output // data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList()); AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
pDictionaryStep* pdss2 = new pDictionaryStep(dictOid2, tableOid2, ct2, jobInfo); pDictionaryStep* pdss2 = new pDictionaryStep(dictOid2, tableOid2, ct2, jobInfo);
jobInfo.keyInfo->dictOidToColOid[dictOid2] = sc2->oid(); jobInfo.keyInfo->dictOidToColOid[dictOid2] = sc2->oid();
@ -882,9 +851,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output // data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList()); AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3; JobStepAssociation outJs3;
outJs3.outAdd(spdl21); outJs3.outAdd(spdl21);
@ -897,9 +863,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 2 (pdictionarystep) output // data list for column 2 step 2 (pdictionarystep) output
AnyDataListSPtr spdl22(new AnyDataList()); AnyDataListSPtr spdl22(new AnyDataList());
StringFifoDataList* dl22 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl22->stringDL(dl22);
dl22->OID(sc2->oid());
JobStepAssociation outJs4; JobStepAssociation outJs4;
outJs4.outAdd(spdl22); outJs4.outAdd(spdl22);
@ -1471,17 +1434,17 @@ bool optimizeIdbPatitionSimpleFilter(SimpleFilter* sf, JobStepVector& jsv, JobIn
if (sf->op()->op() != opeq.op()) if (sf->op()->op() != opeq.op())
return false; return false;
const FunctionColumn* fc = static_cast<const FunctionColumn*>(sf->lhs()); const FunctionColumn* fc = dynamic_cast<const FunctionColumn*>(sf->lhs());
const ConstantColumn* cc = static_cast<const ConstantColumn*>(sf->rhs()); const ConstantColumn* cc = dynamic_cast<const ConstantColumn*>(sf->rhs());
if (fc == NULL) if (fc == nullptr)
{ {
cc = static_cast<const ConstantColumn*>(sf->lhs()); cc = dynamic_cast<const ConstantColumn*>(sf->lhs());
fc = static_cast<const FunctionColumn*>(sf->rhs()); fc = dynamic_cast<const FunctionColumn*>(sf->rhs());
} }
// not a function or not idbparttition // not a function or not idbparttition
if (fc == NULL || cc == NULL || fc->functionName().compare("idbpartition") != 0) if (fc == nullptr || cc == nullptr || fc->functionName().compare("idbpartition") != 0)
return false; return false;
// make sure the cc has 3 tokens // make sure the cc has 3 tokens
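The switch from static_cast to dynamic_cast is what makes the null checks above meaningful: static_cast of a non-null pointer never yields nullptr, so a type mismatch went undetected and any use of the mis-cast pointer was undefined behavior, while dynamic_cast returns nullptr when the runtime type does not match. A minimal sketch with stand-in classes rather than the execplan types:

struct ReturnedColumn
{
  virtual ~ReturnedColumn() = default;
};
struct FunctionColumn : ReturnedColumn
{
};
struct ConstantColumn : ReturnedColumn
{
};

// True only when one side is a FunctionColumn and the other a ConstantColumn,
// in either order; dynamic_cast yields nullptr on any mismatch.
bool looksLikePartitionFilter(const ReturnedColumn* lhs, const ReturnedColumn* rhs)
{
  const FunctionColumn* fc = dynamic_cast<const FunctionColumn*>(lhs);
  const ConstantColumn* cc = dynamic_cast<const ConstantColumn*>(rhs);

  if (fc == nullptr)
  {
    cc = dynamic_cast<const ConstantColumn*>(lhs);
    fc = dynamic_cast<const FunctionColumn*>(rhs);
  }

  return fc != nullptr && cc != nullptr;
}

int main()
{
  FunctionColumn f;
  ConstantColumn c;
  return looksLikePartitionFilter(&f, &c) ? 0 : 1;
}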
@ -1608,9 +1571,6 @@ const JobStepVector doSimpleFilter(SimpleFilter* sf, JobInfo& jobInfo)
// data list for pcolstep output // data list for pcolstep output
AnyDataListSPtr spdl1(new AnyDataList()); AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl1); outJs1.outAdd(spdl1);
@ -1618,9 +1578,6 @@ const JobStepVector doSimpleFilter(SimpleFilter* sf, JobInfo& jobInfo)
// data list for pdictionarystep output // data list for pdictionarystep output
AnyDataListSPtr spdl2(new AnyDataList()); AnyDataListSPtr spdl2(new AnyDataList());
StringFifoDataList* dl2 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl2->stringDL(dl2);
dl2->OID(sc->oid());
JobStepAssociation outJs2; JobStepAssociation outJs2;
outJs2.outAdd(spdl2); outJs2.outAdd(spdl2);
@ -2712,9 +2669,6 @@ const JobStepVector doConstantFilter(const ConstantFilter* cf, JobInfo& jobInfo)
// data list for pcolstep output // data list for pcolstep output
AnyDataListSPtr spdl1(new AnyDataList()); AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc->oid());
JobStepAssociation outJs1; JobStepAssociation outJs1;
outJs1.outAdd(spdl1); outJs1.outAdd(spdl1);
@ -2722,9 +2676,6 @@ const JobStepVector doConstantFilter(const ConstantFilter* cf, JobInfo& jobInfo)
// data list for pdictionarystep output // data list for pdictionarystep output
AnyDataListSPtr spdl2(new AnyDataList()); AnyDataListSPtr spdl2(new AnyDataList());
StringFifoDataList* dl2 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl2->stringDL(dl2);
dl2->OID(sc->oid());
JobStepAssociation outJs2; JobStepAssociation outJs2;
outJs2.outAdd(spdl2); outJs2.outAdd(spdl2);


@ -201,19 +201,8 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++) for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++)
{ {
ptrdiff_t dloutptr; RowGroupDL* dlout = qsi->get()->outputAssociation().outAt(i)->rowGroupDL();
DataList_t* dlout = qsi->get()->outputAssociation().outAt(i)->dataList(); ptrdiff_t dloutptr = (ptrdiff_t)dlout;
uint32_t numConsumers = qsi->get()->outputAssociation().outAt(i)->getNumConsumers();
if (dlout)
{
dloutptr = (ptrdiff_t)dlout;
}
else
{
StrDataList* sdl = qsi->get()->outputAssociation().outAt(i)->stringDataList();
dloutptr = (ptrdiff_t)sdl;
}
for (unsigned int k = 0; k < querySteps.size(); k++) for (unsigned int k = 0; k < querySteps.size(); k++)
{ {
@ -222,31 +211,12 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < queryInputSA.outSize(); j++) for (unsigned int j = 0; j < queryInputSA.outSize(); j++)
{ {
ptrdiff_t dlinptr; RowGroupDL* dlin = queryInputSA.outAt(j)->rowGroupDL();
DataList_t* dlin = queryInputSA.outAt(j)->dataList(); ptrdiff_t dlinptr = (ptrdiff_t)dlin;;
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = queryInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr) if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr)
{ {
dotFile << stepidIn << " -> " << stepidOut; dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
} }
} }
} }
@ -258,32 +228,12 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < projectInputSA.outSize(); j++) for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{ {
ptrdiff_t dlinptr; RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
DataList_t* dlin = projectInputSA.outAt(j)->dataList(); ptrdiff_t dlinptr = (ptrdiff_t)dlin;;
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
if (dloutptr == dlinptr) if (dloutptr == dlinptr)
// if ((ptrdiff_t)dlout == (ptrdiff_t)dlin)
{ {
dotFile << stepidIn << " -> " << stepidOut; dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
} }
} }
} }
@ -359,19 +309,8 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int i = 0; i < psi->get()->outputAssociation().outSize(); i++) for (unsigned int i = 0; i < psi->get()->outputAssociation().outSize(); i++)
{ {
ptrdiff_t dloutptr; RowGroupDL* dlout = psi->get()->outputAssociation().outAt(i)->rowGroupDL();
DataList_t* dlout = psi->get()->outputAssociation().outAt(i)->dataList(); ptrdiff_t dloutptr = (ptrdiff_t)dlout;
uint32_t numConsumers = psi->get()->outputAssociation().outAt(i)->getNumConsumers();
if (dlout)
{
dloutptr = (ptrdiff_t)dlout;
}
else
{
StrDataList* sdl = psi->get()->outputAssociation().outAt(i)->stringDataList();
dloutptr = (ptrdiff_t)sdl;
}
for (unsigned int k = ctn + 1; k < projectSteps.size(); k++) for (unsigned int k = ctn + 1; k < projectSteps.size(); k++)
{ {
@ -380,31 +319,13 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < projectInputSA.outSize(); j++) for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{ {
ptrdiff_t dlinptr; RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
DataList_t* dlin = projectInputSA.outAt(j)->dataList(); ptrdiff_t dlinptr = (ptrdiff_t)dlin;
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr) if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr)
{ {
dotFile << stepidIn << " -> " << stepidOut; dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
} }
} }
} }


@ -775,22 +775,15 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++) for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++)
{ {
ptrdiff_t dloutptr = 0; ptrdiff_t dloutptr = 0;
DataList_t* dlout; RowGroupDL* dlout;
StrDataList* sdl;
// TupleDataList* tdl; // TupleDataList* tdl;
if ((dlout = qsi->get()->outputAssociation().outAt(i)->dataList())) if ((dlout = qsi->get()->outputAssociation().outAt(i)->rowGroupDL()))
{ {
dloutptr = (ptrdiff_t)dlout; dloutptr = (ptrdiff_t)dlout;
outSize = dlout->totalSize(); outSize = dlout->totalSize();
diskIo = dlout->totalDiskIoTime(saveTime, loadTime); diskIo = dlout->totalDiskIoTime(saveTime, loadTime);
} }
else if ((sdl = qsi->get()->outputAssociation().outAt(i)->stringDataList()))
{
dloutptr = (ptrdiff_t)sdl;
outSize = sdl->totalSize();
diskIo = sdl->totalDiskIoTime(saveTime, loadTime);
}
// if HashJoinStep, determine if output fifo was cached to disk // if HashJoinStep, determine if output fifo was cached to disk
bool hjTempDiskFlag = false; bool hjTempDiskFlag = false;
@ -803,15 +796,10 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int j = 0; j < queryInputSA.outSize(); j++) for (unsigned int j = 0; j < queryInputSA.outSize(); j++)
{ {
ptrdiff_t dlinptr = 0; ptrdiff_t dlinptr = 0;
DataList_t* dlin = queryInputSA.outAt(j)->dataList(); RowGroupDL* dlin = queryInputSA.outAt(j)->rowGroupDL();
StrDataList* sdl = 0;
if (dlin) if (dlin)
dlinptr = (ptrdiff_t)dlin; dlinptr = (ptrdiff_t)dlin;
else if ((sdl = queryInputSA.outAt(j)->stringDataList()))
{
dlinptr = (ptrdiff_t)sdl;
}
if (dloutptr == dlinptr) if (dloutptr == dlinptr)
{ {
@ -856,19 +844,8 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int j = 0; j < projectInputSA.outSize(); j++) for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{ {
ptrdiff_t dlinptr; RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
DataList_t* dlin = projectInputSA.outAt(j)->dataList(); ptrdiff_t dlinptr = (ptrdiff_t)dlin;
StrDataList* sdl = 0;
if (dlin)
{
dlinptr = (ptrdiff_t)dlin;
}
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
if (dloutptr == dlinptr) if (dloutptr == dlinptr)
{ {


@ -310,44 +310,6 @@ pColScanStep::pColScanStep(const pColStep& rhs) : JobStep(rhs), fRm(rhs.resource
fTraceFlags = rhs.fTraceFlags; fTraceFlags = rhs.fTraceFlags;
} }
void pColScanStep::addFilters()
{
AnyDataListSPtr dl = fInputJobStepAssociation.outAt(0);
DataList_t* bdl = dl->dataList();
idbassert(bdl);
int it = -1;
bool more;
ElementType e;
int64_t token;
try
{
it = bdl->getIterator();
}
catch (std::exception& ex)
{
cerr << "pColScanStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
throw;
}
catch (...)
{
cerr << "pColScanStep::addFilters: caught exception" << endl;
throw;
}
fBOP = BOP_OR;
more = bdl->next(it, &e);
while (more)
{
token = e.second;
addFilter(COMPARE_EQ, token);
more = bdl->next(it, &e);
}
return;
}
bool pColScanStep::isEmptyVal(const uint8_t* val8) const bool pColScanStep::isEmptyVal(const uint8_t* val8) const
{ {
const int width = fColType.colWidth; const int width = fColType.colWidth;


@ -498,77 +498,6 @@ const string pColStep::toString() const
return oss.str(); return oss.str();
} }
void pColStep::addFilters()
{
AnyDataListSPtr dl = fInputJobStepAssociation.outAt(0);
DataList_t* bdl = dl->dataList();
FifoDataList* fifo = fInputJobStepAssociation.outAt(0)->fifoDL();
idbassert(bdl);
int it = -1;
bool more;
ElementType e;
int64_t token;
if (fifo != NULL)
{
try
{
it = fifo->getIterator();
}
catch (exception& ex)
{
cerr << "pColStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
}
catch (...)
{
cerr << "pColStep::addFilters: caught exception" << endl;
}
fBOP = BOP_OR;
UintRowGroup rw;
more = fifo->next(it, &rw);
while (more)
{
for (uint64_t i = 0; i < rw.count; ++i)
addFilter(COMPARE_EQ, (int64_t)rw.et[i].second);
more = fifo->next(it, &rw);
}
}
else
{
try
{
it = bdl->getIterator();
}
catch (exception& ex)
{
cerr << "pColStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
}
catch (...)
{
cerr << "pColStep::addFilters: caught exception" << endl;
}
fBOP = BOP_OR;
more = bdl->next(it, &e);
while (more)
{
token = e.second;
addFilter(COMPARE_EQ, token);
more = bdl->next(it, &e);
}
}
return;
}
/* This exists to avoid a DBRM lookup for every rid. */ /* This exists to avoid a DBRM lookup for every rid. */
inline uint64_t pColStep::getLBID(uint64_t rid, bool& scan) inline uint64_t pColStep::getLBID(uint64_t rid, bool& scan)
{ {


@ -259,9 +259,6 @@ class pColStep : public JobStep
return fFilters; return fFilters;
} }
protected:
void addFilters();
private: private:
/** @brief constructor for completeness /** @brief constructor for completeness
*/ */
@ -293,7 +290,7 @@ class pColStep : public JobStep
// Running with this one will swallow rows at projection. // Running with this one will swallow rows at projection.
bool fSwallowRows; bool fSwallowRows;
bool isFilterFeeder; bool isFilterFeeder = false;
uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP
uint64_t fMsgBytesIn; // total byte count for incoming messages uint64_t fMsgBytesIn; // total byte count for incoming messages
uint64_t fMsgBytesOut; // total byte count for outcoming messages uint64_t fMsgBytesOut; // total byte count for outcoming messages
@ -482,8 +479,6 @@ class pColScanStep : public JobStep
return fFilters; return fFilters;
} }
protected:
void addFilters();
private: private:
// defaults okay? // defaults okay?
@ -518,7 +513,7 @@ class pColScanStep : public JobStep
uint32_t extentSize, divShift, ridsPerBlock, rpbShift, numExtents; uint32_t extentSize, divShift, ridsPerBlock, rpbShift, numExtents;
// config::Config *fConfig; // config::Config *fConfig;
bool isFilterFeeder; bool isFilterFeeder = false;
uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP
uint64_t fMsgBytesIn; // total byte count for incoming messages uint64_t fMsgBytesIn; // total byte count for incoming messages
uint64_t fMsgBytesOut; // total byte count for outcoming messages uint64_t fMsgBytesOut; // total byte count for outcoming messages
@ -1233,7 +1228,7 @@ class TupleBPS : public BatchPrimitive, public TupleDeliveryStep
uint32_t fMaxNumThreads; uint32_t fMaxNumThreads;
uint32_t fNumThreads; uint32_t fNumThreads;
PrimitiveStepType ffirstStepType; PrimitiveStepType ffirstStepType;
bool isFilterFeeder; bool isFilterFeeder = false;
std::vector<uint64_t> fProducerThreads; // thread pool handles std::vector<uint64_t> fProducerThreads; // thread pool handles
std::vector<uint64_t> fProcessorThreads; std::vector<uint64_t> fProcessorThreads;
messageqcpp::ByteStream fFilterString; messageqcpp::ByteStream fFilterString;


@ -2157,8 +2157,8 @@ void TupleBPS::processByteStreamVector(vector<boost::shared_ptr<messageqcpp::Byt
vector<rowgroup::RGData> fromPrimProc; vector<rowgroup::RGData> fromPrimProc;
auto data = getJoinLocalDataByIndex(threadID); auto data = getJoinLocalDataByIndex(threadID);
bool validCPData; bool validCPData = false;
bool hasBinaryColumn; bool hasBinaryColumn = false;
int128_t min; int128_t min;
int128_t max; int128_t max;
uint64_t lbid; uint64_t lbid;
@ -2203,8 +2203,8 @@ void TupleBPS::processByteStreamVector(vector<boost::shared_ptr<messageqcpp::Byt
return; return;
} }
bool unused; bool unused = false;
bool fromDictScan; bool fromDictScan = false;
fromPrimProc.clear(); fromPrimProc.clear();
fBPP->getRowGroupData(*bs, &fromPrimProc, &validCPData, &lbid, &fromDictScan, &min, &max, &cachedIO, fBPP->getRowGroupData(*bs, &fromPrimProc, &validCPData, &lbid, &fromDictScan, &min, &max, &cachedIO,
&physIO, &touchedBlocks, &unused, threadID, &hasBinaryColumn, fColType); &physIO, &touchedBlocks, &unused, threadID, &hasBinaryColumn, fColType);
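The same initialization discipline is applied at this call site: the flags handed to getRowGroupData by address start as false, so a path on which the callee returns without writing them can no longer be read as an indeterminate bool. A minimal sketch with a hypothetical callee, not the TupleBPS API:

#include <cstdio>

// Hypothetical callee that may return early without writing its out-parameters.
static void fetchFlags(bool* validCPData, bool* hasBinaryColumn, bool haveData)
{
  if (!haveData)
    return;  // out-parameters keep whatever the caller put in them

  *validCPData = true;
  *hasBinaryColumn = false;
}

int main()
{
  bool validCPData = false;     // was declared uninitialized before this commit
  bool hasBinaryColumn = false;
  fetchFlags(&validCPData, &hasBinaryColumn, /*haveData=*/false);
  std::printf("%d %d\n", validCPData ? 1 : 0, hasBinaryColumn ? 1 : 0);
  return 0;
}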


@ -271,7 +271,11 @@ void TupleHashJoinStep::startSmallRunners(uint index)
std::shared_ptr<TupleJoiner> joiner; std::shared_ptr<TupleJoiner> joiner;
jt = joinTypes[index]; jt = joinTypes[index];
if (traceOn())
{
extendedInfo += toString(); extendedInfo += toString();
}
if (typelessJoin[index]) if (typelessJoin[index])
{ {
@ -349,14 +353,20 @@ void TupleHashJoinStep::startSmallRunners(uint index)
" size = " << joiner->size() << endl; " size = " << joiner->size() << endl;
*/ */
if (traceOn())
{
extendedInfo += "\n"; extendedInfo += "\n";
}
ostringstream oss; ostringstream oss;
if (!joiner->onDisk()) if (!joiner->onDisk())
{ {
// add extended info, and if not aborted then tell joiner // add extended info, and if not aborted then tell joiner
// we're done reading the small side. // we're done reading the small side.
if (traceOn())
{
if (joiner->inPM()) if (joiner->inPM())
{
{ {
oss << "PM join (" << index << ")" << endl; oss << "PM join (" << index << ")" << endl;
#ifdef JLF_DEBUG #ifdef JLF_DEBUG
@ -364,6 +374,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)
#endif #endif
extendedInfo += oss.str(); extendedInfo += oss.str();
} }
}
else if (joiner->inUM()) else if (joiner->inUM())
{ {
oss << "UM join (" << index << ")" << endl; oss << "UM join (" << index << ")" << endl;
@ -372,14 +383,18 @@ void TupleHashJoinStep::startSmallRunners(uint index)
#endif #endif
extendedInfo += oss.str(); extendedInfo += oss.str();
} }
}
if (!cancelled()) if (!cancelled())
joiner->doneInserting(); joiner->doneInserting();
} }
if (traceOn())
{
boost::mutex::scoped_lock lk(*fStatsMutexPtr); boost::mutex::scoped_lock lk(*fStatsMutexPtr);
fExtendedInfo += extendedInfo; fExtendedInfo += extendedInfo;
formatMiniStats(index); formatMiniStats(index);
} }
}
/* Index is which small input to read. */ /* Index is which small input to read. */
void TupleHashJoinStep::smallRunnerFcn(uint32_t index, uint threadID, uint64_t* jobs) void TupleHashJoinStep::smallRunnerFcn(uint32_t index, uint threadID, uint64_t* jobs)
@ -1099,7 +1114,8 @@ const string TupleHashJoinStep::toString() const
for (size_t i = 0; i < idlsz; ++i) for (size_t i = 0; i < idlsz; ++i)
{ {
RowGroupDL* idl = fInputJobStepAssociation.outAt(i)->rowGroupDL(); const AnyDataListSPtr& dl = fInputJobStepAssociation.outAt(i);
RowGroupDL* idl = dl->rowGroupDL();
CalpontSystemCatalog::OID oidi = 0; CalpontSystemCatalog::OID oidi = 0;
if (idl) if (idl)
@ -1111,7 +1127,7 @@ const string TupleHashJoinStep::toString() const
oss << "*"; oss << "*";
oss << "tb/col:" << fTableOID1 << "/" << oidi; oss << "tb/col:" << fTableOID1 << "/" << oidi;
oss << " " << fInputJobStepAssociation.outAt(i); oss << " " << dl;
} }
idlsz = fOutputJobStepAssociation.outSize(); idlsz = fOutputJobStepAssociation.outSize();


@ -343,7 +343,7 @@ void getColNameFromItem(std::ostringstream& ostream, Item* item)
} }
else else
{ {
Item_ident* iip = reinterpret_cast<Item_ident*>(item); Item_ident* iip = static_cast<Item_ident*>(item);
if (iip->db_name.str) if (iip->db_name.str)
ostream << iip->db_name.str << '.'; ostream << iip->db_name.str << '.';
@ -386,13 +386,11 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
return found; return found;
} }
Item_func* ifp_sort = reinterpret_cast<Item_func*>(sort_item);
// base cases for Item_field and Item_ref. The second arg is binary cmp switch // base cases for Item_field and Item_ref. The second arg is binary cmp switch
found = group_item->eq(sort_item, false); found = group_item->eq(sort_item, false);
if (!found && sort_item->type() == Item::REF_ITEM) if (!found && sort_item->type() == Item::REF_ITEM)
{ {
Item_ref* ifp_sort_ref = reinterpret_cast<Item_ref*>(sort_item); Item_ref* ifp_sort_ref = static_cast<Item_ref*>(sort_item);
found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item); found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item);
} }
else if (!found && sort_item->type() == Item::FIELD_ITEM) else if (!found && sort_item->type() == Item::FIELD_ITEM)
@ -400,6 +398,8 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
return found; return found;
} }
Item_func* ifp_sort = static_cast<Item_func*>(sort_item);
// seeking for a group_item match // seeking for a group_item match
for (uint32_t i = 0; !found && i < ifp_sort->argument_count(); i++) for (uint32_t i = 0; !found && i < ifp_sort->argument_count(); i++)
{ {
@ -412,7 +412,7 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
else if (ifp_sort_arg->type() == Item::REF_ITEM) else if (ifp_sort_arg->type() == Item::REF_ITEM)
{ {
// dereference the Item // dereference the Item
Item_ref* ifp_sort_ref = reinterpret_cast<Item_ref*>(ifp_sort_arg); Item_ref* ifp_sort_ref = static_cast<Item_ref*>(ifp_sort_arg);
found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item); found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item);
} }
} }
@ -432,14 +432,14 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
*********************************************************/ *********************************************************/
void check_sum_func_item(const Item* item, void* arg) void check_sum_func_item(const Item* item, void* arg)
{ {
bool* found = reinterpret_cast<bool*>(arg); bool* found = static_cast<bool*>(arg);
if (*found) if (*found)
return; return;
if (item->type() == Item::REF_ITEM) if (item->type() == Item::REF_ITEM)
{ {
const Item_ref* ref_item = reinterpret_cast<const Item_ref*>(item); const Item_ref* ref_item = static_cast<const Item_ref*>(item);
Item* ref_item_item = (Item*)*ref_item->ref; Item* ref_item_item = (Item*)*ref_item->ref;
if (ref_item_item->type() == Item::SUM_FUNC_ITEM) if (ref_item_item->type() == Item::SUM_FUNC_ITEM)
{ {
@ -479,7 +479,7 @@ bool sortItemIsInGrouping(Item* sort_item, ORDER* groupcol)
// e.g. select a, if (sum(b) > 1, 2, 1) from t1 group by 1 order by 2; // e.g. select a, if (sum(b) > 1, 2, 1) from t1 group by 1 order by 2;
if (sort_item->type() == Item::FUNC_ITEM) if (sort_item->type() == Item::FUNC_ITEM)
{ {
Item_func* ifp = reinterpret_cast<Item_func*>(sort_item); Item_func* ifp = static_cast<Item_func*>(sort_item);
ifp->traverse_cond(check_sum_func_item, &found, Item::POSTFIX); ifp->traverse_cond(check_sum_func_item, &found, Item::POSTFIX);
} }
else if (sort_item->type() == Item::CONST_ITEM || sort_item->type() == Item::WINDOW_FUNC_ITEM) else if (sort_item->type() == Item::CONST_ITEM || sort_item->type() == Item::WINDOW_FUNC_ITEM)
@ -528,11 +528,11 @@ ReturnedColumn* buildAggFrmTempField(Item* item, gp_walk_info& gwi)
switch (item->type()) switch (item->type())
{ {
case Item::FIELD_ITEM: ifip = reinterpret_cast<Item_field*>(item); break; case Item::FIELD_ITEM: ifip = static_cast<Item_field*>(item); break;
default: default:
irip = reinterpret_cast<Item_ref*>(item); irip = static_cast<Item_ref*>(item);
if (irip) if (irip)
ifip = reinterpret_cast<Item_field*>(irip->ref[0]); ifip = static_cast<Item_field*>(irip->ref[0]);
break; break;
} }
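The handler-side hunks replace reinterpret_cast with static_cast for downcasts whose target type has already been established by a type() check. static_cast applies the base-to-derived address adjustment; reinterpret_cast merely reuses the pointer bits, which is wrong whenever the base subobject does not sit at offset zero (multiple inheritance, for example) and makes the subsequent dereference undefined behavior. A minimal sketch with stand-in classes, not the server's Item hierarchy:

struct Counted
{
  virtual ~Counted() = default;
  long refs = 0;
};
struct Node
{
  virtual ~Node() = default;
};
struct FieldNode : Counted, Node
{
  int fieldId = 42;
};

int fieldIdOf(Node* n)
{
  // The caller has already verified the dynamic type, so an unchecked downcast
  // is fine, but it must be a static_cast: with two polymorphic bases the Node
  // subobject generally does not sit at offset 0, and a reinterpret_cast would
  // hand back a misadjusted pointer.
  FieldNode* fn = static_cast<FieldNode*>(n);
  return fn->fieldId;
}

int main()
{
  FieldNode node;
  Node* asNode = &node;
  return fieldIdOf(asNode) == 42 ? 0 : 1;
}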
@ -541,7 +541,7 @@ ReturnedColumn* buildAggFrmTempField(Item* item, gp_walk_info& gwi)
std::vector<Item*>::iterator iter = gwi.extSelAggColsItems.begin(); std::vector<Item*>::iterator iter = gwi.extSelAggColsItems.begin();
for (; iter != gwi.extSelAggColsItems.end(); iter++) for (; iter != gwi.extSelAggColsItems.end(); iter++)
{ {
isfp = reinterpret_cast<Item_func_or_sum*>(*iter); isfp = static_cast<Item_func_or_sum*>(*iter);
if (isfp->type() == Item::SUM_FUNC_ITEM && isfp->result_field == ifip->field) if (isfp->type() == Item::SUM_FUNC_ITEM && isfp->result_field == ifip->field)
{ {
@ -1044,7 +1044,7 @@ void debug_walk(const Item* item, void* arg)
if (join) if (join)
{ {
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds); Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond) if (cond)
cond->traverse_cond(debug_walk, arg, Item::POSTFIX); cond->traverse_cond(debug_walk, arg, Item::POSTFIX);
@ -1724,7 +1724,7 @@ bool buildEqualityPredicate(execplan::ReturnedColumn* lhs, execplan::ReturnedCol
boost::shared_ptr<Operator>& sop, const Item_func::Functype& funcType, boost::shared_ptr<Operator>& sop, const Item_func::Functype& funcType,
const vector<Item*>& itemList, bool isInSubs) const vector<Item*>& itemList, bool isInSubs)
{ {
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
// push the column that is associated with the correlated column to the returned // push the column that is associated with the correlated column to the returned
// column list, so the materialized view have the complete projection list. // column list, so the materialized view have the complete projection list.
@ -2048,7 +2048,7 @@ bool buildPredicateItem(Item_func* ifp, gp_walk_info* gwip)
else if (ifp->functype() == Item_func::GUSERVAR_FUNC) else if (ifp->functype() == Item_func::GUSERVAR_FUNC)
{ {
Item_func_get_user_var* udf = reinterpret_cast<Item_func_get_user_var*>(ifp); Item_func_get_user_var* udf = static_cast<Item_func_get_user_var*>(ifp);
String buf; String buf;
if (udf->result_type() == INT_RESULT) if (udf->result_type() == INT_RESULT)
@ -2801,7 +2801,7 @@ void setError(THD* thd, uint32_t errcode, string errmsg)
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr()); thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
} }
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
ci->expressionId = 0; ci->expressionId = 0;
} }
@ -3195,12 +3195,11 @@ CalpontSystemCatalog::ColType colType_MysqlToIDB(const Item* item)
*/ */
case DECIMAL_RESULT: case DECIMAL_RESULT:
{ {
Item_decimal* idp = (Item_decimal*)item; // a DECIMAL_RESULT does not guarantee that the Item is an Item_decimal
ct.colDataType = CalpontSystemCatalog::DECIMAL; ct.colDataType = CalpontSystemCatalog::DECIMAL;
unsigned int precision = idp->decimal_precision(); unsigned int precision = item->decimal_precision();
unsigned int scale = idp->decimal_scale(); unsigned int scale = item->decimal_scale();
ct.setDecimalScalePrecision(precision, scale); ct.setDecimalScalePrecision(precision, scale);
@ -3612,7 +3611,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr()); thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
} }
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
ArithmeticColumn* ac = new ArithmeticColumn(); ArithmeticColumn* ac = new ArithmeticColumn();
Item** sfitempp = item->arguments(); Item** sfitempp = item->arguments();
@ -3638,8 +3637,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
if (!lhs->data() && (sfitempp[0]->type() == Item::FUNC_ITEM)) if (!lhs->data() && (sfitempp[0]->type() == Item::FUNC_ITEM))
{ {
delete lhs; delete lhs;
Item_func* ifp = (Item_func*)sfitempp[0]; lhs = buildParseTree(sfitempp[0], gwi, nonSupport);
lhs = buildParseTree(ifp, gwi, nonSupport);
} }
else if (!lhs->data() && (sfitempp[0]->type() == Item::REF_ITEM)) else if (!lhs->data() && (sfitempp[0]->type() == Item::REF_ITEM))
{ {
@ -3657,8 +3655,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
if (!rhs->data() && (sfitempp[1]->type() == Item::FUNC_ITEM)) if (!rhs->data() && (sfitempp[1]->type() == Item::FUNC_ITEM))
{ {
delete rhs; delete rhs;
Item_func* ifp = (Item_func*)sfitempp[1]; rhs = buildParseTree(sfitempp[1], gwi, nonSupport);
rhs = buildParseTree(ifp, gwi, nonSupport);
} }
else if (!rhs->data() && (sfitempp[1]->type() == Item::REF_ITEM)) else if (!rhs->data() && (sfitempp[1]->type() == Item::REF_ITEM))
{ {
@ -3875,7 +3872,7 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr()); thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
} }
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
string funcName = ifp->func_name(); string funcName = ifp->func_name();
FuncExp* funcExp = FuncExp::instance(); FuncExp* funcExp = FuncExp::instance();
@ -3923,7 +3920,7 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
else if ((funcName == "charset" || funcName == "collation") && ifp->argument_count() == 1 && else if ((funcName == "charset" || funcName == "collation") && ifp->argument_count() == 1 &&
ifp->arguments()[0]->type() == Item::FIELD_ITEM) ifp->arguments()[0]->type() == Item::FIELD_ITEM)
{ {
Item_field* item = reinterpret_cast<Item_field*>(ifp->arguments()[0]); Item_field* item = static_cast<Item_field*>(ifp->arguments()[0]);
CHARSET_INFO* info = item->charset_for_protocol(); CHARSET_INFO* info = item->charset_for_protocol();
ReturnedColumn* rc; ReturnedColumn* rc;
string val; string val;
@ -4047,9 +4044,10 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
// @todo. merge this logic to buildParseTree(). // @todo. merge this logic to buildParseTree().
if ((funcName == "if" && i == 0) || funcName == "xor") if ((funcName == "if" && i == 0) || funcName == "xor")
{ {
// make sure the rcWorkStack is cleaned. // make sure the rcWorkStack is cleaned.
gwi.clauseType = WHERE; gwi.clauseType = WHERE;
sptp.reset(buildParseTree((Item_func*)(ifp->arguments()[i]), gwi, nonSupport)); sptp.reset(buildParseTree(ifp->arguments()[i], gwi, nonSupport));
gwi.clauseType = clauseType; gwi.clauseType = clauseType;
if (!sptp) if (!sptp)
@ -4155,50 +4153,6 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
addIntervalArgs(&gwi, ifp, funcParms); addIntervalArgs(&gwi, ifp, funcParms);
} }
// check for unsupported arguments add the keyword unit argument for extract functions
if (funcName == "extract")
{
Item_date_add_interval* idai = (Item_date_add_interval*)ifp;
switch (idai->int_type)
{
case INTERVAL_DAY_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with DAY_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
case INTERVAL_HOUR_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with HOUR_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
case INTERVAL_MINUTE_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with MINUTE_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
default: break;
}
}
// add the keyword unit argument and char length for cast functions // add the keyword unit argument and char length for cast functions
if (funcName == "cast_as_char") if (funcName == "cast_as_char")
{ {
@ -4496,7 +4450,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr()); thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
} }
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
FunctionColumn* fc = new FunctionColumn(); FunctionColumn* fc = new FunctionColumn();
FunctionParm funcParms; FunctionParm funcParms;
@ -4555,7 +4509,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
// to pull off of rcWorkStack, so we set this inCaseStmt flag to tell it // to pull off of rcWorkStack, so we set this inCaseStmt flag to tell it
// not to. // not to.
gwi.inCaseStmt = true; gwi.inCaseStmt = true;
sptp.reset(buildParseTree((Item_func*)(item->arguments()[i]), gwi, nonSupport)); sptp.reset(buildParseTree(item->arguments()[i], gwi, nonSupport));
gwi.inCaseStmt = false; gwi.inCaseStmt = false;
if (!gwi.ptWorkStack.empty() && *gwi.ptWorkStack.top() == *sptp.get()) if (!gwi.ptWorkStack.empty() && *gwi.ptWorkStack.top() == *sptp.get())
{ {
@ -4591,7 +4545,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
} }
else else
{ {
sptp.reset(buildParseTree((Item_func*)(item->arguments()[i]), gwi, nonSupport)); sptp.reset(buildParseTree(item->arguments()[i], gwi, nonSupport));
// We need to pop whichever stack is holding it, if any. // We need to pop whichever stack is holding it, if any.
if ((!gwi.ptWorkStack.empty()) && *gwi.ptWorkStack.top()->data() == sptp->data()) if ((!gwi.ptWorkStack.empty()) && *gwi.ptWorkStack.top()->data() == sptp->data())
@ -4800,19 +4754,18 @@ SimpleColumn* buildSimpleColumn(Item_field* ifp, gp_walk_info& gwi)
return sc; return sc;
} }
ParseTree* buildParseTree(Item_func* item, gp_walk_info& gwi, bool& nonSupport) ParseTree* buildParseTree(Item* item, gp_walk_info& gwi, bool& nonSupport)
{ {
ParseTree* pt = 0; ParseTree* pt = 0;
Item_cond* icp = (Item_cond*)item;
#ifdef DEBUG_WALK_COND #ifdef DEBUG_WALK_COND
// debug // debug
cerr << "Build Parsetree: " << endl; cerr << "Build Parsetree: " << endl;
icp->traverse_cond(debug_walk, &gwi, Item::POSTFIX); item->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
#endif #endif
//@bug5044. POSTFIX walking should always be treated as WHERE clause filter //@bug5044. POSTFIX walking should always be treated as WHERE clause filter
ClauseType clauseType = gwi.clauseType; ClauseType clauseType = gwi.clauseType;
gwi.clauseType = WHERE; gwi.clauseType = WHERE;
icp->traverse_cond(gp_walk, &gwi, Item::POSTFIX); item->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
gwi.clauseType = clauseType; gwi.clauseType = clauseType;
if (gwi.fatalParseError) if (gwi.fatalParseError)
@ -4924,9 +4877,9 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr()); thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
} }
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr()); cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
Item_sum* isp = reinterpret_cast<Item_sum*>(item); Item_sum* isp = static_cast<Item_sum*>(item);
Item** sfitempp = isp->get_orig_args(); Item** sfitempp = isp->get_orig_args();
SRCP parm; SRCP parm;
@ -5156,7 +5109,7 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
{ {
case Item::FIELD_ITEM: case Item::FIELD_ITEM:
{ {
Item_field* ifp = reinterpret_cast<Item_field*>(sfitemp); Item_field* ifp = static_cast<Item_field*>(sfitemp);
SimpleColumn* sc = buildSimpleColumn(ifp, gwi); SimpleColumn* sc = buildSimpleColumn(ifp, gwi);
if (!sc) if (!sc)
@ -5580,6 +5533,8 @@ because it has multiple arguments.";
return ac; return ac;
} }
void addIntervalArgs(gp_walk_info* gwip, Item_func* ifp, FunctionParm& functionParms) void addIntervalArgs(gp_walk_info* gwip, Item_func* ifp, FunctionParm& functionParms)
{ {
string funcName = ifp->func_name(); string funcName = ifp->func_name();
@ -5678,7 +5633,7 @@ bool isNotFuncAndConstScalarSubSelect(Item_func* ifp, const std::string& funcNam
void gp_walk(const Item* item, void* arg) void gp_walk(const Item* item, void* arg)
{ {
gp_walk_info* gwip = reinterpret_cast<gp_walk_info*>(arg); gp_walk_info* gwip = static_cast<gp_walk_info*>(arg);
idbassert(gwip); idbassert(gwip);
// Bailout... // Bailout...
@ -5745,8 +5700,8 @@ void gp_walk(const Item* item, void* arg)
{ {
case INT_RESULT: case INT_RESULT:
{ {
Item_int* iip = (Item_int*)item; Item* non_const_item = const_cast<Item*>(item);
gwip->rcWorkStack.push(buildReturnedColumn(iip, *gwip, gwip->fatalParseError)); gwip->rcWorkStack.push(buildReturnedColumn(non_const_item, *gwip, gwip->fatalParseError));
break; break;
} }
@ -5755,19 +5710,17 @@ void gp_walk(const Item* item, void* arg)
// Special handling for 0xHHHH literals // Special handling for 0xHHHH literals
if (item->type_handler() == &type_handler_hex_hybrid) if (item->type_handler() == &type_handler_hex_hybrid)
{ {
Item_hex_hybrid* hip = reinterpret_cast<Item_hex_hybrid*>(const_cast<Item*>(item)); Item_hex_hybrid* hip = static_cast<Item_hex_hybrid*>(const_cast<Item*>(item));
gwip->rcWorkStack.push(new ConstantColumn((int64_t)hip->val_int(), ConstantColumn::NUM)); gwip->rcWorkStack.push(new ConstantColumn((int64_t)hip->val_int(), ConstantColumn::NUM));
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top()); ConstantColumn* cc = dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top());
cc->timeZone(gwip->timeZone); cc->timeZone(gwip->timeZone);
break; break;
} }
Item_string* isp = (Item_string*)item; if (item->result_type() == STRING_RESULT)
if (isp)
{
if (isp->result_type() == STRING_RESULT)
{ {
// dangerous cast here
Item* isp = const_cast<Item*>(item);
String val, *str = isp->val_str(&val); String val, *str = isp->val_str(&val);
if (str) if (str)
{ {
@ -5788,7 +5741,6 @@ void gp_walk(const Item* item, void* arg)
(dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top()))->timeZone(gwip->timeZone); (dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top()))->timeZone(gwip->timeZone);
break; break;
} }
}
gwip->rcWorkStack.push(buildReturnedColumn(isp, *gwip, gwip->fatalParseError)); gwip->rcWorkStack.push(buildReturnedColumn(isp, *gwip, gwip->fatalParseError));
} }
@ -5796,25 +5748,14 @@ void gp_walk(const Item* item, void* arg)
} }
case REAL_RESULT: case REAL_RESULT:
{
Item_float* ifp = (Item_float*)item;
gwip->rcWorkStack.push(buildReturnedColumn(ifp, *gwip, gwip->fatalParseError));
break;
}
case DECIMAL_RESULT: case DECIMAL_RESULT:
{
Item_decimal* idp = (Item_decimal*)item;
gwip->rcWorkStack.push(buildReturnedColumn(idp, *gwip, gwip->fatalParseError));
break;
}
case TIME_RESULT: case TIME_RESULT:
{ {
Item_temporal_literal* itp = (Item_temporal_literal*)item; Item* nonConstItem = const_cast<Item*>(item);
gwip->rcWorkStack.push(buildReturnedColumn(itp, *gwip, gwip->fatalParseError)); gwip->rcWorkStack.push(buildReturnedColumn(nonConstItem, *gwip, gwip->fatalParseError));
break; break;
} }
default: default:
{ {
if (gwip->condPush) if (gwip->condPush)
@ -5853,14 +5794,16 @@ void gp_walk(const Item* item, void* arg)
case Item::FUNC_ITEM: case Item::FUNC_ITEM:
{ {
Item_func* ifp = (Item_func*)item; Item* ncitem = const_cast<Item*>(item);
Item_func* ifp = static_cast<Item_func*>(ncitem);
string funcName = ifp->func_name(); string funcName = ifp->func_name();
if (!gwip->condPush) if (!gwip->condPush)
{ {
if (!ifp->fixed()) if (!ifp->fixed())
{ {
ifp->fix_fields(gwip->thd, reinterpret_cast<Item**>(&ifp)); ifp->fix_fields(gwip->thd, &ncitem);
} }
// Special handling for queries of the form: // Special handling for queries of the form:
@ -6176,11 +6119,15 @@ void gp_walk(const Item* item, void* arg)
if (col->type() == Item::FIELD_ITEM) if (col->type() == Item::FIELD_ITEM)
{ {
const auto& field_name = string(((Item_field*)item)->field_name.str); const Item_ident* ident_field = dynamic_cast<const Item_ident*>(item);
if (ident_field)
{
const auto& field_name = string(ident_field->field_name.str);
auto colMap = CalpontSelectExecutionPlan::ColumnMap::value_type(field_name, scsp); auto colMap = CalpontSelectExecutionPlan::ColumnMap::value_type(field_name, scsp);
gwip->columnMap.insert(colMap); gwip->columnMap.insert(colMap);
} }
} }
}
bool cando = true; bool cando = true;
gwip->clauseType = clauseType; gwip->clauseType = clauseType;
@ -6252,8 +6199,7 @@ void gp_walk(const Item* item, void* arg)
} }
else if (col->type() == Item::COND_ITEM) else if (col->type() == Item::COND_ITEM)
{ {
Item_func* ifp = (Item_func*)col; gwip->ptWorkStack.push(buildParseTree(col, *gwip, gwip->fatalParseError));
gwip->ptWorkStack.push(buildParseTree(ifp, *gwip, gwip->fatalParseError));
} }
else if (col->type() == Item::FIELD_ITEM && gwip->clauseType == HAVING) else if (col->type() == Item::FIELD_ITEM && gwip->clauseType == HAVING)
{ {
@ -6404,7 +6350,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
{ {
case Item::FIELD_ITEM: case Item::FIELD_ITEM:
{ {
Item_field* ifp = reinterpret_cast<Item_field*>(item); Item_field* ifp = static_cast<Item_field*>(item);
field_vec.push_back(ifp); field_vec.push_back(ifp);
return; return;
} }
@ -6413,7 +6359,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
{ {
// hasAggColumn = true; // hasAggColumn = true;
parseInfo |= AGG_BIT; parseInfo |= AGG_BIT;
Item_sum* isp = reinterpret_cast<Item_sum*>(item); Item_sum* isp = static_cast<Item_sum*>(item);
Item** sfitempp = isp->arguments(); Item** sfitempp = isp->arguments();
for (uint32_t i = 0; i < isp->argument_count(); i++) for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6424,7 +6370,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
case Item::FUNC_ITEM: case Item::FUNC_ITEM:
{ {
Item_func* isp = reinterpret_cast<Item_func*>(item); Item_func* isp = static_cast<Item_func*>(item);
if (string(isp->func_name()) == "<in_optimizer>") if (string(isp->func_name()) == "<in_optimizer>")
{ {
@ -6441,7 +6387,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
case Item::COND_ITEM: case Item::COND_ITEM:
{ {
Item_cond* icp = reinterpret_cast<Item_cond*>(item); Item_cond* icp = static_cast<Item_cond*>(item);
List_iterator_fast<Item> it(*(icp->argument_list())); List_iterator_fast<Item> it(*(icp->argument_list()));
Item* cond_item; Item* cond_item;
@ -6465,13 +6411,13 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
if ((*(ref->ref))->type() == Item::SUM_FUNC_ITEM) if ((*(ref->ref))->type() == Item::SUM_FUNC_ITEM)
{ {
parseInfo |= AGG_BIT; parseInfo |= AGG_BIT;
Item_sum* isp = reinterpret_cast<Item_sum*>(*(ref->ref)); Item_sum* isp = static_cast<Item_sum*>(*(ref->ref));
Item** sfitempp = isp->arguments(); Item** sfitempp = isp->arguments();
// special handling for count(*). This should not be treated as constant. // special handling for count(*). This should not be treated as constant.
if (isSupportedAggregateWithOneConstArg(isp, sfitempp)) if (isSupportedAggregateWithOneConstArg(isp, sfitempp))
{ {
field_vec.push_back((Item_field*)item); // dummy field_vec.push_back(nullptr); // dummy
} }
for (uint32_t i = 0; i < isp->argument_count(); i++) for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6490,14 +6436,14 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
if (!rc) if (!rc)
{ {
Item_field* ifp = reinterpret_cast<Item_field*>(*(ref->ref)); Item_field* ifp = static_cast<Item_field*>(*(ref->ref));
field_vec.push_back(ifp); field_vec.push_back(ifp);
} }
break; break;
} }
else if ((*(ref->ref))->type() == Item::FUNC_ITEM) else if ((*(ref->ref))->type() == Item::FUNC_ITEM)
{ {
Item_func* isp = reinterpret_cast<Item_func*>(*(ref->ref)); Item_func* isp = static_cast<Item_func*>(*(ref->ref));
Item** sfitempp = isp->arguments(); Item** sfitempp = isp->arguments();
for (uint32_t i = 0; i < isp->argument_count(); i++) for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6507,7 +6453,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
} }
else if ((*(ref->ref))->type() == Item::CACHE_ITEM) else if ((*(ref->ref))->type() == Item::CACHE_ITEM)
{ {
Item_cache* isp = reinterpret_cast<Item_cache*>(*(ref->ref)); Item_cache* isp = static_cast<Item_cache*>(*(ref->ref));
parse_item(isp->get_example(), field_vec, hasNonSupportItem, parseInfo, gwi); parse_item(isp->get_example(), field_vec, hasNonSupportItem, parseInfo, gwi);
break; break;
} }
@ -7580,7 +7526,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
case Item::FUNC_ITEM: case Item::FUNC_ITEM:
{ {
Item_func* ifp = reinterpret_cast<Item_func*>(item); Item_func* ifp = static_cast<Item_func*>(item);
// @bug4383. error out non-support stored function // @bug4383. error out non-support stored function
if (ifp->functype() == Item_func::FUNC_SP) if (ifp->functype() == Item_func::FUNC_SP)
@ -7714,7 +7660,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
} // End of FUNC_ITEM } // End of FUNC_ITEM
// DRRTUY Replace the whole section with typeid() checks or use // DRRTUY Replace the whole section with typeid() checks or use
// reinterpret_cast here // static_cast here
case Item::CONST_ITEM: case Item::CONST_ITEM:
{ {
switch (item->cmp_type()) switch (item->cmp_type())
@ -7792,7 +7738,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (join) if (join)
{ {
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds); Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond) if (cond)
cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX); cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -7928,13 +7874,12 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (select_lex.having != 0) if (select_lex.having != 0)
{ {
Item_cond* having = reinterpret_cast<Item_cond*>(select_lex.having);
#ifdef DEBUG_WALK_COND #ifdef DEBUG_WALK_COND
cerr << "------------------- HAVING ---------------------" << endl; cerr << "------------------- HAVING ---------------------" << endl;
having->traverse_cond(debug_walk, &gwi, Item::POSTFIX); select_lex.having->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
cerr << "------------------------------------------------\n" << endl; cerr << "------------------------------------------------\n" << endl;
#endif #endif
having->traverse_cond(gp_walk, &gwi, Item::POSTFIX); select_lex.having->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
if (gwi.fatalParseError) if (gwi.fatalParseError)
{ {
@ -8090,7 +8035,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
{ {
gwi.clauseType = GROUP_BY; gwi.clauseType = GROUP_BY;
Item* nonSupportItem = NULL; Item* nonSupportItem = NULL;
ORDER* groupcol = reinterpret_cast<ORDER*>(select_lex.group_list.first); ORDER* groupcol = static_cast<ORDER*>(select_lex.group_list.first);
// check if window functions are in order by. InfiniDB process order by list if // check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection. // window functions are involved, either in order by or projection.
@ -8112,7 +8057,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
} }
gwi.hasWindowFunc = hasWindowFunc; gwi.hasWindowFunc = hasWindowFunc;
groupcol = reinterpret_cast<ORDER*>(select_lex.group_list.first); groupcol = static_cast<ORDER*>(select_lex.group_list.first);
for (; groupcol; groupcol = groupcol->next) for (; groupcol; groupcol = groupcol->next)
{ {
@ -8366,7 +8311,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
// ORDER BY processing // ORDER BY processing
{ {
SQL_I_List<ORDER> order_list = select_lex.order_list; SQL_I_List<ORDER> order_list = select_lex.order_list;
ORDER* ordercol = reinterpret_cast<ORDER*>(order_list.first); ORDER* ordercol = static_cast<ORDER*>(order_list.first);
// check if window functions are in order by. InfiniDB process order by list if // check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection. // window functions are involved, either in order by or projection.
@ -8393,7 +8338,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
} }
// re-visit the first of ordercol list // re-visit the first of ordercol list
ordercol = reinterpret_cast<ORDER*>(order_list.first); ordercol = static_cast<ORDER*>(order_list.first);
{ {
for (; ordercol; ordercol = ordercol->next) for (; ordercol; ordercol = ordercol->next)
@ -8551,7 +8496,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (unionSel) if (unionSel)
order_list = select_lex.master_unit()->global_parameters()->order_list; order_list = select_lex.master_unit()->global_parameters()->order_list;
ordercol = reinterpret_cast<ORDER*>(order_list.first); ordercol = static_cast<ORDER*>(order_list.first);
for (; ordercol; ordercol = ordercol->next) for (; ordercol; ordercol = ordercol->next)
{ {
@ -8956,7 +8901,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
Item_cond* icp = 0; Item_cond* icp = 0;
if (gi.groupByWhere) if (gi.groupByWhere)
icp = reinterpret_cast<Item_cond*>(gi.groupByWhere); icp = static_cast<Item_cond*>(gi.groupByWhere);
uint32_t sessionID = csep->sessionID(); uint32_t sessionID = csep->sessionID();
gwi.sessionid = sessionID; gwi.sessionid = sessionID;
@ -9432,7 +9377,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
case Item::FUNC_ITEM: case Item::FUNC_ITEM:
{ {
Item_func* ifp = reinterpret_cast<Item_func*>(item); Item_func* ifp = static_cast<Item_func*>(item);
// @bug4383. error out non-support stored function // @bug4383. error out non-support stored function
if (ifp->functype() == Item_func::FUNC_SP) if (ifp->functype() == Item_func::FUNC_SP)
@ -9563,7 +9508,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
} }
// DRRTUY Replace the whole section with typeid() checks or use // DRRTUY Replace the whole section with typeid() checks or use
// reinterpret_cast here // static_cast here
case Item::CONST_ITEM: case Item::CONST_ITEM:
{ {
switch (item->cmp_type()) switch (item->cmp_type())
@ -9640,7 +9585,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
if (join) if (join)
{ {
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds); Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond) if (cond)
cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX); cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -9761,7 +9706,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
if (gi.groupByHaving != 0) if (gi.groupByHaving != 0)
{ {
Item_cond* having = reinterpret_cast<Item_cond*>(gi.groupByHaving); Item_cond* having = static_cast<Item_cond*>(gi.groupByHaving);
#ifdef DEBUG_WALK_COND #ifdef DEBUG_WALK_COND
cerr << "------------------- HAVING ---------------------" << endl; cerr << "------------------- HAVING ---------------------" << endl;
having->traverse_cond(debug_walk, &gwi, Item::POSTFIX); having->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -9881,7 +9826,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
{ {
gwi.clauseType = GROUP_BY; gwi.clauseType = GROUP_BY;
Item* nonSupportItem = NULL; Item* nonSupportItem = NULL;
ORDER* groupcol = reinterpret_cast<ORDER*>(gi.groupByGroup); ORDER* groupcol = static_cast<ORDER*>(gi.groupByGroup);
// check if window functions are in order by. InfiniDB process order by list if // check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection. // window functions are involved, either in order by or projection.
@ -9903,7 +9848,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
} }
gwi.hasWindowFunc = hasWindowFunc; gwi.hasWindowFunc = hasWindowFunc;
groupcol = reinterpret_cast<ORDER*>(gi.groupByGroup); groupcol = static_cast<ORDER*>(gi.groupByGroup);
for (; groupcol; groupcol = groupcol->next) for (; groupcol; groupcol = groupcol->next)
{ {
@ -10156,7 +10101,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// ORDER BY processing starts here // ORDER BY processing starts here
{ {
ORDER* ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder); ORDER* ordercol = static_cast<ORDER*>(gi.groupByOrder);
// check if window functions are in order by. InfiniDB process order by list if // check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection. // window functions are involved, either in order by or projection.
@ -10167,7 +10112,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
} }
// re-visit the first of ordercol list // re-visit the first of ordercol list
ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder); ordercol = static_cast<ORDER*>(gi.groupByOrder);
// for subquery, order+limit by will be supported in infinidb. build order by columns // for subquery, order+limit by will be supported in infinidb. build order by columns
// @todo union order by and limit support // @todo union order by and limit support
@ -10444,7 +10389,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// MCOL-1052 // MCOL-1052
if (unionSel) if (unionSel)
{ {
ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder); ordercol = static_cast<ORDER*>(gi.groupByOrder);
} }
else else
ordercol = 0; ordercol = 0;
@ -10478,7 +10423,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// @bug 3518. if order by clause = selected column, use position. // @bug 3518. if order by clause = selected column, use position.
else if (ord_item->name.length && ord_item->type() == Item::FIELD_ITEM) else if (ord_item->name.length && ord_item->type() == Item::FIELD_ITEM)
{ {
Item_field* field = reinterpret_cast<Item_field*>(ord_item); Item_field* field = static_cast<Item_field*>(ord_item);
string fullname; string fullname;
if (field->db_name.str) if (field->db_name.str)

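Editor's note: most hunks above replace reinterpret_cast with static_cast when downcasting an Item pointer whose dynamic type has already been established through type(). Below is a minimal sketch of that pattern; Item and Item_func here are stand-ins, not the real MariaDB classes.

#include <iostream>

// Minimal stand-ins for the Item hierarchy; the real MariaDB classes differ.
struct Item
{
  enum Type { FIELD_ITEM, FUNC_ITEM };
  virtual ~Item() = default;
  virtual Type type() const = 0;
};

struct Item_func : Item
{
  Type type() const override { return FUNC_ITEM; }
  const char* func_name() const { return "if"; }
};

// Pattern applied throughout the diff: verify the dynamic type via type(),
// then downcast with static_cast.
void visit(Item* item)
{
  if (item->type() == Item::FUNC_ITEM)
  {
    Item_func* ifp = static_cast<Item_func*>(item);
    std::cout << ifp->func_name() << '\n';
  }
}

int main()
{
  Item_func f;
  visit(&f);
  return 0;
}

static_cast requires the source and destination types to be related and applies any base-to-derived pointer adjustment, so it cannot silently reuse a bit pattern the way reinterpret_cast can under multiple inheritance.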
View File

@ -414,7 +414,7 @@ execplan::ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info&
execplan::ConstantColumn* buildDecimalColumn(const Item* item, const std::string& str, gp_walk_info& gwi); execplan::ConstantColumn* buildDecimalColumn(const Item* item, const std::string& str, gp_walk_info& gwi);
execplan::SimpleColumn* buildSimpleColumn(Item_field* item, gp_walk_info& gwi); execplan::SimpleColumn* buildSimpleColumn(Item_field* item, gp_walk_info& gwi);
execplan::FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonSupport); execplan::FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ParseTree* buildParseTree(Item_func* item, gp_walk_info& gwi, bool& nonSupport); execplan::ParseTree* buildParseTree(Item* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi); execplan::ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi);
execplan::ReturnedColumn* buildWindowFunctionColumn(Item* item, gp_walk_info& gwi, bool& nonSupport); execplan::ReturnedColumn* buildWindowFunctionColumn(Item* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ReturnedColumn* buildPseudoColumn(Item* item, gp_walk_info& gwi, bool& nonSupport, execplan::ReturnedColumn* buildPseudoColumn(Item* item, gp_walk_info& gwi, bool& nonSupport,

View File

@ -314,14 +314,12 @@ void item_check(Item* item, bool* unsupported_feature)
{ {
case Item::COND_ITEM: case Item::COND_ITEM:
{ {
Item_cond* icp = reinterpret_cast<Item_cond*>(item); item->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
icp->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
break; break;
} }
case Item::FUNC_ITEM: case Item::FUNC_ITEM:
{ {
Item_func* ifp = reinterpret_cast<Item_func*>(item); item->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
ifp->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
break; break;
} }
default: default:
@ -352,9 +350,7 @@ bool check_user_var(SELECT_LEX* select_lex)
if (join->conds) if (join->conds)
{ {
Item_cond* icp = reinterpret_cast<Item_cond*>(join->conds); join->conds->traverse_cond(check_user_var_func, &is_user_var_func, Item::POSTFIX);
icp->traverse_cond(check_user_var_func, &is_user_var_func, Item::POSTFIX);
} }
return is_user_var_func; return is_user_var_func;
@ -420,23 +416,15 @@ group_by_handler* create_columnstore_group_by_handler(THD* thd, Query* query)
if (!unsupported_feature) if (!unsupported_feature)
{ {
JOIN* join = select_lex->join; JOIN* join = select_lex->join;
Item_cond* icp = 0;
if (join != 0) if (unsupported_feature == false && join && join->conds)
icp = reinterpret_cast<Item_cond*>(join->conds);
if (unsupported_feature == false && icp)
{ {
icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX); join->conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
} }
// Optimizer could move some join conditions into where if (unsupported_feature == false && select_lex->where)
if (select_lex->where != 0)
icp = reinterpret_cast<Item_cond*>(select_lex->where);
if (unsupported_feature == false && icp)
{ {
icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX); select_lex->where->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
} }
} }
@ -521,18 +509,16 @@ derived_handler* create_columnstore_derived_handler(THD* thd, TABLE_LIST* table_
{ {
if (tl->where) if (tl->where)
{ {
Item_cond* where_icp = reinterpret_cast<Item_cond*>(tl->where); tl->where->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
where_icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX); tl->where->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
where_icp->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
} }
// Looking for JOIN with ON expression through // Looking for JOIN with ON expression through
// TABLE_LIST in FROM until CS meets unsupported feature // TABLE_LIST in FROM until CS meets unsupported feature
if (tl->on_expr) if (tl->on_expr)
{ {
Item_cond* on_icp = reinterpret_cast<Item_cond*>(tl->on_expr); tl->on_expr->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
on_icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX); tl->on_expr->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
on_icp->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
} }
// Iterate and traverse through the item list and the JOIN cond // Iterate and traverse through the item list and the JOIN cond
@ -546,9 +532,8 @@ derived_handler* create_columnstore_derived_handler(THD* thd, TABLE_LIST* table_
if (!unsupported_feature && !join_preds_list.elements && join && join->conds) if (!unsupported_feature && !join_preds_list.elements && join && join->conds)
{ {
Item_cond* conds = reinterpret_cast<Item_cond*>(join->conds); join->conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX); join->conds->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
conds->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
} }
// CROSS JOIN w/o conditions isn't supported until MCOL-301 // CROSS JOIN w/o conditions isn't supported until MCOL-301
@ -768,7 +753,8 @@ select_handler* create_columnstore_select_handler_(THD* thd, SELECT_LEX* sel_lex
// Disable processing of select_result_interceptor classes // Disable processing of select_result_interceptor classes
// which intercept and transform result set rows. E.g.: // which intercept and transform result set rows. E.g.:
// select a,b into @a1, @a2 from t1; // select a,b into @a1, @a2 from t1;
if (((thd->lex)->result && !((select_dumpvar*)(thd->lex)->result)->var_list.is_empty()) && (!isPS)) select_dumpvar* dumpvar = dynamic_cast<select_dumpvar*>((thd->lex)->result);
if (dumpvar && !dumpvar->var_list.is_empty() && !isPS)
{ {
return nullptr; return nullptr;
} }

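Editor's note: two patterns recur in the hunks above, calling traverse_cond() directly on the Item* instead of first forging an Item_cond*, and checking the result of a dynamic_cast before using it. A small sketch of the latter follows; select_result and select_dumpvar below only mimic the shape of the MariaDB classes, and var_list_empty stands in for var_list.is_empty().

#include <iostream>

// Hypothetical stand-ins for the MariaDB result classes.
struct select_result
{
  virtual ~select_result() = default;
};

struct select_dumpvar : select_result
{
  bool var_list_empty = false;
};

// The diff replaces an unconditional C-style cast of thd->lex->result with a
// dynamic_cast plus a null check, so result objects of other types are no
// longer reinterpreted as select_dumpvar.
bool intercepts_rows(select_result* result)
{
  select_dumpvar* dumpvar = dynamic_cast<select_dumpvar*>(result);
  return dumpvar && !dumpvar->var_list_empty;
}

int main()
{
  select_result plain;
  select_dumpvar dump;
  std::cout << intercepts_rows(&plain) << ' ' << intercepts_rows(&dump) << '\n';  // prints "0 1"
  return 0;
}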
View File

@ -23,9 +23,6 @@ sub is_default { 0 }
sub start_test { sub start_test {
# we should guard this for --force-restart flag condition. # we should guard this for --force-restart flag condition.
my ($self, $tinfo)= @_; my ($self, $tinfo)= @_;
print "Here I am restarting Columnstore backend\n";
#system("systemctl", "restart", "mariadb-columnstore");
print "Columnstore restarted\n";
My::Suite::start_test(@_); My::Suite::start_test(@_);
} }

View File

@ -44,7 +44,7 @@ if (WITH_UBSAN)
endif (WITH_COLUMNSTORE_REPORT_PATH) endif (WITH_COLUMNSTORE_REPORT_PATH)
SET(LD_PRELOAD_STRING "") SET(LD_PRELOAD_STRING "")
SET(ALLOC_CONFIG "UBSAN_OPTIONS=abort_on_error=0") SET(ALLOC_CONFIG "UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true")
SET(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.primproc) SET(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.primproc)
SET(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.dmlproc) SET(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.dmlproc)
SET(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.ddlproc) SET(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.ddlproc)

View File

@ -63,12 +63,12 @@ class OamCache
dbRootPMMap_t dbRootPMMap; dbRootPMMap_t dbRootPMMap;
dbRootPMMap_t dbRootConnectionMap; dbRootPMMap_t dbRootConnectionMap;
PMDbrootsMap_t pmDbrootsMap; PMDbrootsMap_t pmDbrootsMap;
uint32_t numDBRoots; uint32_t numDBRoots = 1;
time_t mtime; time_t mtime = 0;
DBRootConfigList dbroots; DBRootConfigList dbroots;
std::vector<int> moduleIds; std::vector<int> moduleIds;
std::string OAMParentModuleName; std::string OAMParentModuleName;
int mLocalPMId; // The PM id running on this machine int mLocalPMId = 0; // The PM id running on this machine
std::string systemName; std::string systemName;
std::string moduleName; std::string moduleName;
}; };

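Editor's note: the OamCache change relies on in-class default member initializers so members read before the first refresh have defined values. A minimal sketch under that assumption; the real class has many more members and methods.

#include <cassert>
#include <cstdint>
#include <ctime>

class OamCacheSketch
{
  uint32_t numDBRoots = 1;  // defaults mirror the values added in the diff
  time_t mtime = 0;
  int mLocalPMId = 0;

 public:
  uint32_t dbRootCount() const { return numDBRoots; }
  time_t lastLoadTime() const { return mtime; }
  int localPMId() const { return mLocalPMId; }
};

int main()
{
  OamCacheSketch cache;  // members are well defined before any refresh
  assert(cache.dbRootCount() == 1 && cache.localPMId() == 0);
  assert(cache.lastLoadTime() == 0);
  return 0;
}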
View File

@ -597,6 +597,7 @@ void PrimitiveProcessor::p_Dictionary(const DictInput* in, vector<uint8_t>* out,
outValue = reinterpret_cast<DataValue*>(&(*out)[header.NBYTES]); outValue = reinterpret_cast<DataValue*>(&(*out)[header.NBYTES]);
outValue->isnull = sigptr.data == nullptr; outValue->isnull = sigptr.data == nullptr;
outValue->len = sigptr.len; outValue->len = sigptr.len;
if (sigptr.data != nullptr)
memcpy(outValue->data, sigptr.data, sigptr.len); memcpy(outValue->data, sigptr.data, sigptr.len);
header.NBYTES += sizeof(DataValue) + sigptr.len; header.NBYTES += sizeof(DataValue) + sigptr.len;
} }

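Editor's note: the guard added to p_Dictionary() avoids calling memcpy with a null source, which is undefined behaviour even when the length is zero and is exactly what UBSan reports for empty signatures. A standalone sketch of the same guard; copy_signature is an illustrative name, not engine API.

#include <cstdint>
#include <cstring>

// Skip the copy entirely when the source is null so the zero-length case
// stays well defined.
void copy_signature(uint8_t* dst, const uint8_t* src, uint32_t len)
{
  if (src != nullptr)
    std::memcpy(dst, src, len);
}

int main()
{
  uint8_t buf[8] = {};
  copy_signature(buf, nullptr, 0);  // no-op instead of UB
  return 0;
}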
View File

@ -3,7 +3,11 @@ mysql -e "create database if not exists test;"
SOCKET=`mysql -e "show variables like 'socket';" | grep socket | cut -f2` SOCKET=`mysql -e "show variables like 'socket';" | grep socket | cut -f2`
cd /usr/share/mysql/mysql-test cd /usr/share/mysql/mysql-test
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/basic | tee $CURRENT_DIR/mtr.basic.log 2>&1 ./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/basic $1 | tee $CURRENT_DIR/mtr.basic.log 2>&1
if [[ $1 != '' ]]; then
exit 1
fi
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/setup | tee $CURRENT_DIR/mtr.setup.log 2>&1 ./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/setup | tee $CURRENT_DIR/mtr.setup.log 2>&1
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/bugfixes | tee $CURRENT_DIR/mtr.bugfixes.log 2>&1 ./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/bugfixes | tee $CURRENT_DIR/mtr.bugfixes.log 2>&1
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/devregression | tee $CURRENT_DIR/mtr.devregression.log 2>&1 ./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/devregression | tee $CURRENT_DIR/mtr.devregression.log 2>&1

View File

@ -304,7 +304,7 @@ inline bool calc_time_diff(int64_t time1, int64_t time2, int l_sign, long long*
{ {
int64_t days; int64_t days;
bool neg; bool neg;
int64_t microseconds; int128_t microseconds;
uint64_t year1 = 0, month1 = 0, day1 = 0, hour1 = 0, min1 = 0, sec1 = 0, msec1 = 0; uint64_t year1 = 0, month1 = 0, day1 = 0, hour1 = 0, min1 = 0, sec1 = 0, msec1 = 0;

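Editor's note: widening microseconds to int128_t keeps the intermediate day-to-microsecond arithmetic from overflowing a 64-bit value. A rough sketch, assuming int128_t maps to the compiler's __int128 (as the engine's wide-integer support suggests); the function below is illustrative, not the real calc_time_diff.

#include <cstdint>
#include <iostream>

using int128 = __int128;  // stand-in for the engine's int128_t

long long diff_in_usec(int64_t days, int64_t usec_delta)
{
  // Promote before multiplying so days * 86'400'000'000 cannot wrap an int64_t.
  int128 microseconds = static_cast<int128>(days) * 86400000000LL + usec_delta;
  return static_cast<long long>(microseconds);
}

int main()
{
  std::cout << diff_in_usec(2, 500) << '\n';  // 172800000500
  return 0;
}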
View File

@ -1328,7 +1328,10 @@ inline void Row::setVarBinaryField(const uint8_t* val, uint32_t len, uint32_t co
if (len > getColumnWidth(colIndex)) if (len > getColumnWidth(colIndex))
len = getColumnWidth(colIndex); len = getColumnWidth(colIndex);
idbassert(val != nullptr || !len);
*((uint16_t*)&data[offsets[colIndex]]) = len; *((uint16_t*)&data[offsets[colIndex]]) = len;
if (val != nullptr)
memcpy(&data[offsets[colIndex] + 2], val, len); memcpy(&data[offsets[colIndex] + 2], val, len);
} }
} }

View File

@ -396,7 +396,7 @@ class mcsv1Context
mcsv1sdk::mcsv1_UDAF* func; mcsv1sdk::mcsv1_UDAF* func;
int32_t fParamCount; int32_t fParamCount;
std::vector<uint32_t> paramKeys; std::vector<uint32_t> paramKeys;
enum_mariadb_return_type mariadbReturnType; enum_mariadb_return_type mariadbReturnType = MYSQL_TYPE_LONGLONG;
uint32_t fCharsetNumber; uint32_t fCharsetNumber;
public: public:

View File

@ -315,8 +315,8 @@ struct BulkUpdateDBRootArg
/* Input Arg type for DBRM::createStripeColumnExtents() */ /* Input Arg type for DBRM::createStripeColumnExtents() */
struct CreateStripeColumnExtentsArgIn struct CreateStripeColumnExtentsArgIn
{ {
OID_t oid; // column OID OID_t oid = 0; // column OID
uint32_t width; // column width in bytes uint32_t width = 0; // column width in bytes
execplan::CalpontSystemCatalog::ColDataType colDataType; execplan::CalpontSystemCatalog::ColDataType colDataType;
}; };

View File

@ -65,12 +65,13 @@ ExtentStripeAlloc::~ExtentStripeAlloc()
// Add a column to be associated with the "stripe" allocations for "this" // Add a column to be associated with the "stripe" allocations for "this"
// ExtentStripeAlloc object. // ExtentStripeAlloc object.
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
void ExtentStripeAlloc::addColumn(OID colOID, int colWidth) void ExtentStripeAlloc::addColumn(OID colOID, int colWidth, datatypes::SystemCatalog::ColDataType colDataType)
{ {
boost::mutex::scoped_lock lock(fMapMutex); boost::mutex::scoped_lock lock(fMapMutex);
fColOIDs.push_back(colOID); fColOIDs.push_back(colOID);
fColWidths.push_back(colWidth); fColWidths.push_back(colWidth);
fColDataTypes.push_back(colDataType);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -161,6 +162,7 @@ int ExtentStripeAlloc::allocateExtent(OID oid, uint16_t dbRoot,
BRM::CreateStripeColumnExtentsArgIn colEntry; BRM::CreateStripeColumnExtentsArgIn colEntry;
colEntry.oid = fColOIDs[j]; colEntry.oid = fColOIDs[j];
colEntry.width = fColWidths[j]; colEntry.width = fColWidths[j];
colEntry.colDataType = fColDataTypes[j];
cols.push_back(colEntry); cols.push_back(colEntry);
} }

View File

@ -128,7 +128,7 @@ class ExtentStripeAlloc
* @param colOID Column OID to be added to extent allocation list. * @param colOID Column OID to be added to extent allocation list.
* @param colWidth Width of column associated with colOID. * @param colWidth Width of column associated with colOID.
*/ */
void addColumn(OID colOID, int colWidth); void addColumn(OID colOID, int colWidth, datatypes::SystemCatalog::ColDataType colDataType);
/** @brief Request an extent allocation for the specified OID and DBRoot. /** @brief Request an extent allocation for the specified OID and DBRoot.
* A "stripe" of extents for the corresponding table will be allocated * A "stripe" of extents for the corresponding table will be allocated
@ -159,6 +159,7 @@ class ExtentStripeAlloc
boost::mutex fMapMutex; // protects unordered map access boost::mutex fMapMutex; // protects unordered map access
std::vector<OID> fColOIDs; // Vector of column OIDs std::vector<OID> fColOIDs; // Vector of column OIDs
std::vector<int> fColWidths; // Widths associated with fColOIDs std::vector<int> fColWidths; // Widths associated with fColOIDs
std::vector<datatypes::SystemCatalog::ColDataType> fColDataTypes;
// unordered map where we collect the allocated extents // unordered map where we collect the allocated extents
std::tr1::unordered_multimap<OID, AllocExtEntry, AllocExtHasher> fMap; std::tr1::unordered_multimap<OID, AllocExtEntry, AllocExtHasher> fMap;

View File

@ -1249,7 +1249,7 @@ void TableInfo::addColumn(ColumnInfo* info)
fColumns.push_back(info); fColumns.push_back(info);
fNumberOfColumns = fColumns.size(); fNumberOfColumns = fColumns.size();
fExtentStrAlloc.addColumn(info->column.mapOid, info->column.width); fExtentStrAlloc.addColumn(info->column.mapOid, info->column.width, info->column.dataType);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
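Editor's note: the addColumn() changes thread the column data type through to DBRM so CreateStripeColumnExtentsArgIn::colDataType is no longer sent uninitialised. A toy sketch of keeping the parallel vectors in step; the names below are illustrative, not the real interfaces.

#include <cassert>
#include <cstdint>
#include <vector>

enum class ColDataType
{
  INT,
  VARCHAR
};

struct StripeAllocSketch
{
  std::vector<int32_t> oids;
  std::vector<int> widths;
  std::vector<ColDataType> types;

  // Push all three attributes together so later lookups by index stay valid,
  // mirroring how addColumn() now records the data type next to OID and width.
  void addColumn(int32_t oid, int width, ColDataType type)
  {
    oids.push_back(oid);
    widths.push_back(width);
    types.push_back(type);
  }
};

int main()
{
  StripeAllocSketch alloc;
  alloc.addColumn(3001, 4, ColDataType::INT);
  assert(alloc.oids.size() == alloc.types.size());
  return 0;
}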