
MCOL-5493: First portion of UBSan fixes (#2842)

Multiple UB fixes
Author: Leonid Fedorov
Date: 2023-06-02 17:02:09 +03:00
Committed by: GitHub
Parent: 0a2e9760ee
Commit: 8f93fc3623
31 changed files with 274 additions and 916 deletions
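Most of the execution-plan changes below follow one pattern: long constructor initializer lists are dropped in favour of in-class default member initializers, so members such as fLocalQuery, fFilters or fAsc can no longer be left indeterminate by a constructor that forgets them (reading an indeterminate value is undefined behavior). A minimal sketch of the pattern with a hypothetical class — names are illustrative, not taken from the tree:

// Sketch only: a hypothetical class illustrating the refactor, not code from this commit.
#include <cstdint>
#include <limits>

class PlanNodeSketch
{
 public:
  PlanNodeSketch() = default;  // safe: every member below already has a default

 private:
  // In-class default member initializers give each field a defined value no
  // matter which constructor runs, so a missing initializer-list entry can no
  // longer leave the member uninitialized.
  bool fDistinct = false;
  uint32_t fStatementID = 0;
  uint64_t fLimitNum = std::numeric_limits<uint64_t>::max();  // the "-1" sentinel
  int64_t fUMMemLimit = std::numeric_limits<int64_t>::max();
  void* fFilters = nullptr;
};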

View File

@ -270,7 +270,6 @@ ENDIF()
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-copy" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-deprecated-declarations" DEBUG RELEASE RELWITHDEBINFO MINSIZEREL)
MY_CHECK_AND_SET_COMPILER_FLAG("-Werror -Wall -Wextra")
SET (ENGINE_LDFLAGS "-Wl,--no-as-needed -Wl,--add-needed")
SET (ENGINE_DT_LIB datatypes)

View File

@ -40,9 +40,11 @@ optparse.define short=T long=tsan desc="Build with TSAN" variable=TSAN default=f
optparse.define short=U long=ubsan desc="Build with UBSAN" variable=UBSAN default=false value=true
optparse.define short=P long=report-path desc="Path for storing reports and profiles" variable=REPORT_PATH default="/core"
optparse.define short=N long=ninja desc="Build with ninja" variable=USE_NINJA default=false value=true
optparse.define short=T long=draw-deps desc="Draw dependencies graph" variable=DRAW_DEPS default=false value=true
optparse.define short=G long=draw-deps desc="Draw dependencies graph" variable=DRAW_DEPS default=false value=true
optparse.define short=M long=skip-smoke desc="Skip final smoke test" variable=SKIP_SMOKE default=false value=true
optparse.define short=n long=no-clean-install desc="Do not perform a clean install (keep existing db files)" variable=NO_CLEAN default=false value=true
optparse.define short=j long=parallel desc="Number of paralles for build" variable=CPUS default=$(getconf _NPROCESSORS_ONLN)
optparse.define short=F long=show-build-flags desc="Print CMake flags, while build" variable=PRINT_CMAKE_FLAGS default=false
source $( optparse.build )
@ -140,9 +142,9 @@ stop_service()
check_service()
{
if systemctl is-active --quiet $1; then
message "$1 service started$color_green OK $color_normal"
message "$1 $color_normal[$color_green OK $color_normal]"
else
error "$1 service failed"
message "$1 $color_normal[$color_red Fail $color_normal]"
service $1 status
fi
}
@ -154,8 +156,14 @@ start_service()
systemctl start mariadb-columnstore
systemctl start mariadb
check_service mariadb-columnstore
check_service mariadb
check_service mariadb-columnstore
check_service mcs-controllernode
check_service mcs-ddlproc
check_service mcs-dmlproc
check_service mcs-primproc
check_service mcs-workernode@1
check_service mcs-writeengineserver
}
clean_old_installation()
@ -290,10 +298,11 @@ build()
MDB_CMAKE_FLAGS="${MDB_CMAKE_FLAGS} -DRPM=sles15"
fi
if [[ $PRINT_CMAKE_FLAGS = true ]] ; then
message "Building with flags"
newline_array ${MDB_CMAKE_FLAGS[@]}
fi
local CPUS=$(getconf _NPROCESSORS_ONLN)
message "Configuring cmake silently"
${CMAKE_BIN_NAME} -DCMAKE_BUILD_TYPE=$MCS_BUILD_TYPE $MDB_CMAKE_FLAGS . | spinner
message_split
@ -400,7 +409,7 @@ fix_config_files()
if grep -q UBSAN $MDB_SERVICE_FILE; then
warn "MDB Server has UBSAN options in $MDB_SERVICE_FILE, check it's compatibility"
else
echo Environment="'UBSAN_OPTIONS=abort_on_error=0,log_path=${REPORT_PATH}/ubsan.mariadb'" >> $MDB_SERVICE_FILE
echo Environment="'UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true,log_path=${REPORT_PATH}/ubsan.mariadb'" >> $MDB_SERVICE_FILE
message "UBSAN options were added to $MDB_SERVICE_FILE"
fi
fi
@ -498,7 +507,7 @@ smoke()
message "Selecting magic numbers"
MAGIC=`mysql -N test < $MDB_SOURCE_PATH/storage/columnstore/columnstore/tests/scripts/smoke.sql`
if [[ $MAGIC == '42' ]] ; then
message "Great answer correct"
message "Great answer correct!"
else
warn "Smoke failed, answer is '$MAGIC'"
fi

View File

@ -325,8 +325,7 @@ void ArithmeticColumn::serialize(messageqcpp::ByteStream& b) const
ObjectReader::writeParseTree(fExpression, b);
b << fTableAlias;
b << fData;
const ByteStream::doublebyte tmp = fAsc;
b << tmp;
b << (uint8_t)fAsc;
}
void ArithmeticColumn::unserialize(messageqcpp::ByteStream& b)
@ -340,9 +339,7 @@ void ArithmeticColumn::unserialize(messageqcpp::ByteStream& b)
fExpression = ObjectReader::createParseTree(b);
b >> fTableAlias;
b >> fData;
ByteStream::doublebyte tmp;
b >> tmp;
fAsc = (tmp);
b >> (uint8_t&)fAsc;
fSimpleColumnList.clear();
fExpression->walk(getSimpleCols, &fSimpleColumnList);

View File

@ -187,7 +187,7 @@ class ArithmeticColumn : public ReturnedColumn
private:
std::string fTableAlias; // table alias for this column
bool fAsc; // asc flag for order by column
bool fAsc = false; // asc flag for order by column
std::string fData;
/** build expression tree
@ -272,7 +272,7 @@ class ArithmeticColumn : public ReturnedColumn
}
private:
ParseTree* fExpression;
ParseTree* fExpression = nullptr;
using TreeNode::evaluate;
void evaluate(rowgroup::Row& row)
{

View File

@ -96,8 +96,7 @@ CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(
const ReturnedColumnList& returnedCols, ParseTree* filters, const SelectList& subSelects,
const GroupByColumnList& groupByCols, ParseTree* having, const OrderByColumnList& orderByCols,
const string alias, const int location, const bool dependent)
: fLocalQuery(GLOBAL_QUERY)
, fReturnedCols(returnedCols)
: fReturnedCols(returnedCols)
, fFilters(filters)
, fSubSelects(subSelects)
, fGroupByCols(groupByCols)
@ -106,55 +105,15 @@ CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(
, fTableAlias(alias)
, fLocation(location)
, fDependent(dependent)
, fTxnID(-1)
, fTraceFlags(TRACE_NONE)
, fStatementID(0)
, fDistinct(false)
, fOverrideLargeSideEstimate(false)
, fDistinctUnionNum(0)
, fSubType(MAIN_SELECT)
, fLimitStart(0)
, fLimitNum(-1)
, fHasOrderBy(false)
, fStringScanThreshold(ULONG_MAX)
, fQueryType(SELECT)
, fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL)
, fStringTableThreshold(20)
, fOrderByThreads(1)
, fDJSSmallSideLimit(0)
, fDJSLargeSideLimit(0)
, fDJSPartitionSize(100 * 1024 * 1024)
, // 100MB mem usage for disk based join
fUMMemLimit(numeric_limits<int64_t>::max())
, fIsDML(false)
{
fUuid = QueryTeleClient::genUUID();
}
CalpontSelectExecutionPlan::CalpontSelectExecutionPlan(string data)
: fLocalQuery(GLOBAL_QUERY)
, fData(data)
, fTxnID(-1)
, fTraceFlags(TRACE_NONE)
, fStatementID(0)
, fDistinct(false)
, fOverrideLargeSideEstimate(false)
, fDistinctUnionNum(0)
, fSubType(MAIN_SELECT)
, fLimitStart(0)
, fLimitNum(-1)
, fHasOrderBy(false)
, fStringScanThreshold(ULONG_MAX)
, fQueryType(SELECT)
: fData(data)
, fPriority(querystats::DEFAULT_USER_PRIORITY_LEVEL)
, fStringTableThreshold(20)
, fOrderByThreads(1)
, fDJSSmallSideLimit(0)
, fDJSLargeSideLimit(0)
, fDJSPartitionSize(100 * 1024 * 1024)
, // 100MB mem usage for disk based join
fUMMemLimit(numeric_limits<int64_t>::max())
, fIsDML(false)
{
fUuid = QueryTeleClient::genUUID();
}

View File

@ -801,7 +801,7 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/**
* If set, then the local PM only option is turned on
*/
uint32_t fLocalQuery;
uint32_t fLocalQuery = GLOBAL_QUERY;
/**
* A list of ReturnedColumn objects
@ -822,7 +822,7 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/**
* A tree of Filter objects
*/
ParseTree* fFilters;
ParseTree* fFilters = nullptr;
/**
* A list of CalpontExecutionPlan objects
*/
@ -846,11 +846,11 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
/**
* An enum indicating the location of this select statement in the enclosing select statement
*/
int fLocation;
int fLocation = 0;
/**
* A flag indicating if this sub-select is dependent on the enclosing query or is constant
*/
bool fDependent;
bool fDependent = false;
/**
* SQL representation of this execution plan
@ -859,57 +859,57 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
static ColumnMap fColMap; // for getplan to use. class-wise map
ColumnMap fColumnMap; // for ExeMgr to use. not shared between objects
uint32_t fSessionID;
int fTxnID; // SQLEngine only needs the ID value
uint32_t fSessionID = 0;
int fTxnID = -1; // SQLEngine only needs the ID value
BRM::QueryContext fVerID;
// @bug5316. remove static
std::string fSchemaName;
std::string fTableName;
uint32_t fTraceFlags;
uint32_t fTraceFlags = TRACE_NONE;
/**
* One-up statementID number for this session (fSessionID)
*/
uint32_t fStatementID;
uint32_t fStatementID = 0;
RMParmVec frmParms;
TableList fTableList;
SelectList fDerivedTableList;
bool fDistinct;
bool fOverrideLargeSideEstimate;
bool fDistinct = false;
bool fOverrideLargeSideEstimate = false;
// for union
SelectList fUnionVec;
uint8_t fDistinctUnionNum;
uint8_t fDistinctUnionNum = 0;
// for subselect
uint64_t fSubType;
uint64_t fSubType = MAIN_SELECT;
std::string fDerivedTbAlias;
std::string fDerivedTbView;
// for limit
uint64_t fLimitStart;
uint64_t fLimitNum;
uint64_t fLimitStart = 0;
uint64_t fLimitNum = -1;
// for parent select order by
bool fHasOrderBy;
bool fHasOrderBy = false;
// for Select clause subquery
SelectList fSelectSubList;
// @bug3321, for string scan blocks
uint64_t fStringScanThreshold;
uint64_t fStringScanThreshold = ULONG_MAX;
// query type
uint32_t fQueryType;
uint32_t fQueryType = SELECT;
uint32_t fPriority;
uint32_t fStringTableThreshold;
uint32_t fStringTableThreshold = 20;
// for specific handlers processing, e.g. GROUP BY
bool fSpecHandlerProcessed;
uint32_t fOrderByThreads;
bool fSpecHandlerProcessed = false;
uint32_t fOrderByThreads = 1;
// Derived table involved in the query. For derived table optimization
std::vector<SCSEP> fSubSelectList;
@ -917,14 +917,12 @@ class CalpontSelectExecutionPlan : public CalpontExecutionPlan
boost::uuids::uuid fUuid;
/* Disk-based join vars */
uint64_t fDJSSmallSideLimit;
uint64_t fDJSLargeSideLimit;
uint64_t fDJSPartitionSize;
int64_t fUMMemLimit;
bool fIsDML;
long fTimeZone;
uint64_t fDJSSmallSideLimit = 0;
uint64_t fDJSLargeSideLimit = 0;
uint64_t fDJSPartitionSize = 100 * 1024 * 1024;
int64_t fUMMemLimit = numeric_limits<int64_t>::max();
bool fIsDML = false;
long fTimeZone = 0;
std::vector<execplan::ParseTree*> fDynamicParseTreeVec;
};

View File

@ -27,61 +27,11 @@ namespace joblist
//
static const int showOidInDataList_Index = std::ios_base::xalloc();
/*static*/
AnyDataList::DataListTypes AnyDataList::dlType(const DataList_t* dl)
{
if (dl == 0)
return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(BandedDataList)) return BANDED_DATALIST;
// if (typeid(*dl) == typeid(WorkingSetDataList)) return WORKING_SET_DATALIST;
if (typeid(*dl) == typeid(FifoDataList))
return FIFO_DATALIST;
// if (typeid(*dl) == typeid(BucketDataList)) return BUCKET_DATALIST;
// if (typeid(*dl) == typeid(ConstantDataList_t)) return CONSTANT_DATALIST;
// if (typeid(*dl) == typeid(SortedWSDL)) return SORTED_WORKING_SET_DATALIST;
// if (typeid(*dl) == typeid(ZonedDL)) return ZONED_DATALIST;
// if (typeid(*dl) == typeid(DeliveryWSDL)) return DELIVERYWSDL;
if (typeid(*dl) == typeid(RowGroupDL))
return ROWGROUP_DATALIST;
return UNKNOWN_DATALIST;
}
AnyDataList::DataListTypes AnyDataList::strDlType(const StrDataList* dl)
{
if (dl == 0)
return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(StringDataList)) return STRINGBANDED_DATALIST;
// if (typeid(*dl) == typeid(StringFifoDataList)) return STRINGFIFO_DATALIST;
// if (typeid(*dl) == typeid(StringBucketDataList)) return STRINGBUCKET_DATALIST;
if (typeid(*dl) == typeid(StrDataList))
return STRING_DATALIST;
// if (typeid(*dl) == typeid(StringConstantDataList_t)) return STRINGCONSTANT_DATALIST;
// if (typeid(*dl) == typeid(StringSortedWSDL)) return STRINGSORTED_WORKING_SET_DATALIST;
// if (typeid(*dl) == typeid(StringZonedDL)) return STRINGZONED_DATALIST;
return UNKNOWN_DATALIST;
}
// AnyDataList::DataListTypes AnyDataList::tupleDlType(const TupleDataList* dl)
//{
// if (dl == 0) return UNKNOWN_DATALIST;
// if (typeid(*dl) == typeid(TupleBucketDataList)) return TUPLEBUCKET_DATALIST;
// return UNKNOWN_DATALIST;
//}
std::ostream& operator<<(std::ostream& oss, const AnyDataListSPtr& dl)
{
DataList_t* dle = NULL;
StrDataList* dls = NULL;
// DoubleDataList * dld = NULL;
// TupleBucketDataList * dlt = NULL;
bool withOid = (oss.iword(showOidInDataList_Index) != 0);
if ((dle = dl->dataList()) != NULL)
if (auto* dle = dl->rowGroupDL(); dle != nullptr)
{
if (withOid)
oss << dle->OID() << " ";
@ -95,50 +45,8 @@ std::ostream& operator<<(std::ostream& oss, const AnyDataListSPtr& dl)
elemSizeStr << "(" << dle->getDiskElemSize1st() << "," << dle->getDiskElemSize2nd() << ")";
}
oss << "(0x" << std::hex << (ptrdiff_t)dle << std::dec << "[" << AnyDataList::dlType(dle) << "]"
<< elemSizeStr.str() << ")";
oss << "(0x" << std::hex << (ptrdiff_t)dle << std::dec << elemSizeStr.str() << ")";
}
else if ((dls = dl->stringDataList()) != NULL)
{
if (withOid)
oss << dls->OID() << " ";
//...If this datalist is saved to disk, then include the saved
//...element size in the printed information.
std::ostringstream elemSizeStr;
if (dls->useDisk())
{
elemSizeStr << "(" << dls->getDiskElemSize1st() << "," << dls->getDiskElemSize2nd() << ")";
}
oss << "(0x" << std::hex << (ptrdiff_t)dls << std::dec << "[" << AnyDataList::strDlType(dls) << "]"
<< elemSizeStr.str() << ")";
}
// else if ((dld = dl->doubleDL()) != NULL)
// {
// if (withOid)
// oss << dld->OID() << " ";
//
// //...If this datalist is saved to disk, then include the saved
// //...element size in the printed information.
// std::ostringstream elemSizeStr;
// if ( dld->useDisk() )
// {
// elemSizeStr << "(" << dld->getDiskElemSize1st() << "," <<
// dld->getDiskElemSize2nd() << ")";
// }
//
// oss << "(0x"
// << std::hex << (ptrdiff_t)dld << std::dec << "[" <<
// AnyDataList::DOUBLE_DATALIST << "])";
// }
// else if ((dlt = dl->tupleBucketDL()) != NULL)
// {
// oss << dlt->OID() << " (0x";
// oss << std::hex << (ptrdiff_t)dlt << std::dec << "[" << AnyDataList::TUPLEBUCKET_DATALIST << "]),
//";
// }
else
{
oss << "0 (0x0000 [0])";

View File

@ -346,187 +346,29 @@ namespace joblist
class AnyDataList
{
public:
AnyDataList() : fDl3(0), fDl6(0), fDl9(0), fDisown(false)
{
}
~AnyDataList()
{
if (!fDisown)
{
delete fDl3;
delete fDl6;
delete fDl9;
}
}
AnyDataList() = default;
// AnyDataList() : fDl1(0), fDl2(0), fDl3(0), fDl4(0), fDl5(0), fDl6(0), fDl7(0), fDl8(0), fDl9(0),
// fDl10(0), fDl11(0), fDl12(0), fDl13(0), fDl14(0), fDl15(0), fDl16(0), fDl17(0), fDl18(0),
// fDl19(0), fDl20(0), fDisown(false) { }
// ~AnyDataList() { if (!fDisown) { delete fDl1; delete fDl2; delete fDl3; delete fDl4;
// delete fDl5; delete fDl6; delete fDl7; delete fDl8; delete fDl9; delete fDl10; delete fDl11;
// delete fDl12; delete fDl13; delete fDl14; delete fDl15; delete fDl16; delete fDl17;
// delete fDl18; delete fDl19; delete fDl20; } }
~AnyDataList() = default;
// disown() fixes the problem of multiple ownership of a single DL,
// or one on the stack
// In the world of bad ideas these are at the top. The whole point of this class is to manage
// dynamically allocated data in an automatic way. These 2 methods circumvent this, and they
// are not necessary in any event, because you can safely share AnyDataList's via a AnyDataListSPtr.
inline void disown() __attribute__((deprecated))
{
fDisown = true;
}
inline void posess() __attribute__((deprecated))
{
fDisown = false;
}
// inline void bandedDL(BandedDataList* dl) { fDl1 = dl; }
// inline BandedDataList* bandedDL() { return fDl1; }
// inline const BandedDataList* bandedDL() const { return fDl1; }
//
// inline void workingSetDL(WorkingSetDataList* dl) { fDl2 = dl; }
// inline WorkingSetDataList* workingSetDL() { return fDl2; }
// inline const WorkingSetDataList* workingSetDL() const { return fDl2; }
//
inline void fifoDL(FifoDataList* dl)
{
fDl3 = dl;
}
inline FifoDataList* fifoDL()
{
return fDl3;
}
inline const FifoDataList* fifoDL() const
{
return fDl3;
}
//
// inline void bucketDL(BucketDataList* dl) { fDl4 = dl; }
// inline BucketDataList* bucketDL() { return fDl4; }
// inline const BucketDataList* bucketDL() const { return fDl4; }
//
// inline void constantDL(ConstantDataList_t* dl) { fDl5 = dl; }
// inline ConstantDataList_t* constantDL() { return fDl5; }
// inline const ConstantDataList_t* constantDL() const { return fDl5; }
//
// inline void sortedWSDL(SortedWSDL* dl) { fDl13 = dl; }
// inline SortedWSDL* sortedWSDL() { return fDl13; }
// inline const SortedWSDL* sortedWSDL() const { return fDl13; }
//
// inline void zonedDL(ZonedDL* dl) { fDl15 = dl; }
// inline ZonedDL* zonedDL() { return fDl15; }
// inline const ZonedDL* zonedDL() const { return fDl15; }
//
inline void stringDL(StringFifoDataList* dl)
{
fDl6 = dl;
}
inline StringFifoDataList* stringDL()
{
return fDl6;
}
inline const StringFifoDataList* stringDL() const
{
return fDl6;
}
//
// inline void stringBandedDL(StringDataList* dl) { fDl10 = dl; }
// inline StringDataList* stringBandedDL() { return fDl10; }
// inline const StringDataList* stringBandedDL() const { return fDl10; }
//
// inline void stringBucketDL(StringBucketDataList* dl) { fDl11 = dl; }
// inline StringBucketDataList* stringBucketDL() { return fDl11; }
// inline const StringBucketDataList* stringBucketDL() const { return fDl11; }
//
// inline void stringConstantDL(StringConstantDataList_t* dl) { fDl12 = dl; }
// inline StringConstantDataList_t* stringConstantDL() { return fDl12; }
// inline const StringConstantDataList_t* stringConstantDL() const { return fDl12; }
//
// inline void stringSortedWSDL(StringSortedWSDL* dl) { fDl14 = dl; }
// inline StringSortedWSDL* stringSortedWSDL() { return fDl14; }
// inline const StringSortedWSDL* stringSortedWSDL() const { return fDl14; }
//
// inline void stringZonedDL(StringZonedDL* dl) { fDl16 = dl; }
// inline StringZonedDL* stringZonedDL() { return fDl16; }
// inline const StringZonedDL* stringZonedDL() const { return fDl16; }
//
// inline void tupleBucketDL(TupleBucketDataList* dl) { fDl18 = dl; }
// inline TupleBucketDataList* tupleBucketDL() { return fDl18; }
// inline const TupleBucketDataList* tupleBucketDL() const { return fDl18; }
//
// inline void deliveryWSDL(DeliveryWSDL *dl) { fDl19 = dl; }
// inline DeliveryWSDL * deliveryWSDL() { return fDl19; }
// inline const DeliveryWSDL * deliveryWSDL() const { return fDl19; }
inline void rowGroupDL(boost::shared_ptr<RowGroupDL> dl)
{
fDl20 = dl;
fDatalist = dl;
}
inline void rowGroupDL(RowGroupDL* dl)
{
fDl20.reset(dl);
fDatalist.reset(dl);
}
inline RowGroupDL* rowGroupDL()
{
return fDl20.get();
return fDatalist.get();
}
inline const RowGroupDL* rowGroupDL() const
{
return fDl20.get();
return fDatalist.get();
}
DataList_t* dataList()
{
if (fDl3 != NULL)
return reinterpret_cast<DataList_t*>(fDl3);
else if (fDl9 != NULL)
return fDl9;
return reinterpret_cast<DataList_t*>(fDl20.get());
// if (fDl1 != NULL) return fDl1;
// else if (fDl2 != NULL) return fDl2;
// else if (fDl3 != NULL) return reinterpret_cast<DataList_t*>(fDl3);
// else if (fDl4 != NULL) return fDl4;
// else if (fDl9 != NULL) return fDl9;
// else if (fDl13 != NULL) return fDl13;
// else if (fDl15 != NULL) return fDl15;
// else if (fDl19 != NULL) return fDl19;
// else if (fDl20 != NULL) return reinterpret_cast<DataList_t*>(fDl20);
// else return fDl5;
}
//
StrDataList* stringDataList()
{
// if (fDl6 != NULL) return reinterpret_cast<StrDataList*>(fDl6);
// else if (fDl10 != NULL) return fDl10;
// else if (fDl11 != NULL) return fDl11;
// else if (fDl12 != NULL) return fDl12;
// else if (fDl14 != NULL) return fDl14;
// else if (fDl16 != NULL) return fDl16;
// return fDl8;
return reinterpret_cast<StrDataList*>(fDl6);
}
//
// TupleDataList* tupleDataList() {
// if (fDl18 != NULL) return fDl18;
// return fDl17;
// }
//
// /* fDl{7,8} store base class pointers. For consistency, maybe strDataList
// should consider fDl6 also. */
// inline StrDataList * strDataList()
// { return fDl8; }
//
// inline void strDataList(StrDataList *d)
// { fDl8 = d; }
//
// inline DoubleDataList * doubleDL()
// { return fDl7; }
//
// inline void doubleDL(DoubleDataList *d)
// { fDl7 = d; }
enum DataListTypes
{
@ -552,34 +394,10 @@ class AnyDataList
ROWGROUP_DATALIST
};
static DataListTypes dlType(const DataList_t* dl);
static DataListTypes strDlType(const StrDataList* dl);
// static DataListTypes tupleDlType(const TupleDataList* dl);
uint32_t getNumConsumers()
{
// if (fDl1 != NULL) return fDl1->getNumConsumers();
// else if (fDl2 != NULL) return fDl2->getNumConsumers();
// else if (fDl3 != NULL) return fDl3->getNumConsumers();
// else if (fDl6 != NULL) return fDl6->getNumConsumers();
// else if (fDl10 != NULL) return fDl10->getNumConsumers();
// else if (fDl13 != NULL) return fDl13->getNumConsumers();
// else if (fDl14 != NULL) return fDl14->getNumConsumers();
// else if (fDl15 != NULL) return fDl15->getNumConsumers();
// else if (fDl16 != NULL) return fDl16->getNumConsumers();
// else if (fDl4 != NULL) return 1;
// else if (fDl11 != NULL) return 1;
// else if (fDl18 != NULL) return 1;
// else if (fDl19 != NULL) return fDl19->getNumConsumers();
// else if (fDl20 != NULL) return 1;
// else return 0;
if (fDl20)
if (fDatalist)
return 1;
else if (fDl3 != NULL)
return fDl3->getNumConsumers();
else if (fDl6 != NULL)
return fDl6->getNumConsumers();
return 0;
}
@ -592,27 +410,7 @@ class AnyDataList
private:
AnyDataList(const AnyDataList& rhs);
AnyDataList& operator=(const AnyDataList& rhs);
// BandedDataList* fDl1;
// WorkingSetDataList* fDl2;
FifoDataList* fDl3;
// BucketDataList* fDl4;
// ConstantDataList_t* fDl5;
StringFifoDataList* fDl6;
// DoubleDataList* fDl7;
// StrDataList* fDl8;
DataList_t* fDl9;
// StringDataList* fDl10;
// StringBucketDataList* fDl11;
// StringConstantDataList_t* fDl12;
// SortedWSDL* fDl13;
// StringSortedWSDL* fDl14;
// ZonedDL* fDl15;
// StringZonedDL* fDl16;
// TupleDataList* fDl17;
// TupleBucketDataList *fDl18;
// DeliveryWSDL *fDl19;
boost::shared_ptr<RowGroupDL> fDl20;
boost::shared_ptr<RowGroupDL> fDatalist;
bool fDisown;
};
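The AnyDataList changes above drop the raw FifoDataList/StringFifoDataList/DataList_t members that the destructor had to delete by hand and route everything through a single boost::shared_ptr<RowGroupDL>, with defaulted constructor and destructor. A rough sketch of the resulting shape, with RowGroupDL stubbed and the class simplified:

// Sketch only; RowGroupDL is stubbed and the interface is reduced to the rowGroupDL accessors.
#include <boost/shared_ptr.hpp>

struct RowGroupDL
{
};

class AnyDataListSketch
{
 public:
  AnyDataListSketch() = default;   // nothing to zero out by hand
  ~AnyDataListSketch() = default;  // the shared_ptr releases the datalist

  void rowGroupDL(boost::shared_ptr<RowGroupDL> dl)
  {
    fDatalist = dl;
  }
  void rowGroupDL(RowGroupDL* dl)
  {
    fDatalist.reset(dl);  // takes ownership of the raw pointer
  }
  RowGroupDL* rowGroupDL()
  {
    return fDatalist.get();
  }

 private:
  // One owning member replaces the hand-deleted fDl3/fDl6/fDl9/fDl20 pointers.
  boost::shared_ptr<RowGroupDL> fDatalist;
};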

View File

@ -624,21 +624,15 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
{
// not strings, no need for dictionary steps, output fifo datalist
AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc1->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl1);
pcs1->outputAssociation(outJs1);
AnyDataListSPtr spdl2(new AnyDataList());
FifoDataList* dl2 = new FifoDataList(1, jobInfo.fifoSize);
spdl2->fifoDL(dl2);
dl2->OID(sc2->oid());
JobStepAssociation outJs2;
outJs2.outAdd(spdl2);
pcs2->outputAssociation(outJs2);
pcs2->inputAssociation(outJs1);
@ -682,9 +676,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl11);
@ -692,9 +683,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
JobStepAssociation outJs2;
outJs2.outAdd(spdl12);
@ -715,9 +703,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3;
outJs3.outAdd(spdl21);
@ -730,9 +715,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 2 (pdictionarystep) output
AnyDataListSPtr spdl22(new AnyDataList());
StringFifoDataList* dl22 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl22->stringDL(dl22);
dl22->OID(sc2->oid());
JobStepAssociation outJs4;
outJs4.outAdd(spdl22);
@ -789,9 +771,7 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl11);
@ -799,9 +779,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
JobStepAssociation outJs2;
outJs2.outAdd(spdl12);
@ -814,9 +791,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3;
outJs3.outAdd(spdl21);
@ -858,9 +832,7 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// extra steps for string column greater than eight bytes -- from token to string
// data list for column 1 step 1 (pcolstep) output
AnyDataListSPtr spdl11(new AnyDataList());
FifoDataList* dl11 = new FifoDataList(1, jobInfo.fifoSize);
spdl11->fifoDL(dl11);
dl11->OID(sc1->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl11);
@ -868,9 +840,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 1 step 2 (pdictionarystep) output
AnyDataListSPtr spdl12(new AnyDataList());
StringFifoDataList* dl12 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl12->stringDL(dl12);
dl12->OID(sc1->oid());
pDictionaryStep* pdss2 = new pDictionaryStep(dictOid2, tableOid2, ct2, jobInfo);
jobInfo.keyInfo->dictOidToColOid[dictOid2] = sc2->oid();
@ -882,9 +851,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 1 (pcolstep) output
AnyDataListSPtr spdl21(new AnyDataList());
FifoDataList* dl21 = new FifoDataList(1, jobInfo.fifoSize);
spdl21->fifoDL(dl21);
dl21->OID(sc2->oid());
JobStepAssociation outJs3;
outJs3.outAdd(spdl21);
@ -897,9 +863,6 @@ const JobStepVector doColFilter(const SimpleColumn* sc1, const SimpleColumn* sc2
// data list for column 2 step 2 (pdictionarystep) output
AnyDataListSPtr spdl22(new AnyDataList());
StringFifoDataList* dl22 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl22->stringDL(dl22);
dl22->OID(sc2->oid());
JobStepAssociation outJs4;
outJs4.outAdd(spdl22);
@ -1471,17 +1434,17 @@ bool optimizeIdbPatitionSimpleFilter(SimpleFilter* sf, JobStepVector& jsv, JobIn
if (sf->op()->op() != opeq.op())
return false;
const FunctionColumn* fc = static_cast<const FunctionColumn*>(sf->lhs());
const ConstantColumn* cc = static_cast<const ConstantColumn*>(sf->rhs());
const FunctionColumn* fc = dynamic_cast<const FunctionColumn*>(sf->lhs());
const ConstantColumn* cc = dynamic_cast<const ConstantColumn*>(sf->rhs());
if (fc == NULL)
if (fc == nullptr)
{
cc = static_cast<const ConstantColumn*>(sf->lhs());
fc = static_cast<const FunctionColumn*>(sf->rhs());
cc = dynamic_cast<const ConstantColumn*>(sf->lhs());
fc = dynamic_cast<const FunctionColumn*>(sf->rhs());
}
// not a function or not idbparttition
if (fc == NULL || cc == NULL || fc->functionName().compare("idbpartition") != 0)
if (fc == nullptr || cc == nullptr || fc->functionName().compare("idbpartition") != 0)
return false;
// make sure the cc has 3 tokens
@ -1608,9 +1571,6 @@ const JobStepVector doSimpleFilter(SimpleFilter* sf, JobInfo& jobInfo)
// data list for pcolstep output
AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl1);
@ -1618,9 +1578,6 @@ const JobStepVector doSimpleFilter(SimpleFilter* sf, JobInfo& jobInfo)
// data list for pdictionarystep output
AnyDataListSPtr spdl2(new AnyDataList());
StringFifoDataList* dl2 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl2->stringDL(dl2);
dl2->OID(sc->oid());
JobStepAssociation outJs2;
outJs2.outAdd(spdl2);
@ -2712,9 +2669,6 @@ const JobStepVector doConstantFilter(const ConstantFilter* cf, JobInfo& jobInfo)
// data list for pcolstep output
AnyDataListSPtr spdl1(new AnyDataList());
FifoDataList* dl1 = new FifoDataList(1, jobInfo.fifoSize);
spdl1->fifoDL(dl1);
dl1->OID(sc->oid());
JobStepAssociation outJs1;
outJs1.outAdd(spdl1);
@ -2722,9 +2676,6 @@ const JobStepVector doConstantFilter(const ConstantFilter* cf, JobInfo& jobInfo)
// data list for pdictionarystep output
AnyDataListSPtr spdl2(new AnyDataList());
StringFifoDataList* dl2 = new StringFifoDataList(1, jobInfo.fifoSize);
spdl2->stringDL(dl2);
dl2->OID(sc->oid());
JobStepAssociation outJs2;
outJs2.outAdd(spdl2);
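In optimizeIdbPatitionSimpleFilter above, unconditional static_casts of the filter's operands are replaced by dynamic_casts followed by null checks, because the operands are only sometimes a FunctionColumn/ConstantColumn pair. A minimal sketch of that guard pattern, using placeholder types rather than the real column classes:

// Sketch with placeholder types; the real check involves FunctionColumn/ConstantColumn.
struct ReturnedColumn
{
  virtual ~ReturnedColumn() = default;
};
struct FunctionColumn : ReturnedColumn
{
};
struct ConstantColumn : ReturnedColumn
{
};

bool looksLikePartitionEq(const ReturnedColumn* lhs, const ReturnedColumn* rhs)
{
  // dynamic_cast yields nullptr on a type mismatch instead of a bogus pointer,
  // so "operands swapped" and "not that shape at all" are detected, not UB.
  const FunctionColumn* fc = dynamic_cast<const FunctionColumn*>(lhs);
  const ConstantColumn* cc = dynamic_cast<const ConstantColumn*>(rhs);

  if (fc == nullptr)  // maybe the constant is on the left instead
  {
    cc = dynamic_cast<const ConstantColumn*>(lhs);
    fc = dynamic_cast<const FunctionColumn*>(rhs);
  }

  return fc != nullptr && cc != nullptr;
}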

View File

@ -201,19 +201,8 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++)
{
ptrdiff_t dloutptr;
DataList_t* dlout = qsi->get()->outputAssociation().outAt(i)->dataList();
uint32_t numConsumers = qsi->get()->outputAssociation().outAt(i)->getNumConsumers();
if (dlout)
{
dloutptr = (ptrdiff_t)dlout;
}
else
{
StrDataList* sdl = qsi->get()->outputAssociation().outAt(i)->stringDataList();
dloutptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlout = qsi->get()->outputAssociation().outAt(i)->rowGroupDL();
ptrdiff_t dloutptr = (ptrdiff_t)dlout;
for (unsigned int k = 0; k < querySteps.size(); k++)
{
@ -222,31 +211,12 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < queryInputSA.outSize(); j++)
{
ptrdiff_t dlinptr;
DataList_t* dlin = queryInputSA.outAt(j)->dataList();
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = queryInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlin = queryInputSA.outAt(j)->rowGroupDL();
ptrdiff_t dlinptr = (ptrdiff_t)dlin;;
if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr)
{
dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
}
}
}
@ -258,32 +228,12 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{
ptrdiff_t dlinptr;
DataList_t* dlin = projectInputSA.outAt(j)->dataList();
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
ptrdiff_t dlinptr = (ptrdiff_t)dlin;;
if (dloutptr == dlinptr)
// if ((ptrdiff_t)dlout == (ptrdiff_t)dlin)
{
dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
}
}
}
@ -359,19 +309,8 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int i = 0; i < psi->get()->outputAssociation().outSize(); i++)
{
ptrdiff_t dloutptr;
DataList_t* dlout = psi->get()->outputAssociation().outAt(i)->dataList();
uint32_t numConsumers = psi->get()->outputAssociation().outAt(i)->getNumConsumers();
if (dlout)
{
dloutptr = (ptrdiff_t)dlout;
}
else
{
StrDataList* sdl = psi->get()->outputAssociation().outAt(i)->stringDataList();
dloutptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlout = psi->get()->outputAssociation().outAt(i)->rowGroupDL();
ptrdiff_t dloutptr = (ptrdiff_t)dlout;
for (unsigned int k = ctn + 1; k < projectSteps.size(); k++)
{
@ -380,31 +319,13 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{
ptrdiff_t dlinptr;
DataList_t* dlin = projectInputSA.outAt(j)->dataList();
StrDataList* sdl = 0;
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
ptrdiff_t dlinptr = (ptrdiff_t)dlin;
if ((ptrdiff_t)dloutptr == (ptrdiff_t)dlinptr)
{
dotFile << stepidIn << " -> " << stepidOut;
if (dlin)
{
dotFile << " [label=\"[" << AnyDataList::dlType(dlin) << "/" << numConsumers << "]\"]" << endl;
}
else
{
dotFile << " [label=\"[" << AnyDataList::strDlType(sdl) << "/" << numConsumers << "]\"]"
<< endl;
}
}
}
}

View File

@ -775,22 +775,15 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int i = 0; i < qsi->get()->outputAssociation().outSize(); i++)
{
ptrdiff_t dloutptr = 0;
DataList_t* dlout;
StrDataList* sdl;
RowGroupDL* dlout;
// TupleDataList* tdl;
if ((dlout = qsi->get()->outputAssociation().outAt(i)->dataList()))
if ((dlout = qsi->get()->outputAssociation().outAt(i)->rowGroupDL()))
{
dloutptr = (ptrdiff_t)dlout;
outSize = dlout->totalSize();
diskIo = dlout->totalDiskIoTime(saveTime, loadTime);
}
else if ((sdl = qsi->get()->outputAssociation().outAt(i)->stringDataList()))
{
dloutptr = (ptrdiff_t)sdl;
outSize = sdl->totalSize();
diskIo = sdl->totalDiskIoTime(saveTime, loadTime);
}
// if HashJoinStep, determine if output fifo was cached to disk
bool hjTempDiskFlag = false;
@ -803,15 +796,10 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int j = 0; j < queryInputSA.outSize(); j++)
{
ptrdiff_t dlinptr = 0;
DataList_t* dlin = queryInputSA.outAt(j)->dataList();
StrDataList* sdl = 0;
RowGroupDL* dlin = queryInputSA.outAt(j)->rowGroupDL();
if (dlin)
dlinptr = (ptrdiff_t)dlin;
else if ((sdl = queryInputSA.outAt(j)->stringDataList()))
{
dlinptr = (ptrdiff_t)sdl;
}
if (dloutptr == dlinptr)
{
@ -856,19 +844,8 @@ void JobList::graph(uint32_t sessionID)
for (unsigned int j = 0; j < projectInputSA.outSize(); j++)
{
ptrdiff_t dlinptr;
DataList_t* dlin = projectInputSA.outAt(j)->dataList();
StrDataList* sdl = 0;
if (dlin)
{
dlinptr = (ptrdiff_t)dlin;
}
else
{
sdl = projectInputSA.outAt(j)->stringDataList();
dlinptr = (ptrdiff_t)sdl;
}
RowGroupDL* dlin = projectInputSA.outAt(j)->rowGroupDL();
ptrdiff_t dlinptr = (ptrdiff_t)dlin;
if (dloutptr == dlinptr)
{

View File

@ -310,44 +310,6 @@ pColScanStep::pColScanStep(const pColStep& rhs) : JobStep(rhs), fRm(rhs.resource
fTraceFlags = rhs.fTraceFlags;
}
void pColScanStep::addFilters()
{
AnyDataListSPtr dl = fInputJobStepAssociation.outAt(0);
DataList_t* bdl = dl->dataList();
idbassert(bdl);
int it = -1;
bool more;
ElementType e;
int64_t token;
try
{
it = bdl->getIterator();
}
catch (std::exception& ex)
{
cerr << "pColScanStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
throw;
}
catch (...)
{
cerr << "pColScanStep::addFilters: caught exception" << endl;
throw;
}
fBOP = BOP_OR;
more = bdl->next(it, &e);
while (more)
{
token = e.second;
addFilter(COMPARE_EQ, token);
more = bdl->next(it, &e);
}
return;
}
bool pColScanStep::isEmptyVal(const uint8_t* val8) const
{
const int width = fColType.colWidth;

View File

@ -498,77 +498,6 @@ const string pColStep::toString() const
return oss.str();
}
void pColStep::addFilters()
{
AnyDataListSPtr dl = fInputJobStepAssociation.outAt(0);
DataList_t* bdl = dl->dataList();
FifoDataList* fifo = fInputJobStepAssociation.outAt(0)->fifoDL();
idbassert(bdl);
int it = -1;
bool more;
ElementType e;
int64_t token;
if (fifo != NULL)
{
try
{
it = fifo->getIterator();
}
catch (exception& ex)
{
cerr << "pColStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
}
catch (...)
{
cerr << "pColStep::addFilters: caught exception" << endl;
}
fBOP = BOP_OR;
UintRowGroup rw;
more = fifo->next(it, &rw);
while (more)
{
for (uint64_t i = 0; i < rw.count; ++i)
addFilter(COMPARE_EQ, (int64_t)rw.et[i].second);
more = fifo->next(it, &rw);
}
}
else
{
try
{
it = bdl->getIterator();
}
catch (exception& ex)
{
cerr << "pColStep::addFilters: caught exception: " << ex.what() << " stepno: " << fStepId << endl;
}
catch (...)
{
cerr << "pColStep::addFilters: caught exception" << endl;
}
fBOP = BOP_OR;
more = bdl->next(it, &e);
while (more)
{
token = e.second;
addFilter(COMPARE_EQ, token);
more = bdl->next(it, &e);
}
}
return;
}
/* This exists to avoid a DBRM lookup for every rid. */
inline uint64_t pColStep::getLBID(uint64_t rid, bool& scan)
{

View File

@ -259,9 +259,6 @@ class pColStep : public JobStep
return fFilters;
}
protected:
void addFilters();
private:
/** @brief constructor for completeness
*/
@ -293,7 +290,7 @@ class pColStep : public JobStep
// Running with this one will swallow rows at projection.
bool fSwallowRows;
bool isFilterFeeder;
bool isFilterFeeder = false;
uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP
uint64_t fMsgBytesIn; // total byte count for incoming messages
uint64_t fMsgBytesOut; // total byte count for outcoming messages
@ -482,8 +479,6 @@ class pColScanStep : public JobStep
return fFilters;
}
protected:
void addFilters();
private:
// defaults okay?
@ -518,7 +513,7 @@ class pColScanStep : public JobStep
uint32_t extentSize, divShift, ridsPerBlock, rpbShift, numExtents;
// config::Config *fConfig;
bool isFilterFeeder;
bool isFilterFeeder = false;
uint64_t fNumBlksSkipped; // total number of block scans skipped due to CP
uint64_t fMsgBytesIn; // total byte count for incoming messages
uint64_t fMsgBytesOut; // total byte count for outcoming messages
@ -1233,7 +1228,7 @@ class TupleBPS : public BatchPrimitive, public TupleDeliveryStep
uint32_t fMaxNumThreads;
uint32_t fNumThreads;
PrimitiveStepType ffirstStepType;
bool isFilterFeeder;
bool isFilterFeeder = false;
std::vector<uint64_t> fProducerThreads; // thread pool handles
std::vector<uint64_t> fProcessorThreads;
messageqcpp::ByteStream fFilterString;

View File

@ -2157,8 +2157,8 @@ void TupleBPS::processByteStreamVector(vector<boost::shared_ptr<messageqcpp::Byt
vector<rowgroup::RGData> fromPrimProc;
auto data = getJoinLocalDataByIndex(threadID);
bool validCPData;
bool hasBinaryColumn;
bool validCPData = false;
bool hasBinaryColumn = false;
int128_t min;
int128_t max;
uint64_t lbid;
@ -2203,8 +2203,8 @@ void TupleBPS::processByteStreamVector(vector<boost::shared_ptr<messageqcpp::Byt
return;
}
bool unused;
bool fromDictScan;
bool unused = false;
bool fromDictScan = false;
fromPrimProc.clear();
fBPP->getRowGroupData(*bs, &fromPrimProc, &validCPData, &lbid, &fromDictScan, &min, &max, &cachedIO,
&physIO, &touchedBlocks, &unused, threadID, &hasBinaryColumn, fColType);
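The TupleBPS change above initializes the bool out-parameters (validCPData, hasBinaryColumn, fromDictScan, unused) before their addresses are handed to getRowGroupData, so a code path that skips writing one of them no longer leaves the caller testing an indeterminate value. A tiny illustration of the hazard, with a made-up callee that only assigns on some paths:

// Sketch only: readHeader is hypothetical, not a ColumnStore API.
#include <cstdint>

void readHeader(const uint8_t* bs, bool* valid, int64_t* lbid)
{
  if (bs == nullptr)
    return;  // early exit: *valid and *lbid are never written
  *valid = true;
  *lbid = 42;
}

int64_t process(const uint8_t* bs)
{
  bool valid = false;  // initialized, so the test below is defined on every path
  int64_t lbid = 0;
  readHeader(bs, &valid, &lbid);
  return valid ? lbid : -1;
}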

View File

@ -271,7 +271,11 @@ void TupleHashJoinStep::startSmallRunners(uint index)
std::shared_ptr<TupleJoiner> joiner;
jt = joinTypes[index];
if (traceOn())
{
extendedInfo += toString();
}
if (typelessJoin[index])
{
@ -349,36 +353,47 @@ void TupleHashJoinStep::startSmallRunners(uint index)
" size = " << joiner->size() << endl;
*/
if (traceOn())
{
extendedInfo += "\n";
}
ostringstream oss;
if (!joiner->onDisk())
{
// add extended info, and if not aborted then tell joiner
// we're done reading the small side.
if (traceOn())
{
if (joiner->inPM())
{
{
oss << "PM join (" << index << ")" << endl;
#ifdef JLF_DEBUG
#ifdef JLF_DEBUG
cout << oss.str();
#endif
#endif
extendedInfo += oss.str();
}
}
else if (joiner->inUM())
{
oss << "UM join (" << index << ")" << endl;
#ifdef JLF_DEBUG
#ifdef JLF_DEBUG
cout << oss.str();
#endif
#endif
extendedInfo += oss.str();
}
}
if (!cancelled())
joiner->doneInserting();
}
if (traceOn())
{
boost::mutex::scoped_lock lk(*fStatsMutexPtr);
fExtendedInfo += extendedInfo;
formatMiniStats(index);
}
}
/* Index is which small input to read. */
@ -1099,7 +1114,8 @@ const string TupleHashJoinStep::toString() const
for (size_t i = 0; i < idlsz; ++i)
{
RowGroupDL* idl = fInputJobStepAssociation.outAt(i)->rowGroupDL();
const AnyDataListSPtr& dl = fInputJobStepAssociation.outAt(i);
RowGroupDL* idl = dl->rowGroupDL();
CalpontSystemCatalog::OID oidi = 0;
if (idl)
@ -1111,7 +1127,7 @@ const string TupleHashJoinStep::toString() const
oss << "*";
oss << "tb/col:" << fTableOID1 << "/" << oidi;
oss << " " << fInputJobStepAssociation.outAt(i);
oss << " " << dl;
}
idlsz = fOutputJobStepAssociation.outSize();

View File

@ -343,7 +343,7 @@ void getColNameFromItem(std::ostringstream& ostream, Item* item)
}
else
{
Item_ident* iip = reinterpret_cast<Item_ident*>(item);
Item_ident* iip = static_cast<Item_ident*>(item);
if (iip->db_name.str)
ostream << iip->db_name.str << '.';
@ -386,13 +386,11 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
return found;
}
Item_func* ifp_sort = reinterpret_cast<Item_func*>(sort_item);
// base cases for Item_field and Item_ref. The second arg is binary cmp switch
found = group_item->eq(sort_item, false);
if (!found && sort_item->type() == Item::REF_ITEM)
{
Item_ref* ifp_sort_ref = reinterpret_cast<Item_ref*>(sort_item);
Item_ref* ifp_sort_ref = static_cast<Item_ref*>(sort_item);
found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item);
}
else if (!found && sort_item->type() == Item::FIELD_ITEM)
@ -400,6 +398,8 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
return found;
}
Item_func* ifp_sort = static_cast<Item_func*>(sort_item);
// seeking for a group_item match
for (uint32_t i = 0; !found && i < ifp_sort->argument_count(); i++)
{
@ -412,7 +412,7 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
else if (ifp_sort_arg->type() == Item::REF_ITEM)
{
// dereference the Item
Item_ref* ifp_sort_ref = reinterpret_cast<Item_ref*>(ifp_sort_arg);
Item_ref* ifp_sort_ref = static_cast<Item_ref*>(ifp_sort_arg);
found = sortItemIsInGroupRec(*ifp_sort_ref->ref, group_item);
}
}
@ -432,14 +432,14 @@ bool sortItemIsInGroupRec(Item* sort_item, Item* group_item)
*********************************************************/
void check_sum_func_item(const Item* item, void* arg)
{
bool* found = reinterpret_cast<bool*>(arg);
bool* found = static_cast<bool*>(arg);
if (*found)
return;
if (item->type() == Item::REF_ITEM)
{
const Item_ref* ref_item = reinterpret_cast<const Item_ref*>(item);
const Item_ref* ref_item = static_cast<const Item_ref*>(item);
Item* ref_item_item = (Item*)*ref_item->ref;
if (ref_item_item->type() == Item::SUM_FUNC_ITEM)
{
@ -479,7 +479,7 @@ bool sortItemIsInGrouping(Item* sort_item, ORDER* groupcol)
// e.g. select a, if (sum(b) > 1, 2, 1) from t1 group by 1 order by 2;
if (sort_item->type() == Item::FUNC_ITEM)
{
Item_func* ifp = reinterpret_cast<Item_func*>(sort_item);
Item_func* ifp = static_cast<Item_func*>(sort_item);
ifp->traverse_cond(check_sum_func_item, &found, Item::POSTFIX);
}
else if (sort_item->type() == Item::CONST_ITEM || sort_item->type() == Item::WINDOW_FUNC_ITEM)
@ -528,11 +528,11 @@ ReturnedColumn* buildAggFrmTempField(Item* item, gp_walk_info& gwi)
switch (item->type())
{
case Item::FIELD_ITEM: ifip = reinterpret_cast<Item_field*>(item); break;
case Item::FIELD_ITEM: ifip = static_cast<Item_field*>(item); break;
default:
irip = reinterpret_cast<Item_ref*>(item);
irip = static_cast<Item_ref*>(item);
if (irip)
ifip = reinterpret_cast<Item_field*>(irip->ref[0]);
ifip = static_cast<Item_field*>(irip->ref[0]);
break;
}
@ -541,7 +541,7 @@ ReturnedColumn* buildAggFrmTempField(Item* item, gp_walk_info& gwi)
std::vector<Item*>::iterator iter = gwi.extSelAggColsItems.begin();
for (; iter != gwi.extSelAggColsItems.end(); iter++)
{
isfp = reinterpret_cast<Item_func_or_sum*>(*iter);
isfp = static_cast<Item_func_or_sum*>(*iter);
if (isfp->type() == Item::SUM_FUNC_ITEM && isfp->result_field == ifip->field)
{
@ -1044,7 +1044,7 @@ void debug_walk(const Item* item, void* arg)
if (join)
{
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds);
Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond)
cond->traverse_cond(debug_walk, arg, Item::POSTFIX);
@ -1724,7 +1724,7 @@ bool buildEqualityPredicate(execplan::ReturnedColumn* lhs, execplan::ReturnedCol
boost::shared_ptr<Operator>& sop, const Item_func::Functype& funcType,
const vector<Item*>& itemList, bool isInSubs)
{
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
// push the column that is associated with the correlated column to the returned
// column list, so the materialized view have the complete projection list.
@ -2048,7 +2048,7 @@ bool buildPredicateItem(Item_func* ifp, gp_walk_info* gwip)
else if (ifp->functype() == Item_func::GUSERVAR_FUNC)
{
Item_func_get_user_var* udf = reinterpret_cast<Item_func_get_user_var*>(ifp);
Item_func_get_user_var* udf = static_cast<Item_func_get_user_var*>(ifp);
String buf;
if (udf->result_type() == INT_RESULT)
@ -2801,7 +2801,7 @@ void setError(THD* thd, uint32_t errcode, string errmsg)
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
}
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
ci->expressionId = 0;
}
@ -3195,12 +3195,11 @@ CalpontSystemCatalog::ColType colType_MysqlToIDB(const Item* item)
*/
case DECIMAL_RESULT:
{
Item_decimal* idp = (Item_decimal*)item;
// decimal result do not shows us Item is Item_decimal
ct.colDataType = CalpontSystemCatalog::DECIMAL;
unsigned int precision = idp->decimal_precision();
unsigned int scale = idp->decimal_scale();
unsigned int precision = item->decimal_precision();
unsigned int scale = item->decimal_scale();
ct.setDecimalScalePrecision(precision, scale);
@ -3612,7 +3611,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
}
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
ArithmeticColumn* ac = new ArithmeticColumn();
Item** sfitempp = item->arguments();
@ -3638,8 +3637,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
if (!lhs->data() && (sfitempp[0]->type() == Item::FUNC_ITEM))
{
delete lhs;
Item_func* ifp = (Item_func*)sfitempp[0];
lhs = buildParseTree(ifp, gwi, nonSupport);
lhs = buildParseTree(sfitempp[0], gwi, nonSupport);
}
else if (!lhs->data() && (sfitempp[0]->type() == Item::REF_ITEM))
{
@ -3657,8 +3655,7 @@ ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info& gwi, bool
if (!rhs->data() && (sfitempp[1]->type() == Item::FUNC_ITEM))
{
delete rhs;
Item_func* ifp = (Item_func*)sfitempp[1];
rhs = buildParseTree(ifp, gwi, nonSupport);
rhs = buildParseTree(sfitempp[1], gwi, nonSupport);
}
else if (!rhs->data() && (sfitempp[1]->type() == Item::REF_ITEM))
{
@ -3875,7 +3872,7 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
}
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
string funcName = ifp->func_name();
FuncExp* funcExp = FuncExp::instance();
@ -3923,7 +3920,7 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
else if ((funcName == "charset" || funcName == "collation") && ifp->argument_count() == 1 &&
ifp->arguments()[0]->type() == Item::FIELD_ITEM)
{
Item_field* item = reinterpret_cast<Item_field*>(ifp->arguments()[0]);
Item_field* item = static_cast<Item_field*>(ifp->arguments()[0]);
CHARSET_INFO* info = item->charset_for_protocol();
ReturnedColumn* rc;
string val;
@ -4047,9 +4044,10 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
// @todo. merge this logic to buildParseTree().
if ((funcName == "if" && i == 0) || funcName == "xor")
{
// make sure the rcWorkStack is cleaned.
gwi.clauseType = WHERE;
sptp.reset(buildParseTree((Item_func*)(ifp->arguments()[i]), gwi, nonSupport));
sptp.reset(buildParseTree(ifp->arguments()[i], gwi, nonSupport));
gwi.clauseType = clauseType;
if (!sptp)
@ -4155,50 +4153,6 @@ ReturnedColumn* buildFunctionColumn(Item_func* ifp, gp_walk_info& gwi, bool& non
addIntervalArgs(&gwi, ifp, funcParms);
}
// check for unsupported arguments add the keyword unit argument for extract functions
if (funcName == "extract")
{
Item_date_add_interval* idai = (Item_date_add_interval*)ifp;
switch (idai->int_type)
{
case INTERVAL_DAY_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with DAY_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
case INTERVAL_HOUR_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with HOUR_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
case INTERVAL_MINUTE_MICROSECOND:
{
nonSupport = true;
gwi.fatalParseError = true;
Message::Args args;
string info = funcName + " with MINUTE_MICROSECOND parameter";
args.add(info);
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORTED_FUNCTION, args);
return NULL;
}
default: break;
}
}
// add the keyword unit argument and char length for cast functions
if (funcName == "cast_as_char")
{
@ -4496,7 +4450,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
}
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
FunctionColumn* fc = new FunctionColumn();
FunctionParm funcParms;
@ -4555,7 +4509,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
// to pull off of rcWorkStack, so we set this inCaseStmt flag to tell it
// not to.
gwi.inCaseStmt = true;
sptp.reset(buildParseTree((Item_func*)(item->arguments()[i]), gwi, nonSupport));
sptp.reset(buildParseTree(item->arguments()[i], gwi, nonSupport));
gwi.inCaseStmt = false;
if (!gwi.ptWorkStack.empty() && *gwi.ptWorkStack.top() == *sptp.get())
{
@ -4591,7 +4545,7 @@ FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonS
}
else
{
sptp.reset(buildParseTree((Item_func*)(item->arguments()[i]), gwi, nonSupport));
sptp.reset(buildParseTree(item->arguments()[i], gwi, nonSupport));
// We need to pop whichever stack is holding it, if any.
if ((!gwi.ptWorkStack.empty()) && *gwi.ptWorkStack.top()->data() == sptp->data())
@ -4800,19 +4754,18 @@ SimpleColumn* buildSimpleColumn(Item_field* ifp, gp_walk_info& gwi)
return sc;
}
ParseTree* buildParseTree(Item_func* item, gp_walk_info& gwi, bool& nonSupport)
ParseTree* buildParseTree(Item* item, gp_walk_info& gwi, bool& nonSupport)
{
ParseTree* pt = 0;
Item_cond* icp = (Item_cond*)item;
#ifdef DEBUG_WALK_COND
// debug
cerr << "Build Parsetree: " << endl;
icp->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
item->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
#endif
//@bug5044. PPSTFIX walking should always be treated as WHERE clause filter
ClauseType clauseType = gwi.clauseType;
gwi.clauseType = WHERE;
icp->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
item->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
gwi.clauseType = clauseType;
if (gwi.fatalParseError)
@ -4924,9 +4877,9 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
thd_set_ha_data(current_thd, mcs_hton, get_fe_conn_info_ptr());
}
cal_connection_info* ci = reinterpret_cast<cal_connection_info*>(get_fe_conn_info_ptr());
cal_connection_info* ci = static_cast<cal_connection_info*>(get_fe_conn_info_ptr());
Item_sum* isp = reinterpret_cast<Item_sum*>(item);
Item_sum* isp = static_cast<Item_sum*>(item);
Item** sfitempp = isp->get_orig_args();
SRCP parm;
@ -5156,7 +5109,7 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
{
case Item::FIELD_ITEM:
{
Item_field* ifp = reinterpret_cast<Item_field*>(sfitemp);
Item_field* ifp = static_cast<Item_field*>(sfitemp);
SimpleColumn* sc = buildSimpleColumn(ifp, gwi);
if (!sc)
@ -5580,6 +5533,8 @@ because it has multiple arguments.";
return ac;
}
void addIntervalArgs(gp_walk_info* gwip, Item_func* ifp, FunctionParm& functionParms)
{
string funcName = ifp->func_name();
@ -5678,7 +5633,7 @@ bool isNotFuncAndConstScalarSubSelect(Item_func* ifp, const std::string& funcNam
void gp_walk(const Item* item, void* arg)
{
gp_walk_info* gwip = reinterpret_cast<gp_walk_info*>(arg);
gp_walk_info* gwip = static_cast<gp_walk_info*>(arg);
idbassert(gwip);
// Bailout...
@ -5745,8 +5700,8 @@ void gp_walk(const Item* item, void* arg)
{
case INT_RESULT:
{
Item_int* iip = (Item_int*)item;
gwip->rcWorkStack.push(buildReturnedColumn(iip, *gwip, gwip->fatalParseError));
Item* non_const_item = const_cast<Item*>(item);
gwip->rcWorkStack.push(buildReturnedColumn(non_const_item, *gwip, gwip->fatalParseError));
break;
}
@ -5755,19 +5710,17 @@ void gp_walk(const Item* item, void* arg)
// Special handling for 0xHHHH literals
if (item->type_handler() == &type_handler_hex_hybrid)
{
Item_hex_hybrid* hip = reinterpret_cast<Item_hex_hybrid*>(const_cast<Item*>(item));
Item_hex_hybrid* hip = static_cast<Item_hex_hybrid*>(const_cast<Item*>(item));
gwip->rcWorkStack.push(new ConstantColumn((int64_t)hip->val_int(), ConstantColumn::NUM));
ConstantColumn* cc = dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top());
cc->timeZone(gwip->timeZone);
break;
}
Item_string* isp = (Item_string*)item;
if (isp)
{
if (isp->result_type() == STRING_RESULT)
if (item->result_type() == STRING_RESULT)
{
// dangerous cast here
Item* isp = const_cast<Item*>(item);
String val, *str = isp->val_str(&val);
if (str)
{
@ -5788,7 +5741,6 @@ void gp_walk(const Item* item, void* arg)
(dynamic_cast<ConstantColumn*>(gwip->rcWorkStack.top()))->timeZone(gwip->timeZone);
break;
}
}
gwip->rcWorkStack.push(buildReturnedColumn(isp, *gwip, gwip->fatalParseError));
}
@ -5796,25 +5748,14 @@ void gp_walk(const Item* item, void* arg)
}
case REAL_RESULT:
{
Item_float* ifp = (Item_float*)item;
gwip->rcWorkStack.push(buildReturnedColumn(ifp, *gwip, gwip->fatalParseError));
break;
}
case DECIMAL_RESULT:
{
Item_decimal* idp = (Item_decimal*)item;
gwip->rcWorkStack.push(buildReturnedColumn(idp, *gwip, gwip->fatalParseError));
break;
}
case TIME_RESULT:
{
Item_temporal_literal* itp = (Item_temporal_literal*)item;
gwip->rcWorkStack.push(buildReturnedColumn(itp, *gwip, gwip->fatalParseError));
Item* nonConstItem = const_cast<Item*>(item);
gwip->rcWorkStack.push(buildReturnedColumn(nonConstItem, *gwip, gwip->fatalParseError));
break;
}
default:
{
if (gwip->condPush)
@ -5853,14 +5794,16 @@ void gp_walk(const Item* item, void* arg)
case Item::FUNC_ITEM:
{
Item_func* ifp = (Item_func*)item;
Item* ncitem = const_cast<Item*>(item);
Item_func* ifp = static_cast<Item_func*>(ncitem);
string funcName = ifp->func_name();
if (!gwip->condPush)
{
if (!ifp->fixed())
{
ifp->fix_fields(gwip->thd, reinterpret_cast<Item**>(&ifp));
ifp->fix_fields(gwip->thd, &ncitem);
}
// Special handling for queries of the form:
@ -6176,11 +6119,15 @@ void gp_walk(const Item* item, void* arg)
if (col->type() == Item::FIELD_ITEM)
{
const auto& field_name = string(((Item_field*)item)->field_name.str);
const Item_ident* ident_field = dynamic_cast<const Item_ident*>(item);
if (ident_field)
{
const auto& field_name = string(ident_field->field_name.str);
auto colMap = CalpontSelectExecutionPlan::ColumnMap::value_type(field_name, scsp);
gwip->columnMap.insert(colMap);
}
}
}
bool cando = true;
gwip->clauseType = clauseType;
@ -6252,8 +6199,7 @@ void gp_walk(const Item* item, void* arg)
}
else if (col->type() == Item::COND_ITEM)
{
Item_func* ifp = (Item_func*)col;
gwip->ptWorkStack.push(buildParseTree(ifp, *gwip, gwip->fatalParseError));
gwip->ptWorkStack.push(buildParseTree(col, *gwip, gwip->fatalParseError));
}
else if (col->type() == Item::FIELD_ITEM && gwip->clauseType == HAVING)
{
@ -6404,7 +6350,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
{
case Item::FIELD_ITEM:
{
Item_field* ifp = reinterpret_cast<Item_field*>(item);
Item_field* ifp = static_cast<Item_field*>(item);
field_vec.push_back(ifp);
return;
}
@ -6413,7 +6359,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
{
// hasAggColumn = true;
parseInfo |= AGG_BIT;
Item_sum* isp = reinterpret_cast<Item_sum*>(item);
Item_sum* isp = static_cast<Item_sum*>(item);
Item** sfitempp = isp->arguments();
for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6424,7 +6370,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
case Item::FUNC_ITEM:
{
Item_func* isp = reinterpret_cast<Item_func*>(item);
Item_func* isp = static_cast<Item_func*>(item);
if (string(isp->func_name()) == "<in_optimizer>")
{
@ -6441,7 +6387,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
case Item::COND_ITEM:
{
Item_cond* icp = reinterpret_cast<Item_cond*>(item);
Item_cond* icp = static_cast<Item_cond*>(item);
List_iterator_fast<Item> it(*(icp->argument_list()));
Item* cond_item;
@ -6465,13 +6411,13 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
if ((*(ref->ref))->type() == Item::SUM_FUNC_ITEM)
{
parseInfo |= AGG_BIT;
Item_sum* isp = reinterpret_cast<Item_sum*>(*(ref->ref));
Item_sum* isp = static_cast<Item_sum*>(*(ref->ref));
Item** sfitempp = isp->arguments();
// special handling for count(*). This should not be treated as constant.
if (isSupportedAggregateWithOneConstArg(isp, sfitempp))
{
field_vec.push_back((Item_field*)item); // dummy
field_vec.push_back(nullptr); // dummy
}
for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6490,14 +6436,14 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
if (!rc)
{
Item_field* ifp = reinterpret_cast<Item_field*>(*(ref->ref));
Item_field* ifp = static_cast<Item_field*>(*(ref->ref));
field_vec.push_back(ifp);
}
break;
}
else if ((*(ref->ref))->type() == Item::FUNC_ITEM)
{
Item_func* isp = reinterpret_cast<Item_func*>(*(ref->ref));
Item_func* isp = static_cast<Item_func*>(*(ref->ref));
Item** sfitempp = isp->arguments();
for (uint32_t i = 0; i < isp->argument_count(); i++)
@ -6507,7 +6453,7 @@ void parse_item(Item* item, vector<Item_field*>& field_vec, bool& hasNonSupportI
}
else if ((*(ref->ref))->type() == Item::CACHE_ITEM)
{
Item_cache* isp = reinterpret_cast<Item_cache*>(*(ref->ref));
Item_cache* isp = static_cast<Item_cache*>(*(ref->ref));
parse_item(isp->get_example(), field_vec, hasNonSupportItem, parseInfo, gwi);
break;
}
@ -7580,7 +7526,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
case Item::FUNC_ITEM:
{
Item_func* ifp = reinterpret_cast<Item_func*>(item);
Item_func* ifp = static_cast<Item_func*>(item);
// @bug4383. error out non-support stored function
if (ifp->functype() == Item_func::FUNC_SP)
@ -7714,7 +7660,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
} // End of FUNC_ITEM
// DRRTUY Replace the whole section with typeid() checks or use
// reinterpret_cast here
// static_cast here
case Item::CONST_ITEM:
{
switch (item->cmp_type())
@ -7792,7 +7738,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (join)
{
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds);
Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond)
cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -7928,13 +7874,12 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (select_lex.having != 0)
{
Item_cond* having = reinterpret_cast<Item_cond*>(select_lex.having);
#ifdef DEBUG_WALK_COND
cerr << "------------------- HAVING ---------------------" << endl;
having->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
select_lex.having->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
cerr << "------------------------------------------------\n" << endl;
#endif
having->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
select_lex.having->traverse_cond(gp_walk, &gwi, Item::POSTFIX);
if (gwi.fatalParseError)
{
@ -8090,7 +8035,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
{
gwi.clauseType = GROUP_BY;
Item* nonSupportItem = NULL;
ORDER* groupcol = reinterpret_cast<ORDER*>(select_lex.group_list.first);
ORDER* groupcol = static_cast<ORDER*>(select_lex.group_list.first);
// check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection.
@ -8112,7 +8057,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
}
gwi.hasWindowFunc = hasWindowFunc;
groupcol = reinterpret_cast<ORDER*>(select_lex.group_list.first);
groupcol = static_cast<ORDER*>(select_lex.group_list.first);
for (; groupcol; groupcol = groupcol->next)
{
@ -8366,7 +8311,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
// ORDER BY processing
{
SQL_I_List<ORDER> order_list = select_lex.order_list;
ORDER* ordercol = reinterpret_cast<ORDER*>(order_list.first);
ORDER* ordercol = static_cast<ORDER*>(order_list.first);
// check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection.
@ -8393,7 +8338,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
}
// re-visit the first of ordercol list
ordercol = reinterpret_cast<ORDER*>(order_list.first);
ordercol = static_cast<ORDER*>(order_list.first);
{
for (; ordercol; ordercol = ordercol->next)
@ -8551,7 +8496,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
if (unionSel)
order_list = select_lex.master_unit()->global_parameters()->order_list;
ordercol = reinterpret_cast<ORDER*>(order_list.first);
ordercol = static_cast<ORDER*>(order_list.first);
for (; ordercol; ordercol = ordercol->next)
{
@ -8956,7 +8901,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
Item_cond* icp = 0;
if (gi.groupByWhere)
icp = reinterpret_cast<Item_cond*>(gi.groupByWhere);
icp = static_cast<Item_cond*>(gi.groupByWhere);
uint32_t sessionID = csep->sessionID();
gwi.sessionid = sessionID;
@ -9432,7 +9377,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
case Item::FUNC_ITEM:
{
Item_func* ifp = reinterpret_cast<Item_func*>(item);
Item_func* ifp = static_cast<Item_func*>(item);
// @bug4383. error out non-support stored function
if (ifp->functype() == Item_func::FUNC_SP)
@ -9563,7 +9508,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
}
// DRRTUY Replace the whole section with typeid() checks or use
// reinterpret_cast here
// static_cast here
case Item::CONST_ITEM:
{
switch (item->cmp_type())
@ -9640,7 +9585,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
if (join)
{
Item_cond* cond = reinterpret_cast<Item_cond*>(join->conds);
Item_cond* cond = static_cast<Item_cond*>(join->conds);
if (cond)
cond->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -9761,7 +9706,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
if (gi.groupByHaving != 0)
{
Item_cond* having = reinterpret_cast<Item_cond*>(gi.groupByHaving);
Item_cond* having = static_cast<Item_cond*>(gi.groupByHaving);
#ifdef DEBUG_WALK_COND
cerr << "------------------- HAVING ---------------------" << endl;
having->traverse_cond(debug_walk, &gwi, Item::POSTFIX);
@ -9881,7 +9826,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
{
gwi.clauseType = GROUP_BY;
Item* nonSupportItem = NULL;
ORDER* groupcol = reinterpret_cast<ORDER*>(gi.groupByGroup);
ORDER* groupcol = static_cast<ORDER*>(gi.groupByGroup);
// check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection.
@ -9903,7 +9848,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
}
gwi.hasWindowFunc = hasWindowFunc;
groupcol = reinterpret_cast<ORDER*>(gi.groupByGroup);
groupcol = static_cast<ORDER*>(gi.groupByGroup);
for (; groupcol; groupcol = groupcol->next)
{
@ -10156,7 +10101,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// ORDER BY processing starts here
{
ORDER* ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder);
ORDER* ordercol = static_cast<ORDER*>(gi.groupByOrder);
// check if window functions are in order by. InfiniDB process order by list if
// window functions are involved, either in order by or projection.
@ -10167,7 +10112,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
}
// re-visit the first of ordercol list
ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder);
ordercol = static_cast<ORDER*>(gi.groupByOrder);
// for subquery, order+limit by will be supported in infinidb. build order by columns
// @todo union order by and limit support
@ -10444,7 +10389,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// MCOL-1052
if (unionSel)
{
ordercol = reinterpret_cast<ORDER*>(gi.groupByOrder);
ordercol = static_cast<ORDER*>(gi.groupByOrder);
}
else
ordercol = 0;
@ -10478,7 +10423,7 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
// @bug 3518. if order by clause = selected column, use position.
else if (ord_item->name.length && ord_item->type() == Item::FIELD_ITEM)
{
Item_field* field = reinterpret_cast<Item_field*>(ord_item);
Item_field* field = static_cast<Item_field*>(ord_item);
string fullname;
if (field->db_name.str)

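The recurring change in this file swaps reinterpret_cast for static_cast when downcasting inside the Item hierarchy after the type() tag has already been checked. A minimal sketch of the pattern, using stand-in classes rather than the real server types: static_cast applies the hierarchy-aware base-to-derived conversion, while reinterpret_cast reuses the raw address unchanged, which can trip UBSan's checks on polymorphic classes.

#include <cassert>

struct Item { virtual ~Item() = default; virtual int type() const = 0; };
struct Item_func : Item
{
  int type() const override { return 1; }            // stands in for Item::FUNC_ITEM
  const char* func_name() const { return "func"; }
};

// Once the type() tag has been verified, the static_cast downcast is well
// defined; this mirrors the gp_walk()/parse_item() changes above.
inline const char* funcNameOf(const Item* item)
{
  assert(item->type() == 1);
  const Item_func* ifp = static_cast<const Item_func*>(item);
  return ifp->func_name();
}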
View File

@ -414,7 +414,7 @@ execplan::ArithmeticColumn* buildArithmeticColumn(Item_func* item, gp_walk_info&
execplan::ConstantColumn* buildDecimalColumn(const Item* item, const std::string& str, gp_walk_info& gwi);
execplan::SimpleColumn* buildSimpleColumn(Item_field* item, gp_walk_info& gwi);
execplan::FunctionColumn* buildCaseFunction(Item_func* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ParseTree* buildParseTree(Item_func* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ParseTree* buildParseTree(Item* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi);
execplan::ReturnedColumn* buildWindowFunctionColumn(Item* item, gp_walk_info& gwi, bool& nonSupport);
execplan::ReturnedColumn* buildPseudoColumn(Item* item, gp_walk_info& gwi, bool& nonSupport,

View File

@ -314,14 +314,12 @@ void item_check(Item* item, bool* unsupported_feature)
{
case Item::COND_ITEM:
{
Item_cond* icp = reinterpret_cast<Item_cond*>(item);
icp->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
item->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
break;
}
case Item::FUNC_ITEM:
{
Item_func* ifp = reinterpret_cast<Item_func*>(item);
ifp->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
item->traverse_cond(check_user_var_func, unsupported_feature, Item::POSTFIX);
break;
}
default:
@ -352,9 +350,7 @@ bool check_user_var(SELECT_LEX* select_lex)
if (join->conds)
{
Item_cond* icp = reinterpret_cast<Item_cond*>(join->conds);
icp->traverse_cond(check_user_var_func, &is_user_var_func, Item::POSTFIX);
join->conds->traverse_cond(check_user_var_func, &is_user_var_func, Item::POSTFIX);
}
return is_user_var_func;
@ -420,23 +416,15 @@ group_by_handler* create_columnstore_group_by_handler(THD* thd, Query* query)
if (!unsupported_feature)
{
JOIN* join = select_lex->join;
Item_cond* icp = 0;
if (join != 0)
icp = reinterpret_cast<Item_cond*>(join->conds);
if (unsupported_feature == false && icp)
if (unsupported_feature == false && join && join->conds)
{
icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
join->conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
}
// Optimizer could move some join conditions into where
if (select_lex->where != 0)
icp = reinterpret_cast<Item_cond*>(select_lex->where);
if (unsupported_feature == false && icp)
if (unsupported_feature == false && select_lex->where)
{
icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
select_lex->where->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
}
}
@ -521,18 +509,16 @@ derived_handler* create_columnstore_derived_handler(THD* thd, TABLE_LIST* table_
{
if (tl->where)
{
Item_cond* where_icp = reinterpret_cast<Item_cond*>(tl->where);
where_icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
where_icp->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
tl->where->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
tl->where->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
}
// Looking for JOIN with ON expression through
// TABLE_LIST in FROM until CS meets unsupported feature
if (tl->on_expr)
{
Item_cond* on_icp = reinterpret_cast<Item_cond*>(tl->on_expr);
on_icp->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
on_icp->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
tl->on_expr->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
tl->on_expr->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
}
// Iterate and traverse through the item list and the JOIN cond
@ -546,9 +532,8 @@ derived_handler* create_columnstore_derived_handler(THD* thd, TABLE_LIST* table_
if (!unsupported_feature && !join_preds_list.elements && join && join->conds)
{
Item_cond* conds = reinterpret_cast<Item_cond*>(join->conds);
conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
conds->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
join->conds->traverse_cond(check_walk, &unsupported_feature, Item::POSTFIX);
join->conds->traverse_cond(save_join_predicates, &join_preds_list, Item::POSTFIX);
}
// CROSS JOIN w/o conditions isn't supported until MCOL-301
@ -768,7 +753,8 @@ select_handler* create_columnstore_select_handler_(THD* thd, SELECT_LEX* sel_lex
// Disable processing of select_result_interceptor classes
// which intercept and transform result set rows. E.g.:
// select a,b into @a1, @a2 from t1;
if (((thd->lex)->result && !((select_dumpvar*)(thd->lex)->result)->var_list.is_empty()) && (!isPS))
select_dumpvar* dumpvar = dynamic_cast<select_dumpvar*>((thd->lex)->result);
if (dumpvar && !dumpvar->var_list.is_empty() && !isPS)
{
return nullptr;
}

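Two patterns recur in the handler changes above: conditions are now walked through the Item base interface instead of being speculatively cast to Item_cond, and the select_dumpvar test becomes a dynamic_cast whose nullptr result simply means "not that subclass". A compact sketch of the guarded downcast, with placeholder types standing in for the real server classes:

struct select_result { virtual ~select_result() = default; };
struct select_dumpvar : select_result { bool var_list_empty = true; };

// dynamic_cast yields nullptr for any other select_result subclass, so the
// type confusion behind the old C-style cast cannot occur.
inline bool interceptsRows(select_result* result)
{
  const auto* dumpvar = dynamic_cast<const select_dumpvar*>(result);
  return dumpvar != nullptr && !dumpvar->var_list_empty;
}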
View File

@ -23,9 +23,6 @@ sub is_default { 0 }
sub start_test {
# we should guard this for --force-restart flag condition.
# we should guard this behind the --force-restart flag.
my ($self, $tinfo)= @_;
print "Here I am restarting Columnstore backend\n";
#system("systemctl", "restart", "mariadb-columnstore");
print "Columnstore restarted\n";
My::Suite::start_test(@_);
}

View File

@ -44,7 +44,7 @@ if (WITH_UBSAN)
endif (WITH_COLUMNSTORE_REPORT_PATH)
SET(LD_PRELOAD_STRING "")
SET(ALLOC_CONFIG "UBSAN_OPTIONS=abort_on_error=0")
SET(ALLOC_CONFIG "UBSAN_OPTIONS=abort_on_error=0,print_stacktrace=true")
SET(PRIMPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.primproc)
SET(DMLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.dmlproc)
SET(DDLPROC_ALLOC_CONFIG ${ALLOC_CONFIG},log_path=${UBSAN_PATH}.ddlproc)

View File

@ -63,12 +63,12 @@ class OamCache
dbRootPMMap_t dbRootPMMap;
dbRootPMMap_t dbRootConnectionMap;
PMDbrootsMap_t pmDbrootsMap;
uint32_t numDBRoots;
time_t mtime;
uint32_t numDBRoots = 1;
time_t mtime = 0;
DBRootConfigList dbroots;
std::vector<int> moduleIds;
std::string OAMParentModuleName;
int mLocalPMId; // The PM id running on this machine
int mLocalPMId = 0; // The PM id running on this machine
std::string systemName;
std::string moduleName;
};

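The OamCache change above (and the similar edits to mcsv1Context and CreateStripeColumnExtentsArgIn later in this commit) gives scalar members in-class defaults so no code path can observe indeterminate bytes, which is presumably what produced the invalid-value reads being fixed. A small illustrative struct, not the real OamCache:

#include <cstdint>
#include <ctime>

// Every scalar gets a deterministic value even if a constructor forgets to
// set it, so later reads never touch uninitialized storage.
struct CacheSketch
{
  std::uint32_t numDBRoots = 1;
  std::time_t mtime = 0;
  int localPMId = 0;
};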
View File

@ -597,6 +597,7 @@ void PrimitiveProcessor::p_Dictionary(const DictInput* in, vector<uint8_t>* out,
outValue = reinterpret_cast<DataValue*>(&(*out)[header.NBYTES]);
outValue->isnull = sigptr.data == nullptr;
outValue->len = sigptr.len;
if (sigptr.data != nullptr)
memcpy(outValue->data, sigptr.data, sigptr.len);
header.NBYTES += sizeof(DataValue) + sigptr.len;
}

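The one-line guard above (and the matching one in Row::setVarBinaryField below) reflects that calling memcpy with a null source pointer is undefined even when the length is zero, which UBSan typically reports through its nonnull-attribute check. A short sketch of the guarded copy; the function name and signature are illustrative only:

#include <cstdint>
#include <cstring>

// Skip the copy entirely for null input; memcpy(dst, nullptr, 0) is UB.
inline void copySignature(std::uint8_t* dst, const std::uint8_t* src, std::uint32_t len)
{
  if (src != nullptr)
    std::memcpy(dst, src, len);
}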
View File

@ -3,7 +3,11 @@ mysql -e "create database if not exists test;"
SOCKET=`mysql -e "show variables like 'socket';" | grep socket | cut -f2`
cd /usr/share/mysql/mysql-test
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/basic | tee $CURRENT_DIR/mtr.basic.log 2>&1
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/basic $1 | tee $CURRENT_DIR/mtr.basic.log 2>&1
if [[ $1 != '' ]]; then
exit 1
fi
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/setup | tee $CURRENT_DIR/mtr.setup.log 2>&1
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/bugfixes | tee $CURRENT_DIR/mtr.bugfixes.log 2>&1
./mtr --force --max-test-fail=0 --testcase-timeout=60 --extern socket=$SOCKET --suite=columnstore/devregression | tee $CURRENT_DIR/mtr.devregression.log 2>&1

View File

@ -304,7 +304,7 @@ inline bool calc_time_diff(int64_t time1, int64_t time2, int l_sign, long long*
{
int64_t days;
bool neg;
int64_t microseconds;
int128_t microseconds;
uint64_t year1 = 0, month1 = 0, day1 = 0, hour1 = 0, min1 = 0, sec1 = 0, msec1 = 0;

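Widening microseconds to int128_t above presumably heads off a signed-overflow report when the microsecond count is accumulated from the two timestamps. A rough illustration of why 64 bits can be tight, using the compiler's __int128 (int128_t in ColumnStore is a typedef over it); the helper below is hypothetical:

#include <cstdint>

// seconds * 1000000 can exceed INT64_MAX for large or corrupt second counts,
// and signed overflow is undefined; a 128-bit accumulator keeps the
// intermediate arithmetic defined.
inline __int128 toMicroseconds(std::int64_t seconds, std::int64_t microPart)
{
  return static_cast<__int128>(seconds) * 1000000 + microPart;
}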
View File

@ -1328,7 +1328,10 @@ inline void Row::setVarBinaryField(const uint8_t* val, uint32_t len, uint32_t co
if (len > getColumnWidth(colIndex))
len = getColumnWidth(colIndex);
idbassert(val != nullptr || !len);
*((uint16_t*)&data[offsets[colIndex]]) = len;
if (val != nullptr)
memcpy(&data[offsets[colIndex] + 2], val, len);
}
}

View File

@ -396,7 +396,7 @@ class mcsv1Context
mcsv1sdk::mcsv1_UDAF* func;
int32_t fParamCount;
std::vector<uint32_t> paramKeys;
enum_mariadb_return_type mariadbReturnType;
enum_mariadb_return_type mariadbReturnType = MYSQL_TYPE_LONGLONG;
uint32_t fCharsetNumber;
public:

View File

@ -315,8 +315,8 @@ struct BulkUpdateDBRootArg
/* Input Arg type for DBRM::createStripeColumnExtents() */
struct CreateStripeColumnExtentsArgIn
{
OID_t oid; // column OID
uint32_t width; // column width in bytes
OID_t oid = 0; // column OID
uint32_t width = 0; // column width in bytes
execplan::CalpontSystemCatalog::ColDataType colDataType;
};

View File

@ -65,12 +65,13 @@ ExtentStripeAlloc::~ExtentStripeAlloc()
// Add a column to be associated with the "stripe" allocations for "this"
// ExtentStripeAlloc object.
//------------------------------------------------------------------------------
void ExtentStripeAlloc::addColumn(OID colOID, int colWidth)
void ExtentStripeAlloc::addColumn(OID colOID, int colWidth, datatypes::SystemCatalog::ColDataType colDataType)
{
boost::mutex::scoped_lock lock(fMapMutex);
fColOIDs.push_back(colOID);
fColWidths.push_back(colWidth);
fColDataTypes.push_back(colDataType);
}
//------------------------------------------------------------------------------
@ -161,6 +162,7 @@ int ExtentStripeAlloc::allocateExtent(OID oid, uint16_t dbRoot,
BRM::CreateStripeColumnExtentsArgIn colEntry;
colEntry.oid = fColOIDs[j];
colEntry.width = fColWidths[j];
colEntry.colDataType = fColDataTypes[j];
cols.push_back(colEntry);
}

View File

@ -128,7 +128,7 @@ class ExtentStripeAlloc
* @param colOID Column OID to be added to extent allocation list.
* @param colWidth Width of column associated with colOID.
*/
void addColumn(OID colOID, int colWidth);
void addColumn(OID colOID, int colWidth, datatypes::SystemCatalog::ColDataType colDataType);
/** @brief Request an extent allocation for the specified OID and DBRoot.
* A "stripe" of extents for the corresponding table will be allocated
@ -159,6 +159,7 @@ class ExtentStripeAlloc
boost::mutex fMapMutex; // protects unordered map access
std::vector<OID> fColOIDs; // Vector of column OIDs
std::vector<int> fColWidths; // Widths associated with fColOIDs
std::vector<datatypes::SystemCatalog::ColDataType> fColDataTypes;
// unordered map where we collect the allocated extents
std::tr1::unordered_multimap<OID, AllocExtEntry, AllocExtHasher> fMap;

View File

@ -1249,7 +1249,7 @@ void TableInfo::addColumn(ColumnInfo* info)
fColumns.push_back(info);
fNumberOfColumns = fColumns.size();
fExtentStrAlloc.addColumn(info->column.mapOid, info->column.width);
fExtentStrAlloc.addColumn(info->column.mapOid, info->column.width, info->column.dataType);
}
//------------------------------------------------------------------------------
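The last three hunks thread the column data type from TableInfo::addColumn through ExtentStripeAlloc::addColumn into the BRM extent-allocation request. The three parallel vectors (OIDs, widths, data types) stay index-aligned because they are always pushed together under the mutex already held in addColumn. A condensed sketch of that flow, with stand-in types for OID and datatypes::SystemCatalog::ColDataType:

#include <vector>

using OID = int;                               // stand-in for the real OID type
enum class ColDataType { INT, BIGINT, CHAR };  // stand-in for the catalog enum

// Pushed in lock-step, so a single index j later describes one column
// completely when the per-column (oid, width, colDataType) entries are built.
struct StripeAllocSketch
{
  std::vector<OID> oids;
  std::vector<int> widths;
  std::vector<ColDataType> types;

  void addColumn(OID oid, int width, ColDataType type)
  {
    oids.push_back(oid);
    widths.push_back(width);
    types.push_back(type);
  }
};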