1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-08 14:22:09 +03:00

Merge branch 'develop-1.2' into develop-merge-up-20190514

This commit is contained in:
Andrew Hutchings
2019-05-14 13:58:33 +01:00
83 changed files with 469 additions and 638 deletions

View File

@@ -5,10 +5,10 @@
set +e; \
if [ -f dml-scan.cpp ]; \
then diff -abBq dml-scan-temp.cpp dml-scan.cpp >/dev/null 2>&1; \
if [ $$? -ne 0 ]; \
if [ $? -ne 0 ]; \
then mv -f dml-scan-temp.cpp dml-scan.cpp; \
else touch dml-scan.cpp; \
fi; \
else mv -f dml-scan-temp.cpp dml-scan.cpp; \
fi
rm -f dml-scan-temp.cpp
rm -f dml-scan-temp.cpp

View File

@@ -40,22 +40,6 @@ struct to_lower
}
};
//Trim any leading/trailing ws
// Trim leading and trailing whitespace (space, tab, newline) from a string.
// Fix: the original forced p1 to 0 when the input was entirely whitespace,
// so an all-whitespace string was returned unchanged instead of empty.
// Returns an empty string for empty or all-whitespace input.
const std::string lrtrim(const std::string& in)
{
    std::string::size_type first = in.find_first_not_of(" \t\n");
    if (first == std::string::npos)
        return std::string(); // empty or all-whitespace input
    std::string::size_type last = in.find_last_not_of(" \t\n");
    return in.substr(first, last - first + 1);
}
}
namespace execplan

View File

@@ -40,22 +40,6 @@ struct to_lower
}
};
//Trim any leading/trailing ws
// Trim leading and trailing whitespace (space, tab, newline) from a string.
// Fix: the original forced p1 to 0 when the input was entirely whitespace,
// so an all-whitespace string was returned unchanged instead of empty.
// Returns an empty string for empty or all-whitespace input.
const std::string lrtrim(const std::string& in)
{
    std::string::size_type first = in.find_first_not_of(" \t\n");
    if (first == std::string::npos)
        return std::string(); // empty or all-whitespace input
    std::string::size_type last = in.find_last_not_of(" \t\n");
    return in.substr(first, last - first + 1);
}
}
namespace execplan

View File

@@ -46,22 +46,6 @@ struct to_lower
}
};
//Trim any leading/trailing ws
// Trim leading and trailing whitespace (space, tab, newline) from a string.
// Fix: the original forced p1 to 0 when the input was entirely whitespace,
// so an all-whitespace string was returned unchanged instead of empty.
// Returns an empty string for empty or all-whitespace input.
const std::string lrtrim(const std::string& in)
{
    std::string::size_type first = in.find_first_not_of(" \t\n");
    if (first == std::string::npos)
        return std::string(); // empty or all-whitespace input
    std::string::size_type last = in.find_last_not_of(" \t\n");
    return in.substr(first, last - first + 1);
}
}
namespace execplan

View File

@@ -1136,7 +1136,9 @@ inline int64_t TreeNode::getDatetimeIntVal()
dataconvert::Time tt;
int day = 0;
memcpy(&tt, &fResult.intVal, 8);
void *ttp = static_cast<void*>(&tt);
memcpy(ttp, &fResult.intVal, 8);
// Note, this should probably be current date +/- time
if ((tt.hour > 23) && (!tt.is_neg))
@@ -1166,7 +1168,7 @@ inline int64_t TreeNode::getTimeIntVal()
{
dataconvert::DateTime dt;
memcpy(&dt, &fResult.intVal, 8);
memcpy((int64_t*)(&dt), &fResult.intVal, 8);
dataconvert::Time tt(0, dt.hour, dt.minute, dt.second, dt.msecond, false);
memcpy(&fResult.intVal, &tt, 8);
return fResult.intVal;

View File

@@ -336,7 +336,8 @@ bool isNotInSubquery(JobStepVector& jsv)
return notIn;
}
// This fcn is currently unused. Will keep it in the code for now.
#if 0
void alterCsepInExistsFilter(CalpontSelectExecutionPlan* csep, JobInfo& jobInfo)
{
// This is for window function in IN/EXISTS sub-query.
@@ -364,7 +365,7 @@ void alterCsepInExistsFilter(CalpontSelectExecutionPlan* csep, JobInfo& jobInfo)
if (wcs.size() > 1)
retCols.insert(retCols.end(), wcs.begin() + 1, wcs.end());
}
#endif
void doCorrelatedExists(const ExistsFilter* ef, JobInfo& jobInfo)
{

View File

@@ -94,68 +94,6 @@ namespace
{
using namespace joblist;
//Find the next step downstream from *in. Assumes only the first such step is needed.
// Returns an iterator to the first step in `list` (other than *in) whose
// input association shares a datalist object with any of *in's outputs —
// i.e. the first consumer directly downstream of *in.
// Returns list.end() when no such step exists.
// NOTE(review): only the first match is returned, even if *in feeds
// several downstream steps.
const JobStepVector::iterator getNextStep(JobStepVector::iterator& in, JobStepVector& list)
{
JobStepVector::iterator end = list.end();
// Walk every output datalist produced by *in ...
for (unsigned i = 0; i < in->get()->outputAssociation().outSize(); ++i)
{
JobStepVector::iterator iter = list.begin();
AnyDataListSPtr outAdl = in->get()->outputAssociation().outAt(i);
// ... and scan every other step for an input that is the same datalist.
while (iter != end)
{
if (iter != in)
{
AnyDataListSPtr inAdl;
for (unsigned j = 0; j < iter->get()->inputAssociation().outSize(); j++)
{
inAdl = iter->get()->inputAssociation().outAt(j);
// Pointer identity, not value equality: same shared datalist instance.
if (inAdl.get() == outAdl.get())
return iter;
}
}
++iter;
}
}
return end; // no downstream consumer found
}
// Reports whether the given job step is one of the primitive step kinds
// that can be combined: pColScanStep, PseudoColStep, pColStep,
// pDictionaryStep, PassThruStep or FilterStep.
bool checkCombinable(JobStep* jobStepPtr)
{
    const std::type_info& stepType = typeid(*jobStepPtr);

    return stepType == typeid(pColScanStep)    ||
           stepType == typeid(PseudoColStep)   ||
           stepType == typeid(pColStep)        ||
           stepType == typeid(pDictionaryStep) ||
           stepType == typeid(PassThruStep)    ||
           stepType == typeid(FilterStep);
}
void projectSimpleColumn(const SimpleColumn* sc, JobStepVector& jsv, JobInfo& jobInfo)
{
@@ -1447,54 +1385,6 @@ void changePcolStepToPcolScan(JobStepVector::iterator& it, JobStepVector::iterat
}
}
uint32_t shouldSort(const JobStep* inJobStep, int colWidth)
{
//only pColStep and pColScan have colType
const pColStep* inStep = dynamic_cast<const pColStep*>(inJobStep);
if (inStep && colWidth > inStep->colType().colWidth)
{
return 1;
}
const pColScanStep* inScan = dynamic_cast<const pColScanStep*>(inJobStep);
if (inScan && colWidth > inScan->colType().colWidth)
{
return 1;
}
return 0;
}
// Replaces each pColStep in the projection step vector with a PassThruStep
// when the step's first input datalist already carries the same column
// (matching OID) and the OID is >= 3000, so the value is passed through
// instead of being re-read.
// NOTE(review): the 3000 threshold presumably separates user columns from
// system-catalog objects — confirm against the catalog OID ranges.
void convertPColStepInProjectToPassThru(JobStepVector& psv, JobInfo& jobInfo)
{
for (JobStepVector::iterator iter = psv.begin(); iter != psv.end(); ++iter)
{
pColStep* colStep = dynamic_cast<pColStep*>(iter->get());
if (colStep != NULL)
{
JobStepAssociation ia = iter->get()->inputAssociation();
DataList_t* fifoDlp = ia.outAt(0).get()->dataList();
if (fifoDlp)
{
// Input already delivers this exact column: convert to pass-through.
if (iter->get()->oid() >= 3000 && iter->get()->oid() == fifoDlp->OID())
{
PassThruStep* pts = 0;
pts = new PassThruStep(*colStep);
// Carry the original step's identity over to the replacement.
pts->alias(colStep->alias());
pts->view(colStep->view());
pts->name(colStep->name());
pts->tupleId(iter->get()->tupleId());
iter->reset(pts); // smart-pointer reset releases the old pColStep
}
}
}
}
}
// optimize filter order
// perform none string filters first because string filter joins the tokens.
void optimizeFilterOrder(JobStepVector& qsv)
@@ -1819,7 +1709,7 @@ void makeVtableModeSteps(CalpontSelectExecutionPlan* csep, JobInfo& jobInfo,
jobInfo.limitCount = (uint64_t) - 1;
}
// support order by and limit in sub-query/union or
// support order by and limit in sub-query/union or
// GROUP BY handler processed outer query order
else if (csep->orderByCols().size() > 0)
{

View File

@@ -483,7 +483,8 @@ void pDictionaryScan::sendAPrimitiveMessage(
)
{
DictTokenByScanRequestHeader hdr;
memset(&hdr, 0, sizeof(hdr));
void *hdrp = static_cast<void*>(&hdr);
memset(hdrp, 0, sizeof(hdr));
hdr.ism.Interleave = pm;
hdr.ism.Flags = planFlagsToPrimFlags(fTraceFlags);
@@ -913,7 +914,8 @@ void pDictionaryScan::serializeEqualityFilter()
uint32_t i;
vector<string> empty;
memset(&ism, 0, sizeof(ISMPacketHeader));
void *ismp = static_cast<void*>(&ism);
memset(ismp, 0, sizeof(ISMPacketHeader));
ism.Command = DICT_CREATE_EQUALITY_FILTER;
msg.load((uint8_t*) &ism, sizeof(ISMPacketHeader));
msg << uniqueID;
@@ -954,7 +956,8 @@ void pDictionaryScan::destroyEqualityFilter()
ByteStream msg;
ISMPacketHeader ism;
memset(&ism, 0, sizeof(ISMPacketHeader));
void *ismp = static_cast<void*>(&ism);
memset(ismp, 0, sizeof(ISMPacketHeader));
ism.Command = DICT_DESTROY_EQUALITY_FILTER;
msg.load((uint8_t*) &ism, sizeof(ISMPacketHeader));
msg << uniqueID;

View File

@@ -81,21 +81,6 @@ using namespace joblist;
namespace
{
// Builds a single-quoted display name for projection column i: the column's
// alias when present, otherwise the tuple-key name (or the placeholder
// "Expression/Function" when the key's fId is below 100).
string keyName(uint64_t i, uint32_t key, const joblist::JobInfo& jobInfo)
{
    string display = jobInfo.projectionCols[i]->alias();

    if (display.empty())
    {
        display = jobInfo.keyInfo->tupleKeyToName[key];

        if (jobInfo.keyInfo->tupleKeyVec[key].fId < 100)
            display = "Expression/Function";
    }

    return "'" + display + "'";
}
uint64_t getColumnIndex(const SRCP& c, const map<uint64_t, uint64_t>& m, JobInfo& jobInfo)
{

View File

@@ -88,103 +88,6 @@ inline uint32_t tid2sid(const uint32_t tid)
}
//StopWatch timer;
// Builds an INSERT statement for `table` into `buffer` from the current row
// image held in each Field. Sets `columns` to the number of columns emitted
// and returns it; when no columns are emitted, `buffer` is left untouched.
// NOTE(review): the `buf` parameter is unused in this visible body.
int buildBuffer(uchar* buf, string& buffer, int& columns, TABLE* table)
{
char attribute_buffer[1024];
String attribute(attribute_buffer, sizeof(attribute_buffer),
&my_charset_bin);
// Column-name list and value list are built side by side.
std::string cols = " (";
std::string vals = " values (";
columns = 0;
for (Field** field = table->field; *field; field++)
{
const char* ptr;
const char* end_ptr;
if ((*field)->is_null())
ptr = end_ptr = 0;
else
{
// Mark the column read, then fetch its string representation.
bitmap_set_bit(table->read_set, (*field)->field_index);
(*field)->val_str(&attribute, &attribute);
ptr = attribute.ptr();
end_ptr = attribute.length() + ptr;
}
if (columns > 0)
{
cols.append(",");
vals.append(",");
}
columns++;
cols.append((*field)->field_name.str);
// ptr == end_ptr covers both the NULL column case and an empty value.
if (ptr == end_ptr)
{
vals.append ("NULL");
}
else
{
// Opening quote for string/date/time-like types.
if ( (*field)->type() == MYSQL_TYPE_VARCHAR ||
/*FIXME: (*field)->type() == MYSQL_TYPE_VARBINARY || */
(*field)->type() == MYSQL_TYPE_VAR_STRING ||
(*field)->type() == MYSQL_TYPE_STRING ||
(*field)->type() == MYSQL_TYPE_DATE ||
(*field)->type() == MYSQL_TYPE_DATETIME ||
(*field)->type() == MYSQL_TYPE_DATETIME2 ||
(*field)->type() == MYSQL_TYPE_TIME )
vals.append("'");
// Copy the value byte by byte, dropping CR/LF and replacing apostrophes.
while (ptr < end_ptr)
{
if (*ptr == '\r')
{
ptr++;
}
else if (*ptr == '\n')
{
ptr++;
}
else if (*ptr == '\'' )
{
//@Bug 1820. Replace apostrophe with strange character to pass parser.
vals += '\252';
ptr++;
}
else
vals += *ptr++;
}
// Closing quote, same type set as the opening quote above.
if ( (*field)->type() == MYSQL_TYPE_VARCHAR ||
/*FIXME: (*field)->type() == MYSQL_TYPE_VARBINARY || */
(*field)->type() == MYSQL_TYPE_VAR_STRING ||
(*field)->type() == MYSQL_TYPE_STRING ||
(*field)->type() == MYSQL_TYPE_DATE ||
(*field)->type() == MYSQL_TYPE_DATETIME ||
(*field)->type() == MYSQL_TYPE_DATETIME2 ||
(*field)->type() == MYSQL_TYPE_TIME )
vals.append("'");
}
}
// Assemble the final statement only when at least one column was seen.
if (columns)
{
cols.append(") ");
vals.append(") ");
buffer = "INSERT INTO ";
buffer.append(table->s->table_name.str);
buffer.append(cols);
buffer.append(vals);
}
return columns;
}
uint32_t buildValueList (TABLE* table, cal_connection_info& ci )
{

View File

@@ -4686,7 +4686,7 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
}
}
}
catch (std::logic_error e)
catch (std::logic_error &e)
{
gwi.fatalParseError = true;
gwi.parseErrorText = "error building Aggregate Function: ";

View File

@@ -2114,7 +2114,7 @@ int ha_calpont_impl_rnd_init(TABLE* table)
//check whether the system is ready to process statement.
#ifndef _MSC_VER
static DBRM dbrm(true);
bool bSystemQueryReady = dbrm.getSystemQueryReady();
int bSystemQueryReady = dbrm.getSystemQueryReady();
if (bSystemQueryReady == 0)
{
@@ -4269,7 +4269,7 @@ int ha_calpont_impl_group_by_init(ha_calpont_group_by_handler* group_hand, TABLE
//check whether the system is ready to process statement.
#ifndef _MSC_VER
static DBRM dbrm(true);
bool bSystemQueryReady = dbrm.getSystemQueryReady();
int bSystemQueryReady = dbrm.getSystemQueryReady();
if (bSystemQueryReady == 0)
{

View File

@@ -239,21 +239,6 @@ struct PartitionInfo
typedef map<LogicalPartition, PartitionInfo> PartitionMap;
// Converts a char-column value packed into an int64 into its 8-character
// string form, emitting bytes from least significant to most significant.
// Bytes that are zero appear as NUL characters in the result, so the
// returned string always has length 8.
const std::string charcolToString(int64_t v)
{
    std::string out;
    out.reserve(8);

    for (int byteIdx = 0; byteIdx < 8; ++byteIdx)
    {
        out += static_cast<char>(v & 0xff);
        v >>= 8;
    }

    return out;
}
const string format(int64_t v, CalpontSystemCatalog::ColType& ct)
{
ostringstream oss;

View File

@@ -100,6 +100,7 @@ static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* th
messageqcpp::MessageQueueClient* msgQueueClient;
oam::Oam oam_instance;
int pmId = 0;
int rc;
emp->getExtents(oid, entries, false, false, true);
@@ -121,7 +122,7 @@ static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* th
{
oam_instance.getDbrootPmConfig(iter->dbRoot, pmId);
}
catch (std::runtime_error)
catch (std::runtime_error&)
{
// MCOL-1116: If we are here a DBRoot is offline/missing
iter++;
@@ -137,14 +138,16 @@ static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* th
DbRootName << "DBRoot" << iter->dbRoot;
std::string DbRootPath = config->getConfig("SystemConfig", DbRootName.str());
fileSize = compressedFileSize = 0;
snprintf(fullFileName, WriteEngine::FILE_NAME_SIZE, "%s/%s", DbRootPath.c_str(), oidDirName);
rc = snprintf(fullFileName, WriteEngine::FILE_NAME_SIZE, "%s/%s", DbRootPath.c_str(), oidDirName);
std::ostringstream oss;
oss << "pm" << pmId << "_WriteEngineServer";
std::string client = oss.str();
msgQueueClient = messageqcpp::MessageQueueClientPool::getInstance(oss.str());
if (!get_file_sizes(msgQueueClient, fullFileName, &fileSize, &compressedFileSize))
// snprintf output truncation check
if (rc == WriteEngine::FILE_NAME_SIZE ||
!get_file_sizes(msgQueueClient, fullFileName, &fileSize, &compressedFileSize))
{
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
delete emp;