mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-29 08:21:15 +03:00

Fix code style

Andrew Hutchings
2018-05-31 14:44:48 +01:00
parent 53f281933a
commit 1d8964ec0d
12 changed files with 38 additions and 28 deletions


@@ -460,7 +460,7 @@ void TupleUnion::normalize(const Row& in, Row* out)
if (in.getScale(i))
{
double d = in.getIntField(i);
- d /= exp10(in.getScale(i));
+ d /= exp10(in.getScale(i));
os.precision(15);
os << d;
}
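For context, the scale handling shown above is plain power-of-ten arithmetic: a fixed-point integer column value is divided by 10^scale before being streamed out. A minimal standalone sketch of the same idea (not part of this commit; it uses std::pow rather than the glibc exp10 extension):

    #include <cmath>
    #include <iostream>
    #include <sstream>

    int main()
    {
        long long intField = 1234567;   // stored fixed-point integer value
        unsigned scale = 3;             // digits after the decimal point

        // Same idea as the hunk above: divide by 10^scale, then print
        // with 15 significant digits.
        double d = static_cast<double>(intField);
        d /= std::pow(10.0, scale);

        std::ostringstream os;
        os.precision(15);
        os << d;
        std::cout << os.str() << '\n';  // prints 1234.567
    }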


@@ -2266,7 +2266,7 @@ extern "C"
int rc = ProcessDDLStatement(stmt, db, "", tid2sid(thd->thread_id), emsg, compressiontype);
if (rc != 0)
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 9999, emsg.c_str());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 9999, emsg.c_str());
return rc;
}


@@ -9877,8 +9877,9 @@ int getGroupPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, cal_gro
select_query += ord_cols;
}
}
- // LIMIT and OFFSET are extracted from TABLE_LIST elements.
- // All of JOIN-ed tables contain relevant limit and offset.
+ // LIMIT and OFFSET are extracted from TABLE_LIST elements.
+ // All of JOIN-ed tables contain relevant limit and offset.
if (gi.groupByTables->select_lex->select_limit)
{
csep->limitNum(((Item_int*)gi.groupByTables->select_lex->select_limit)->val_int());
@@ -9901,7 +9902,7 @@
setError(gwi.thd, ER_INTERNAL_ERROR, gwi.parseErrorText, gwi);
return ER_CHECK_NOT_IMPLEMENTED;
}
} // ORDER BY processing ends here
if ( gi.groupByDistinct )


@@ -5581,18 +5581,18 @@ internal_error:
*/
/***********************************************************
* DESCRIPTION:
- * Return a result record for each
+ * Return a result record for each
* group_by_handler::next_row() call.
* PARAMETERS:
* group_hand - group by handler, that preserves initial
* table and items lists. .
- * table - TABLE pointer The table to save the result
+ * table - TABLE pointer The table to save the result
* set in.
* RETURN:
* 0 if success
- * HA_ERR_END_OF_FILE if the record set has come to
+ * HA_ERR_END_OF_FILE if the record set has come to
* an end
- * others if something went wrong whilst getting the
+ * others if something went wrong whilst getting the
* result set
***********************************************************/
int ha_calpont_impl_group_by_next(ha_calpont_group_by_handler* group_hand, TABLE* table)
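The return codes documented above imply the usual fetch loop for a group-by handler. A minimal sketch of how a caller might drive it, assuming hypothetical fetch_all_rows/process_row helpers that are not part of ColumnStore or this commit:

    // Illustrative sketch only; process_row is an assumed consumer of
    // table->record[0], not a real ColumnStore or MariaDB function.
    static int fetch_all_rows(ha_calpont_group_by_handler* group_hand, TABLE* table)
    {
        int rc;

        // 0 means one more result record was stored in the table.
        while ((rc = ha_calpont_impl_group_by_next(group_hand, table)) == 0)
            process_row(table);

        // HA_ERR_END_OF_FILE is the normal "result set exhausted" signal.
        if (rc == HA_ERR_END_OF_FILE)
            return 0;

        return rc;  // anything else indicates a failure while fetching
    }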


@@ -2769,6 +2769,7 @@ int64_t DataConvert::stringToTime(const string& data)
if (*end != '\0')
return -1;
hour = day * 24;
day = -1;
time = data.substr(pos + 1, data.length() - pos - 1);


@@ -86,6 +86,7 @@ int64_t addTime(DateTime& dt1, Time& dt2)
dt2.day = hour / 24;
hour = hour % 24;
}
if (hour < 0)
{
dt.hour = hour + 24;
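The hunk above folds an hour count into days and wraps a negative remainder back into the 0-23 range. A standalone sketch of the same base-24 normalization (illustrative only; it mirrors the idea, not the exact ColumnStore DateTime/Time types):

    #include <cstdio>

    int main()
    {
        int hour = -29;        // e.g. the signed result of a time subtraction
        int day  = hour / 24;  // -1 (C++ division truncates toward zero)
        hour     = hour % 24;  // -5

        if (hour < 0)
        {
            hour += 24;        // -5 -> 19, matching the "hour + 24" fix-up above
            --day;             // borrow one more day: -1 -> -2
        }

        std::printf("day=%d hour=%d\n", day, hour);  // day=-2 hour=19
    }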


@@ -60,7 +60,7 @@ struct IdbSortSpec
{
int fIndex;
// TODO There are three ordering specs since 10.2
- int fAsc; // <ordering specification> ::= ASC | DESC
+ int fAsc; // <ordering specification> ::= ASC | DESC
int fNf; // <null ordering> ::= NULLS FIRST | NULLS LAST
IdbSortSpec() : fIndex(-1), fAsc(1), fNf(1) {}
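A hypothetical usage sketch for the struct above, describing something like ORDER BY <third column> DESC NULLS LAST. The 0/1 encoding of fAsc and fNf is only an assumption inferred from the defaults shown; this commit does not document it:

    #include <iostream>

    // Local copy of the struct as it appears in the hunk above, so the
    // example compiles on its own.
    struct IdbSortSpec
    {
        int fIndex;
        int fAsc;  // <ordering specification> ::= ASC | DESC
        int fNf;   // <null ordering> ::= NULLS FIRST | NULLS LAST
        IdbSortSpec() : fIndex(-1), fAsc(1), fNf(1) {}
    };

    int main()
    {
        IdbSortSpec spec;
        spec.fIndex = 2;  // assumed 0-based position of the sort column
        spec.fAsc = 0;    // assumption: 1 = ASC (the default), 0 = DESC
        spec.fNf = 0;     // assumption: 1 = NULLS FIRST (the default), 0 = NULLS LAST

        std::cout << spec.fIndex << ' ' << spec.fAsc << ' ' << spec.fNf << '\n';
    }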


@@ -77,7 +77,10 @@ public:
{
return fLocFile;
}
- int getReadBufSize() { return fReadBufSize; }
+ int getReadBufSize()
+ {
+     return fReadBufSize;
+ }
{
return fMode;
}


@@ -87,6 +87,7 @@ WEFileReadThread::WEFileReadThread(WESDHandler& aSdh): fSdh(aSdh),
{
//TODO batch qty to get from config
fBatchQty = 10000;
if (fSdh.getReadBufSize() < DEFAULTBUFFSIZE)
{
fBuffSize = DEFAULTBUFFSIZE;
@@ -95,6 +96,7 @@
{
fBuffSize = fSdh.getReadBufSize();
}
fBuff = new char [fBuffSize];
}
@@ -362,17 +364,17 @@ unsigned int WEFileReadThread::readDataFile(messageqcpp::SBS& Sbs)
if (fEnclEsc)
{
//pStart = aBuff;
- aLen = getNextRow(fInFile, fBuff, fBuffSize-1);
+ aLen = getNextRow(fInFile, fBuff, fBuffSize - 1);
}
else
{
- fInFile.getline(fBuff, fBuffSize-1);
+ fInFile.getline(fBuff, fBuffSize - 1);
aLen = fInFile.gcount();
}
////aLen chars incl \n, Therefore aLen-1; '<<' oper won't go past it
//cout << "Data Length " << aLen <<endl;
- if((aLen < (fBuffSize-2)) && (aLen>0))
+ if ((aLen < (fBuffSize - 2)) && (aLen > 0))
{
fBuff[aLen - 1] = '\n';
fBuff[aLen] = 0;
@@ -384,7 +386,7 @@ unsigned int WEFileReadThread::readDataFile(messageqcpp::SBS& Sbs)
if (fSdh.getDebugLvl() > 2) cout << "File data line = " << aIdx << endl;
}
- else if(aLen>=fBuffSize-2) //Didn't hit delim; BIG ROW
+ else if (aLen >= fBuffSize - 2) //Didn't hit delim; BIG ROW
{
cout << "Bad Row data " << endl;
cout << fBuff << endl;


@@ -126,7 +126,7 @@ public:
void add2InputDataFileList(std::string& FileName);
private:
- enum { DEFAULTBUFFSIZE=1024*1024 };
+ enum { DEFAULTBUFFSIZE = 1024 * 1024 };
// don't allow anyone else to set
void setTgtPmId(unsigned int fTgtPmId)


@@ -222,7 +222,7 @@ int ColumnOp::allocRowId(const TxnID& txnid, bool useStartingExtent,
//Find out where the rest rows go
BRM::LBID_t startLbid;
//need to put in a loop until newExtent is true
- newExtent = dbRootExtentTrackers[column.colNo]->nextSegFile(dbRoot, partition, segment, newHwm, startLbid);
+ newExtent = dbRootExtentTrackers[column.colNo]->nextSegFile(dbRoot, partition, segment, newHwm, startLbid);
TableMetaData* tableMetaData = TableMetaData::makeTableMetaData(tableOid);
while (!newExtent)
@@ -238,7 +238,7 @@
for (i = 0; i < dbRootExtentTrackers.size(); i++)
{
- if (i != column.colNo)
+ if (i != column.colNo)
dbRootExtentTrackers[i]->nextSegFile(dbRoot, partition, segment, newHwm, startLbid);
// Round up HWM to the end of the current extent
@@ -302,7 +302,8 @@ int ColumnOp::allocRowId(const TxnID& txnid, bool useStartingExtent,
tableMetaData->setColExtsInfo(newColStructList[i].dataOid, aColExtsInfo);
}
- newExtent = dbRootExtentTrackers[column.colNo]->nextSegFile(dbRoot, partition, segment, newHwm, startLbid);
+ newExtent = dbRootExtentTrackers[column.colNo]->nextSegFile(dbRoot, partition, segment, newHwm, startLbid);
}
}
@@ -323,7 +324,7 @@ int ColumnOp::allocRowId(const TxnID& txnid, bool useStartingExtent,
}
rc = BRMWrapper::getInstance()->allocateStripeColExtents(cols, dbRoot, partition, segment, extents);
- newHwm = extents[column.colNo].startBlkOffset;
+ newHwm = extents[column.colNo].startBlkOffset;
if (rc != NO_ERROR)
return rc;


@@ -1683,18 +1683,19 @@ int WriteEngineWrapper::insertColumnRecsBinary(const TxnID& txnid,
for (i = 0; i < colStructList.size(); i++)
Convertor::convertColType(&colStructList[i]);
- // MCOL-984: find the smallest column width to calculate the RowID from so
- // that all HWMs will be incremented by this operation
- int32_t lowColLen = 8192;
- int32_t colId = 0;
- for (uint32_t colIt = 0; colIt < colStructList.size(); colIt++)
- {
+ // MCOL-984: find the smallest column width to calculate the RowID from so
+ // that all HWMs will be incremented by this operation
+ int32_t lowColLen = 8192;
+ int32_t colId = 0;
+ for (uint32_t colIt = 0; colIt < colStructList.size(); colIt++)
+ {
if (colStructList[colIt].colWidth < lowColLen)
{
colId = colIt;
lowColLen = colStructList[colId].colWidth;
}
}
}
// rc = checkValid(txnid, colStructList, colValueList, ridList);
// if (rc != NO_ERROR)
@@ -1944,7 +1945,7 @@ int WriteEngineWrapper::insertColumnRecsBinary(const TxnID& txnid,
// allocate row id(s)
//--------------------------------------------------------------------------
- curColStruct = colStructList[colId];
+ curColStruct = colStructList[colId];
colOp = m_colOp[op(curColStruct.fCompressionType)];