1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-29 08:21:15 +03:00

MCOL-4839: Fix clang build (#2100)

* Fix clang build

* Restored the extern "C" linkage on plugin_instance

Co-authored-by: Leonid Fedorov <l.fedorov@mail.corp.ru>
This commit is contained in:
Leonid Fedorov
2021-08-23 18:45:10 +03:00
committed by GitHub
parent 923bbf4033
commit 5c5f103f98
59 changed files with 422 additions and 600 deletions

1
.gitignore vendored
View File

@ -169,3 +169,4 @@ storage-manager/unit_tests
.drone.yml
result/
mariadb-columnstore-regression-test/
build/Testing/

View File

@ -97,13 +97,13 @@ namespace BRM
namespace rowgroup
{
struct Row;
class Row;
};
namespace execplan
{
struct SimpleColumn;
class SimpleColumn;
};

View File

@ -14,14 +14,16 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
*/
MA 02110-1301, USA.
*/
#ifndef MCS_TSFLOAT128_H_INCLUDED
#define MCS_TSFLOAT128_H_INCLUDED
#include <cfloat>
#include <cctype>
#include <cstdint>
#include <cstring>
#include <string>
#include "mcs_numeric_limits.h"
#ifdef __aarch64__
@ -103,7 +105,7 @@ static const float128_t mcs_fl_one = 1.0, mcs_fl_Zero[] = {0.0, -0.0,};
template<> class numeric_limits<float128_t> {
public:
static constexpr bool is_specialized = true;
static constexpr float128_t max()
static float128_t max()
{
return mcs_ieee854_float128{ .ieee = {0xffffffff,
0xffffffff,
@ -112,7 +114,7 @@ template<> class numeric_limits<float128_t> {
0x7ffe,
0x0}}.value;
}
static constexpr float128_t min()
static float128_t min()
{
return mcs_ieee854_float128{ .ieee = {0x0,
0x0,

View File

@ -14,8 +14,8 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
*/
MA 02110-1301, USA.
*/
#ifndef MCS_INT128_H_INCLUDED
#define MCS_INT128_H_INCLUDED
@ -40,7 +40,7 @@
src_restrictions, \
clobb) \
::memcpy((dst), (src), sizeof(int128_t));
#elif defined(__GNUC__) && (__GNUC___ > 7)
#elif defined(__GNUC__) && (__GNUC___ > 7) || defined(__clang__)
#define MACRO_VALUE_PTR_128(dst, \
dst_restrictions, \
src, \
@ -114,7 +114,7 @@ template<>
struct is_int128_t<int128_t> {
static const bool value = true;
};
template <typename T>
struct is_uint128_t {
static const bool value = false;
@ -129,7 +129,7 @@ inline int128_t abs(int128_t x)
{
return (x >= 0) ? x : -x;
}
class TSInt128
{
public:
@ -143,6 +143,12 @@ class TSInt128
// Copy ctor
TSInt128(const TSInt128& other): s128Value(other.s128Value) { }
TSInt128& operator=(const TSInt128& other)
{
s128Value = other.s128Value;
return *this;
}
// aligned argument
explicit TSInt128(const int128_t& x) { s128Value = x; }
@ -335,7 +341,7 @@ class TSInt128
int128_t s128Value;
}; // end of class
} //end of namespace datatypes

View File

@ -62,6 +62,14 @@ using namespace cacheutils;
#include "IDBPolicy.h"
using namespace idbdatafile;
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
//TODO: this should be in a common header somewhere
struct extentInfo
{
@ -2504,3 +2512,6 @@ void AlterTableProcessor::renameColumn(uint32_t sessionID, execplan::CalpontSyst
}
// vim:ts=4 sw=4:
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -414,7 +414,6 @@ string CalpontSelectExecutionPlan::queryTypeToString(const uint32_t queryType)
void CalpontSelectExecutionPlan::serialize(messageqcpp::ByteStream& b) const
{
ReturnedColumnList::const_iterator rcit;
vector<ReturnedColumn*>::const_iterator it;
ColumnMap::const_iterator mapiter;
TableList::const_iterator tit;
@ -727,7 +726,6 @@ bool CalpontSelectExecutionPlan::operator==(const CalpontSelectExecutionPlan& t)
ReturnedColumnList::const_iterator rcit;
ReturnedColumnList::const_iterator rcit2;
vector<ReturnedColumn*>::const_iterator it, it2;
SelectList::const_iterator sit, sit2;
ColumnMap::const_iterator map_it, map_it2;

View File

@ -279,7 +279,6 @@ void FunctionColumn::unserialize(messageqcpp::ByteStream& b)
uint32_t size, i;
//SRCP rc;
SPTP pt;
FunctionParm::iterator it;
fFunctionParms.erase(fFunctionParms.begin(), fFunctionParms.end());
fSimpleColumnList.clear();

View File

@ -87,6 +87,14 @@ using namespace logging;
#include "jlf_tuplejoblist.h"
#include "mcs_decimal.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
namespace
{
using namespace joblist;
@ -3558,3 +3566,6 @@ void JLF_ExecPlanToJobList::addJobSteps(JobStepVector& nsv, JobInfo& jobInfo, bo
} // end of joblist namespace
// vim:ts=4 sw=4:
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -35,6 +35,14 @@ using namespace joblist;
#include "jlf_graphics.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
namespace jlf_graphics
{
@ -411,3 +419,6 @@ ostream& writeDotCmds(ostream& dotFile, const JobStepVector& query, const JobSte
// vim:ts=4 sw=4 syntax=cpp:
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -73,6 +73,14 @@ using namespace joblist;
#include "statistics.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
namespace
{
@ -189,7 +197,7 @@ inline void addColumnInExpToRG(uint32_t cid, vector<uint32_t>& pos, vector<uint3
inline void addColumnsToRG(uint32_t tid, vector<uint32_t>& pos, vector<uint32_t>& oids,
vector<uint32_t>& keys, vector<uint32_t>& scale, vector<uint32_t>& precision,
vector<CalpontSystemCatalog::ColDataType>& types,
vector<uint32_t>& csNums,
vector<uint32_t>& csNums,
TableInfoMap& tableInfoMap, JobInfo& jobInfo)
{
// -- the selected columns
@ -494,7 +502,7 @@ void adjustLastStep(JobStepVector& querySteps, DeliveredTableMap& deliverySteps,
deliverySteps[CNX_VTABLE_ID] = ws;
}
// TODO MCOL-894 we don't need to run sorting|distinct
// TODO MCOL-894 we don't need to run sorting|distinct
// every time
// if ((jobInfo.limitCount != (uint64_t) - 1) ||
// (jobInfo.constantCol == CONST_COL_EXIST) ||
@ -532,7 +540,7 @@ void adjustLastStep(JobStepVector& querySteps, DeliveredTableMap& deliverySteps,
AnyDataListSPtr spdlIn(new AnyDataList());
RowGroupDL* dlIn;
if (jobInfo.orderByColVec.size() > 0)
if (jobInfo.orderByColVec.size() > 0)
dlIn = new RowGroupDL(jobInfo.orderByThreads, jobInfo.fifoSize);
else
dlIn = new RowGroupDL(1, jobInfo.fifoSize);
@ -2610,7 +2618,7 @@ SP_JoinInfo joinToLargeTable(uint32_t large, TableInfoMap& tableInfoMap, JobInfo
// @bug6158, try to put BPS on large side if possible
if (tsas && smallSides.size() == 1)
{
SJSTEP sspjs = tableInfoMap[cId].fQuerySteps.back(), get();
SJSTEP sspjs = tableInfoMap[cId].fQuerySteps.back();
BatchPrimitive* sbps = dynamic_cast<BatchPrimitive*>(sspjs.get());
TupleHashJoinStep* sthjs = dynamic_cast<TupleHashJoinStep*>(sspjs.get());
@ -4520,7 +4528,7 @@ SJSTEP unionQueries(JobStepVector& queries, uint64_t distinctUnionNum, JobInfo&
const vector<uint32_t>& precisionIn = rg.getPrecision();
const vector<CalpontSystemCatalog::ColDataType>& typesIn = rg.getColTypes();
const vector<uint32_t>& csNumsIn = rg.getCharsetNumbers();
for (uint64_t j = 0; j < colCount; ++j)
{
queryColTypes[j][i].colDataType = typesIn[j];
@ -4623,3 +4631,7 @@ SJSTEP unionQueries(JobStepVector& queries, uint64_t distinctUnionNum, JobInfo&
}
// vim:ts=4 sw=4:
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -47,6 +47,13 @@ using namespace execplan;
#include "atomicops.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
namespace joblist
{
int JobList::fPmsConfigured = 0;
@ -1218,5 +1225,9 @@ void TupleJobList::abort()
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// vim:ts=4 sw=4:

View File

@ -92,11 +92,17 @@ using namespace rowgroup;
#include "mcsv1_udaf.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpotentially-evaluated-expression"
// for warnings on typeid :expression with side effects will be evaluated despite being used as an operand to 'typeid'
#endif
namespace
{
using namespace joblist;
void projectSimpleColumn(const SimpleColumn* sc, JobStepVector& jsv, JobInfo& jobInfo)
{
if (sc == NULL)
@ -279,7 +285,7 @@ const JobStepVector doProject(const RetColsVector& retCols, JobInfo& jobInfo)
if (retCols[i]->windowfunctionColumnList().size() > 0)
jobInfo.expressionVec.push_back(key);
else if (find(jobInfo.expressionVec.begin(), jobInfo.expressionVec.end(), key)
else if (find(jobInfo.expressionVec.begin(), jobInfo.expressionVec.end(), key)
== jobInfo.expressionVec.end())
{
jobInfo.returnedExpressions.push_back(sjstep);
@ -887,7 +893,7 @@ const JobStepVector doAggProject(const CalpontSelectExecutionPlan* csep, JobInfo
AggregateColumn* aggc = dynamic_cast<AggregateColumn*>(srcp.get());
bool doDistinct = (csep->distinct() && csep->groupByCols().empty());
// Use this instead of the above line to mimic MariaDB's sql_mode = 'ONLY_FULL_GROUP_BY'
// bool doDistinct = (csep->distinct() &&
// bool doDistinct = (csep->distinct() &&
// csep->groupByCols().empty() &&
// !jobInfo.hasAggregation);
uint32_t tupleKey = -1;
@ -2346,3 +2352,6 @@ SJLP JobListFactory::makeJobList(
}
// vim:ts=4 sw=4:
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -147,7 +147,6 @@ pColScanStep::pColScanStep(
return;
int err, i, mask;
BRM::LBIDRange_v::iterator it;
finishedSending = false;
recvWaiting = 0;
@ -173,7 +172,7 @@ pColScanStep::pColScanStep(
fIsDict = true;
}
// MCOL-641 WIP
else if (fColType.colWidth > 8
else if (fColType.colWidth > 8
&& fColType.colDataType != CalpontSystemCatalog::DECIMAL
&& fColType.colDataType != CalpontSystemCatalog::UDECIMAL)
{

View File

@ -23,7 +23,7 @@
* @file
*/
#ifndef JOBLIST_RESOURCEDISTRIBUTER_H
#define JOBLIST_RESOURCEMANAGER_H
#define JOBLIST_RESOURCEDISTRIBUTER_H
#include <unistd.h>
#include <list>

View File

@ -153,7 +153,6 @@ uint32_t RowEstimator::estimateDistinctValues(const execplan::CalpontSystemCatal
{
switch (ct.colDataType)
{
case CalpontSystemCatalog::BIT:
return 2;

View File

@ -117,7 +117,8 @@ void VirtualTable::addColumn(const SRCP& column)
}
else // new column type has added, but this code is not updated.
{
oss << "not supported column type: " << typeid(*(column.get())).name();
auto & columnType = *(column.get());
oss << "not supported column type: " << typeid(columnType).name();
throw runtime_error(oss.str());
}

View File

@ -337,7 +337,7 @@ void WindowFunctionStep::AddSimplColumn(const vector<SimpleColumn*>& scs,
jobInfo.windowDels.push_back(SRCP((*i)->clone()));
// MCOL-3343 Enable this if we decide to allow Window Functions to run with
// aggregates with no group by. MariaDB allows this. Nobody else in the world does.
// There will be more work to get it to function if we try this.
// There will be more work to get it to function if we try this.
// jobInfo.windowSet.insert(getTupleKey(jobInfo, *i, true));
scProjected.insert(UniqId(*i));
}
@ -499,7 +499,7 @@ void WindowFunctionStep::checkWindowFunction(CalpontSelectExecutionPlan* csep, J
if (colSet.find(key) == colSet.end())
{
jobInfo.deliveredCols.push_back(*j);
// MCOL-3435 Allow Window Functions to run with aggregates with
// MCOL-3435 Allow Window Functions to run with aggregates with
// no group by by inserting a group by for window parameters.
if (hasAggregation)
{
@ -507,7 +507,7 @@ void WindowFunctionStep::checkWindowFunction(CalpontSelectExecutionPlan* csep, J
if (dynamic_cast<AggregateColumn*>(j->get()) == NULL)
{
bool bFound = false;
for (std::vector<SRCP>::iterator igpc = csep->groupByCols().begin();
for (std::vector<SRCP>::iterator igpc = csep->groupByCols().begin();
igpc < csep->groupByCols().end();
++igpc)
{

View File

@ -65,7 +65,7 @@ public:
/** @brief
The name that will be used for display purposes.
*/
const char* table_type() const
const char* table_type() const override
{
return "ColumnStore";
}
@ -85,7 +85,7 @@ public:
This is a list of flags that indicate what functionality the storage engine
implements. The current table flags are documented in handler.h
*/
ulonglong table_flags() const
ulonglong table_flags() const override
{
return int_table_flags;
}
@ -100,7 +100,7 @@ public:
If all_parts is set, MySQL wants to know the flags for the combined
index, up to and including 'part'.
*/
ulong index_flags(uint32_t inx, uint32_t part, bool all_parts) const
ulong index_flags(uint32_t inx, uint32_t part, bool all_parts) const override
{
return 0;
}
@ -113,7 +113,7 @@ public:
send. Return *real* limits of your storage engine here; MySQL will do
min(your_limits, MySQL_limits) automatically.
*/
uint32_t max_supported_record_length() const
uint32_t max_supported_record_length() const override
{
return HA_MAX_REC_LENGTH;
}
@ -121,7 +121,7 @@ public:
/** @brief
Called in test_quick_select to determine if indexes should be used.
*/
virtual double scan_time()
virtual double scan_time() override
{
return (double) (stats.records + stats.deleted) / 20.0 + 10;
}
@ -129,7 +129,7 @@ public:
/** @brief
Analyze table command.
*/
int analyze(THD* thd, HA_CHECK_OPT* check_opt);
int analyze(THD* thd, HA_CHECK_OPT* check_opt) override;
/*
Everything below are methods that we implement in ha_example.cc.
@ -141,7 +141,7 @@ public:
/** @brief
We implement this in ha_example.cc; it's a required method.
*/
int open(const char* name, int mode, uint32_t test_if_locked); // required
int open(const char* name, int mode, uint32_t test_if_locked) override; // required
// MCOL-4282 This function is called by open_tables in sql_base.cc.
// We mutate the optimizer flags here for prepared statements as this
@ -160,71 +160,71 @@ public:
/** @brief
We implement this in ha_example.cc; it's a required method.
*/
int close(void); // required
int close(void) override; // required
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int write_row(const uchar* buf);
int write_row(const uchar* buf) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
void start_bulk_insert(ha_rows rows, uint flags = 0) ;
void start_bulk_insert_from_cache(ha_rows rows, uint flags = 0) ;
void start_bulk_insert(ha_rows rows, uint flags = 0) override;
void start_bulk_insert_from_cache(ha_rows rows, uint flags = 0);
/**@bug 2461 - Overloaded end_bulk_insert. MariaDB uses the abort bool, mysql does not. */
int end_bulk_insert() ;
int end_bulk_insert() override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int update_row(const uchar* old_data, const uchar* new_data) override;
int direct_update_rows_init(List<Item> *update_fields);
int direct_update_rows_init(List<Item> *update_fields) override;
int direct_update_rows(ha_rows *update_rows);
int direct_update_rows(ha_rows *update_rows, ha_rows *found_rows);
int direct_update_rows(ha_rows *update_rows, ha_rows *found_rows) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int delete_row(const uchar* buf);
int direct_delete_rows_init();
int direct_delete_rows(ha_rows *deleted_rows);
int delete_row(const uchar* buf) override;
int direct_delete_rows_init() override;
int direct_delete_rows(ha_rows *deleted_rows) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int index_read_map(uchar* buf, const uchar* key,
key_part_map keypart_map, enum ha_rkey_function find_flag);
key_part_map keypart_map, enum ha_rkey_function find_flag) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int index_next(uchar* buf);
int index_next(uchar* buf) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int index_prev(uchar* buf);
int index_prev(uchar* buf) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int index_first(uchar* buf);
int index_first(uchar* buf) override;
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
skip it and and MySQL will treat it as not implemented.
*/
int index_last(uchar* buf);
int index_last(uchar* buf) override;
/** @brief
Unlike index_init(), rnd_init() can be called two consecutive times
@ -234,34 +234,34 @@ public:
cursor to the start of the table; no need to deallocate and allocate
it again. This is a required method.
*/
int rnd_init(bool scan); //required
int rnd_end();
int rnd_next(uchar* buf); ///< required
int rnd_pos(uchar* buf, uchar* pos); ///< required
int rnd_init(bool scan) override; //required
int rnd_end() override;
int rnd_next(uchar* buf) override; ///< required
int rnd_pos(uchar* buf, uchar* pos) override; ///< required
int reset() override;
void position(const uchar* record); ///< required
int info(uint32_t); ///< required
int extra(enum ha_extra_function operation);
int external_lock(THD* thd, int lock_type); ///< required
int delete_all_rows(void);
void position(const uchar* record) override; ///< required
int info(uint32_t) override; ///< required
int extra(enum ha_extra_function operation) override;
int external_lock(THD* thd, int lock_type) override; ///< required
int delete_all_rows(void) override;
ha_rows records_in_range(uint32_t inx, const key_range* min_key,
const key_range* max_key, page_range* res) override;
int delete_table(const char* from);
int rename_table(const char* from, const char* to);
int delete_table(const char* from) override;
int rename_table(const char* from, const char* to) override;
int create(const char* name, TABLE* form,
HA_CREATE_INFO* create_info); ///< required
HA_CREATE_INFO* create_info) override; ///< required
THR_LOCK_DATA** store_lock(THD* thd, THR_LOCK_DATA** to,
enum thr_lock_type lock_type); ///< required
const COND* cond_push(const COND* cond);
enum thr_lock_type lock_type) override; ///< required
const COND* cond_push(const COND* cond) override;
void cond_pop() override;
uint8 table_cache_type()
uint8 table_cache_type() override
{
return HA_CACHE_TBL_NOCACHE;
}
int repair(THD* thd, HA_CHECK_OPT* check_opt);
bool is_crashed() const;
int repair(THD* thd, HA_CHECK_OPT* check_opt) override;
bool is_crashed() const override;
bool isReadOnly() const
{

View File

@ -32,9 +32,10 @@ class StoreFieldMariaDB: public StoreField
Field *m_field;
const CalpontSystemCatalog::ColType &m_type;
public:
StoreFieldMariaDB(Field *f, CalpontSystemCatalog::ColType &type)
StoreFieldMariaDB(Field *f, const CalpontSystemCatalog::ColType &type)
:m_field(f), m_type(type)
{ }
const CalpontSystemCatalog::ColType &type() const { return m_type; }
int32_t colWidth() const override { return m_type.colWidth; }
int32_t precision() const override { return m_type.precision; }

View File

@ -169,8 +169,6 @@ static const string interval_names[] =
"second_microsecond"
};
const unsigned NONSUPPORTED_ERR_THRESH = 2000;
// HDFS is never used nowadays, so don't bother
bool useHdfs = false; // ResourceManager::instance()->useHdfs();
@ -834,7 +832,7 @@ uint32_t doUpdateDelete(THD* thd, gp_walk_info& gwi, const std::vector<COND*>& c
if (isUpdateStatement(thd->lex->sql_command))
args.add("Update");
#if 0
else if (thd->rgi_slave && thd->rgi_slave->m_table_map.count() != 0)
else if (thd->rgi_slave && thd->rgi_slave->m_table_map.count() != 0)
args.add("Row based replication event");
#endif
else
@ -1816,7 +1814,7 @@ uint32_t doUpdateDelete(THD* thd, gp_walk_info& gwi, const std::vector<COND*>& c
errorMsg = "Unknown error caught";
b = 1;
}
// Clear tableOid for the next SQL statement
ci->tableOid = 0;
}
@ -1829,7 +1827,7 @@ uint32_t doUpdateDelete(THD* thd, gp_walk_info& gwi, const std::vector<COND*>& c
//@Bug 2540. Set error status instead of warning
thd->raise_error_printf(ER_INTERNAL_ERROR, errorMsg.c_str());
ci->rc = b;
rc = ER_INTERNAL_ERROR;
rc = ER_INTERNAL_ERROR;
}
if (b == dmlpackageprocessor::DMLPackageProcessor::IDBRANGE_WARNING)
@ -1840,7 +1838,7 @@ uint32_t doUpdateDelete(THD* thd, gp_walk_info& gwi, const std::vector<COND*>& c
ci->rc = b;
// Turn this on as MariaDB doesn't do it until the next phase
thd->abort_on_warning = thd->is_strict_mode();
rc = ER_INTERNAL_ERROR;
rc = ER_INTERNAL_ERROR;
}
else
{

View File

@ -118,7 +118,7 @@ public:
int init_scan() override;
int next_row() override;
int end_scan() override;
void print_error(int, unsigned long);
void print_error(int, unsigned long) override;
};
/*@brief select_handler class*/

View File

@ -63,7 +63,6 @@ namespace
typedef messageqcpp::ByteStream::quadbyte quadbyte;
const quadbyte UNRECOGNIZED_PACKAGE_TYPE = 100;
const quadbyte NO_PKNAME_AVAILABLE = 101;
const std::string DDLProcName = "DDLProc";

View File

@ -19,8 +19,6 @@
//
#include <unistd.h>
using namespace std;
#include <boost/thread/mutex.hpp>
using namespace boost;

View File

@ -255,18 +255,6 @@ struct FdCountEntry
uint16_t segNum;
uint32_t cnt;
FdCacheType_t::iterator fdit;
friend ostream& operator<<(ostream& out, const FdCountEntry& o)
{
out << " o: " << o.oid
<< " d: " << o.dbroot
<< " p: " << o.partNum
<< " s: " << o.segNum
<< " c: " << o.cnt;
return out;
}
}; // FdCountEntry
typedef FdCountEntry FdCountEntry_t;
@ -1380,7 +1368,7 @@ void ioManager::buildOidFileName(const BRM::OID_t oid, uint16_t dbRoot, const ui
// when it's a request for the version buffer, the dbroot comes in as 0 for legacy reasons
if (dbRoot == 0 && oid < 1000)
dbRoot = fdbrm.getDBRootOfVBOID(oid);
fFileOp.getFileNameForPrimProc(oid, file_name, dbRoot, partNum, segNum);
}

View File

@ -489,7 +489,7 @@ inline bool isNullVal(uint32_t length, uint8_t type, const uint8_t* val8)
case 1:
return isNullVal<1>(type, val8);
};
return false;
}
@ -950,87 +950,6 @@ inline int64_t nextColValue(int type,
}
// Runtime-width front end for the nextUnsignedColValue<W>() template.
// Contract (same as the template): *done must be initialized to false and
// *index to 0 before the first call; *done is set to true once no more
// elements remain to be returned.
inline uint64_t nextUnsignedColValueHelper(int type,
                                           int width,
                                           const uint16_t* ridArray,
                                           int NVALS,
                                           int* index,
                                           bool* done,
                                           bool* isNull,
                                           bool* isEmpty,
                                           uint16_t* rid,
                                           uint8_t OutputType, uint8_t* val8, unsigned itemsPerBlk)
{
    // Dispatch on the column byte-width; each branch instantiates the
    // matching compile-time-width specialization.
    if (width == 8)
        return nextUnsignedColValue<8>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType,
                                       val8, itemsPerBlk);

    if (width == 4)
        return nextUnsignedColValue<4>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType,
                                       val8, itemsPerBlk);

    if (width == 2)
        return nextUnsignedColValue<2>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType,
                                       val8, itemsPerBlk);

    if (width == 1)
        return nextUnsignedColValue<1>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType,
                                       val8, itemsPerBlk);

    // Any other width is a caller bug.
    idbassert(0);
    /*NOTREACHED*/
    return 0;
}
// Runtime-width front end for the signed nextColValue<W>() template.
// Contract (same as the template): *done must be initialized to false and
// *index to 0 before the first call; *done is set to true once no more
// elements remain to be returned.
inline int64_t nextColValueHelper(int type,
                                  int width,
                                  const uint16_t* ridArray,
                                  int NVALS,
                                  int* index,
                                  bool* done,
                                  bool* isNull,
                                  bool* isEmpty,
                                  uint16_t* rid,
                                  uint8_t OutputType, uint8_t* val8, unsigned itemsPerBlk)
{
    // Dispatch on the column byte-width; each branch instantiates the
    // matching compile-time-width specialization.
    if (width == 8)
        return nextColValue<8>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType, val8,
                               itemsPerBlk);

    if (width == 4)
        return nextColValue<4>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType, val8,
                               itemsPerBlk);

    if (width == 2)
        return nextColValue<2>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType, val8,
                               itemsPerBlk);

    if (width == 1)
        return nextColValue<1>(type, ridArray, NVALS, index, done, isNull, isEmpty, rid, OutputType, val8,
                               itemsPerBlk);

    // Any other width is a caller bug.
    idbassert(0);
    /*NOTREACHED*/
    return 0;
}
template<int W>
inline void p_Col_ridArray(NewColRequestHeader* in,
NewColResultHeader* out,

View File

@ -102,9 +102,9 @@ void PrimitiveProcessor::p_TokenByScan(const TokenByScanRequestHeader* h,
retTokens = reinterpret_cast<PrimToken*>(&niceRet[rdvOffset]);
retDataValues = reinterpret_cast<DataValue*>(&niceRet[rdvOffset]);
{
void *retp = static_cast<void*>(ret);
void *retp = static_cast<void*>(ret);
memcpy(retp, h, sizeof(PrimitiveHeader) + sizeof(ISMPacketHeader));
}
ret->NVALS = 0;
@ -405,14 +405,12 @@ again:
dict_OffsetIndex++;
}
const char backslash = '\\';
void PrimitiveProcessor::p_Dictionary(const DictInput* in,
void PrimitiveProcessor::p_Dictionary(const DictInput* in,
vector<uint8_t>* out,
bool skipNulls,
bool skipNulls,
uint32_t charsetNumber,
boost::shared_ptr<DictEqualityFilter> eqFilter,
boost::shared_ptr<DictEqualityFilter> eqFilter,
uint8_t eqOp)
{
PrimToken* outToken;

View File

@ -151,7 +151,7 @@ void ColumnCommand::makeStepMsg()
template <int W>
void ColumnCommand::_loadData()
{
using ByteStreamType = typename messageqcpp::ByteStreamType<W>::type;
using ByteStreamType = typename messageqcpp::ByteStreamType<W>::type;
uint32_t wasCached;
uint32_t blocksRead;
uint16_t _mask;
@ -1016,7 +1016,8 @@ ColumnCommand* ColumnCommandFabric::createCommand(messageqcpp::ByteStream& bs)
break;
default:
throw NotImplementedExcept("ColumnCommandFabric::createCommand: unsupported width " + colType.colWidth);
throw NotImplementedExcept(std::string("ColumnCommandFabric::createCommand: unsupported width ")
+ std::to_string(colType.colWidth));
}
return nullptr;
@ -1024,31 +1025,32 @@ ColumnCommand* ColumnCommandFabric::createCommand(messageqcpp::ByteStream& bs)
ColumnCommand* ColumnCommandFabric::duplicate(const ColumnCommandUniquePtr& rhs)
{
if (LIKELY(typeid(*rhs) == typeid(ColumnCommandInt64)))
auto & command = *rhs;
if (LIKELY(typeid(command) == typeid(ColumnCommandInt64)))
{
ColumnCommandInt64* ret = new ColumnCommandInt64();
*ret = *dynamic_cast<ColumnCommandInt64*>(rhs.get());
return ret;
}
else if (typeid(*rhs) == typeid(ColumnCommandInt128))
else if (typeid(command) == typeid(ColumnCommandInt128))
{
ColumnCommandInt128* ret = new ColumnCommandInt128();
*ret = *dynamic_cast<ColumnCommandInt128*>(rhs.get());
return ret;
}
else if (typeid(*rhs) == typeid(ColumnCommandInt8))
else if (typeid(command) == typeid(ColumnCommandInt8))
{
ColumnCommandInt8* ret = new ColumnCommandInt8();
*ret = *dynamic_cast<ColumnCommandInt8*>(rhs.get());
return ret;
}
else if (typeid(*rhs) == typeid(ColumnCommandInt16))
else if (typeid(command) == typeid(ColumnCommandInt16))
{
ColumnCommandInt16* ret = new ColumnCommandInt16();
*ret = *dynamic_cast<ColumnCommandInt16*>(rhs.get());
return ret;
}
else if (typeid(*rhs) == typeid(ColumnCommandInt32))
else if (typeid(command) == typeid(ColumnCommandInt32))
{
ColumnCommandInt32* ret = new ColumnCommandInt32();
*ret = *dynamic_cast<ColumnCommandInt32*>(rhs.get());

View File

@ -147,7 +147,6 @@ uint32_t lowPriorityThreads;
int directIOFlag = O_DIRECT;
int noVB = 0;
const uint8_t fMaxColWidth(8);
BPPMap bppMap;
boost::mutex bppLock;
@ -1287,7 +1286,7 @@ struct BPPHandler
// threads lying around
std::vector<uint32_t> bppKeys;
std::vector<uint32_t>::iterator bppKeysIt;
~BPPHandler()
{
boost::mutex::scoped_lock scoped(bppLock);
@ -1553,7 +1552,7 @@ struct BPPHandler
return *ret->second;
}
}
inline void deleteDJLock(uint32_t uniqueID)
{
boost::mutex::scoped_lock lk(djMutex);
@ -1564,7 +1563,7 @@ struct BPPHandler
djLock.erase(it);
}
}
int addJoinerToBPP(ByteStream& bs, const posix_time::ptime& dieTime)
{
SBPPV bppv;
@ -1618,8 +1617,8 @@ struct BPPHandler
}
boost::unique_lock<shared_mutex> lk(getDJLock(uniqueID));
for (i = 0; i < bppv->get().size(); i++)
{
err = bppv->get()[i]->endOfJoiner();
@ -1699,7 +1698,7 @@ struct BPPHandler
if (posix_time::second_clock::universal_time() > dieTime)
{
// XXXPAT: going to let this fall through and delete jobs for
// XXXPAT: going to let this fall through and delete jobs for
// uniqueID if there are any. Not clear what the downside is.
/*
lk.unlock();

View File

@ -18,7 +18,7 @@
#ifndef READTASK_H_
#define READTASH_H_
#define READTASK_H_
#include "PosixTask.h"
@ -30,9 +30,9 @@ class ReadTask : public PosixTask
public:
ReadTask(int sock, uint length);
virtual ~ReadTask();
bool run();
private:
ReadTask();
};

View File

@ -69,10 +69,10 @@ void printUsage() {
struct scoped_closer {
scoped_closer(int f) : fd(f) { }
~scoped_closer() {
~scoped_closer() {
int s_errno = errno;
::close(fd);
errno = s_errno;
errno = s_errno;
}
int fd;
};
@ -83,7 +83,7 @@ void makeTestObject(const char *dest)
int objFD = open(dest, O_WRONLY | O_CREAT | O_TRUNC, 0600);
assert(objFD >= 0);
scoped_closer s1(objFD);
for (int i = 0; i < 2048; i++)
assert(write(objFD, &i, 4) == 4);
}
@ -95,7 +95,7 @@ void makeTestJournal(const char *dest)
int journalFD = open(dest, O_WRONLY | O_CREAT | O_TRUNC, 0600);
assert(journalFD >= 0);
scoped_closer s2(journalFD);
char header[] = "{ \"version\" : 1, \"max_offset\": 39 }";
size_t result = write(journalFD, header, strlen(header) + 1);
assert(result==(strlen(header) + 1));
@ -149,7 +149,7 @@ int getSocket()
}
int sessionSock = -1; // tester uses this end of the connection
int serverSock = -1;
int serverSock = -1;
int clientSock = -1; // have the Tasks use this end of the connection
void acceptConnection()
@ -162,13 +162,13 @@ void acceptConnection()
memset(&sa, 0, sizeof(sa));
sa.sun_family = AF_UNIX;
memcpy(&sa.sun_path[1], "testing", 7);
err = ::bind(serverSock, (struct sockaddr *) &sa, sizeof(sa));
assert(err == 0);
err = ::listen(serverSock, 2);
assert(err == 0);
}
sessionSock = ::accept(serverSock, NULL, NULL);
assert(sessionSock > 0);
}
@ -181,7 +181,7 @@ void makeConnection()
memset(&sa, 0, sizeof(sa));
sa.sun_family = AF_UNIX;
memcpy(&sa.sun_path[1], "testing", 7);
clientSock = ::socket(AF_UNIX, SOCK_STREAM, 0);
assert(clientSock > 0);
sleep(1); // let server thread get to accept()
@ -189,7 +189,7 @@ void makeConnection()
assert(err == 0);
t.join();
}
bool opentask(bool connectionTest=false)
{
// going to rely on msgs being smaller than the buffer here
@ -199,18 +199,18 @@ bool opentask(bool connectionTest=false)
open_cmd *cmd = (open_cmd *) &hdr[1];
string testFile = "metadataJournalTest";
// open/create a file named 'opentest1'
const char *filename = string(homepath.string()+"/"+prefix+"/"+testFile).c_str();
std::string filename = homepath.string()+"/"+prefix+"/"+testFile;
hdr->type = SM_MSG_START;
hdr->flags = 0;
hdr->payloadLen = sizeof(*cmd) + strlen(filename);
hdr->payloadLen = sizeof(*cmd) + filename.size();
cmd->opcode = OPEN;
cmd->openmode = O_WRONLY | O_CREAT;
cmd->flen = 19;
strcpy((char *) cmd->filename, filename);
strcpy((char *) cmd->filename, filename.c_str());
cout << "open file " << filename << endl;
::unlink(filename);
::unlink(filename.c_str());
// set payload to be shorter than actual message lengh
// and send a shortened message.
if (connectionTest)
@ -288,7 +288,7 @@ bool replicatorTest()
// test newObject
repli->newObject(newobject,data,0,10);
//check file contents
fd = ::open(newObjectCacheFullPath.c_str(), O_RDONLY);
err = ::read(fd, buf, sizeof(buf));
@ -414,7 +414,7 @@ bool writetask()
int fd = ::open(filename, O_CREAT | O_RDWR, 0666);
assert(fd > 0);
scoped_closer f(fd);
uint8_t buf[1024];
sm_msg_header *hdr = (sm_msg_header *) buf;
write_cmd *cmd = (write_cmd *) &hdr[1];
@ -436,7 +436,7 @@ bool writetask()
assert(result == static_cast<ssize_t>(hdr->payloadLen));
w.run();
// verify response
int err = ::recv(sessionSock, buf, sizeof(buf), MSG_DONTWAIT);
sm_response *resp = (sm_response *) buf;
@ -467,26 +467,26 @@ bool appendtask()
scoped_closer f(fd);
int err = ::write(fd, "testjunk", 8);
assert(err == 8);
uint8_t buf[1024];
append_cmd *cmd = (append_cmd *) buf;
uint8_t *data;
cmd->opcode = APPEND;
cmd->count = 9;
cmd->flen = 11;
memcpy(&cmd->filename, filename, cmd->flen);
data = (uint8_t *) &cmd->filename[cmd->flen];
memcpy(data, "123456789", cmd->count);
int payloadLen = sizeof(*cmd) + cmd->flen + cmd->count;
AppendTask a(clientSock, payloadLen);
ssize_t result = ::write(sessionSock, cmd, payloadLen);
assert(result==(payloadLen));
a.run();
// verify response
err = ::recv(sessionSock, buf, sizeof(buf), MSG_DONTWAIT);
sm_response *resp = (sm_response *) buf;
@ -495,7 +495,7 @@ bool appendtask()
assert(resp->header.payloadLen == sizeof(ssize_t));
assert(resp->header.flags == 0);
assert(resp->returnCode == 9);
//check file contents
::lseek(fd, 0, SEEK_SET);
err = ::read(fd, buf, sizeof(buf));
@ -524,14 +524,14 @@ void unlinktask(bool connectionTest=false)
meta.writeMetadata();
assert(bf::exists(fullPathMeta));
uint8_t buf[1024];
unlink_cmd *cmd = (unlink_cmd *) buf;
cmd->opcode = UNLINK;
cmd->flen = strlen(filename);
memcpy(&cmd->filename, filename, cmd->flen);
// set payload to be shorter than actual message lengh
// and send a shortened message.
if (connectionTest)
@ -575,11 +575,11 @@ void unlinktask(bool connectionTest=false)
// confirm it no longer exists
assert(!bf::exists(fullPathMeta));
}
// delete it again, make sure we get an error message & reasonable error code
// Interesting. boost::filesystem::remove() doesn't consider it an error if the file doesn't
// exist. Need to look into the reasoning for that, and decide whether IOC
// should return an error anyway. For now, this test below doesn't get
// should return an error anyway. For now, this test below doesn't get
// an error msg.
#if 0
memset(buf, 0, 1024);
@ -592,7 +592,7 @@ void unlinktask(bool connectionTest=false)
assert(result==(sizeof(unlink_cmd) + cmd->flen));
u2.run();
// verify response
err = ::recv(sessionSock, buf, 1024, MSG_DONTWAIT);
resp = (sm_response *) buf;
@ -604,7 +604,7 @@ void unlinktask(bool connectionTest=false)
err = (*(int *) resp->payload);
assert(err == ENOENT);
#endif
cout << "unlink task OK" << endl;
}
@ -616,17 +616,17 @@ bool stattask(bool connectionTest=false)
string Metafilename = prefix + "/stattest1";
string fullFilename = Config::get()->getValue("ObjectStorage", "metadata_path") + "/" + Metafilename + ".meta";
::unlink(fullFilename.c_str());
makeTestMetadata(fullFilename.c_str(),testObjKey);
uint8_t buf[1024];
stat_cmd *cmd = (stat_cmd *) buf;
cmd->opcode = STAT;
cmd->flen = filename.length();
strcpy((char *) cmd->filename, filename.c_str());
// set payload to be shorter than actual message lengh
// and send a shortened message.
if (connectionTest)
@ -694,15 +694,15 @@ bool IOCTruncate()
}
Cache *cache = Cache::get();
cache->reset();
bf::path cachePath = ioc->getCachePath();
bf::path journalPath = ioc->getJournalPath();
bf::path metaPath = ioc->getMetadataPath();
bf::path cloudPath = ls->getPrefix();
bf::path cloudPath = ls->getPrefix();
// metaPath doesn't necessarily exist until a MetadataFile instance is created
bf::create_directories(metaPath);
/* start with one object in cloud storage
truncate past the end of the object
verify nothing changed & got success
@ -712,14 +712,14 @@ bool IOCTruncate()
truncate at 0 bytes
verify file now looks empty
verify the object was deleted
add 2 8k test objects and a journal against the second one
truncate @ 10000 bytes
verify all files still exist
truncate @ 6000 bytes, 2nd object & journal were deleted
truncate @ 0 bytes, verify no files are left
*/
bf::path metadataFile = metaPath/prefix/"test-file.meta";
bf::path objectPath = cloudPath/testObjKey;
bf::path cachedObjectPath = cachePath/prefix/testObjKey;
@ -729,7 +729,7 @@ bool IOCTruncate()
int err;
uint8_t buf[1<<14];
int *buf32 = (int *) buf;
/* Need to enable this later.
// Extend the test file to 10000 bytes
err = ioc->truncate(testFile, 10000);
@ -742,12 +742,12 @@ bool IOCTruncate()
for (int i = 2048; i < 2500; i++)
assert(buf32[i] == 0);
*/
err = ioc->truncate(testFile, 4000);
assert(!err);
MetadataFile meta(metaTestFile);
assert(meta.getLength() == 4000);
// read the data, make sure there are only 4000 bytes & the object still exists
err = ioc->read(testFile, buf, 0, 8192);
assert(err == 4000);
@ -756,7 +756,7 @@ bool IOCTruncate()
err = ioc->read(testFile, buf, 4005, 1);
assert(err == 0);
assert(bf::exists(objectPath));
// truncate to 0 bytes, make sure everything is consistent with that, and the object no longer exists
err = ioc->truncate(testFile, 0);
assert(!err);
@ -769,18 +769,18 @@ bool IOCTruncate()
sync->forceFlush();
sleep(1); // give Sync a chance to delete the object from the cloud
assert(!bf::exists(objectPath));
// recreate the meta file, make a 2-object version
ioc->unlink(testFile);
makeTestMetadata(metadataFile.string().c_str(),testObjKey);
makeTestObject(objectPath.string().c_str());
meta = MetadataFile(metaTestFile);
bf::path secondObjectPath = cloudPath / meta.addMetadataObject(testFile, 8192).key;
bf::path cachedSecondObject = cachePath/prefix/secondObjectPath.filename();
makeTestObject(secondObjectPath.string().c_str());
meta.writeMetadata();
// make sure there are 16k bytes, and the data is valid before going forward
memset(buf, 0, sizeof(buf));
err = ioc->read(testFile, buf, 0, sizeof(buf));
@ -789,7 +789,7 @@ bool IOCTruncate()
assert(buf32[i] == (i % 2048));
assert(bf::exists(cachedSecondObject));
assert(bf::exists(cachedObjectPath));
// truncate to 10k, make sure everything looks right
err = ioc->truncate(testFile, 10240);
assert(!err);
@ -801,7 +801,7 @@ bool IOCTruncate()
assert(buf32[i] == (i % 2048));
err = ioc->read(testFile, buf, 10239, 10);
assert(err == 1);
// truncate to 6000 bytes, make sure second object got deleted
err = ioc->truncate(testFile, 6000);
meta = MetadataFile(metaTestFile);
@ -812,7 +812,7 @@ bool IOCTruncate()
sleep(1); // give Synchronizer a chance to delete the file from the 'cloud'
assert(!bf::exists(secondObjectPath));
assert(!bf::exists(cachedSecondObject));
cache->reset();
ioc->unlink(testFile);
@ -839,12 +839,12 @@ bool truncatetask(bool connectionTest=false)
uint8_t buf[1024];
truncate_cmd *cmd = (truncate_cmd *) buf;
cmd->opcode = TRUNCATE;
cmd->length = 1000;
cmd->flen = strlen(filename);
strcpy((char *) cmd->filename, filename);
// set payload to be shorter than actual message lengh
// and send a shortened message.
if (connectionTest)
@ -852,7 +852,7 @@ bool truncatetask(bool connectionTest=false)
size_t result = ::write(sessionSock, cmd, sizeof(*cmd) + cmd->flen);
assert(result==(sizeof(*cmd) + cmd->flen));
// set payload to be correct length again
if (connectionTest)
cmd->flen += 2;
@ -889,7 +889,7 @@ bool truncatetask(bool connectionTest=false)
meta = MetadataFile(Metafilename);
assert(meta.getLength() == 1000);
}
cache->reset();
::unlink(metaFullName.c_str());
cout << "truncate task OK" << endl;
@ -906,12 +906,12 @@ bool listdirtask(bool connectionTest=false)
const char *MetarelPath = metaStr.c_str();
bf::path tmpPath = metaPath/MetarelPath;
// make some dummy files, make sure they are in the list returned.
set<string> files;
int err;
vector<SharedCloser> fdMinders;
bf::create_directories(tmpPath);
for (int i = 0; i < 10; i++) {
string file(tmpPath.string() + "/dummy" + to_string(i));
@ -921,11 +921,11 @@ bool listdirtask(bool connectionTest=false)
assert(err >= 0);
fdMinders.push_back(err);
}
uint8_t buf[8192];
memset(buf,0,sizeof(buf));
listdir_cmd *cmd = (listdir_cmd *) buf;
cmd->opcode = LIST_DIRECTORY;
cmd->plen = strlen(relPath);
memcpy(cmd->path, relPath, cmd->plen);
@ -1001,12 +1001,12 @@ void pingtask()
uint8_t buf[1024];
ping_cmd *cmd = (ping_cmd *) buf;
cmd->opcode = PING;
size_t len = sizeof(*cmd);
ssize_t result = ::write(sessionSock, cmd, sizeof(*cmd));
assert(result==(sizeof(*cmd)));
// process task will look for the full length and
// will wait on the rest of the message.
@ -1029,7 +1029,7 @@ void pingtask()
bool copytask(bool connectionTest=false)
{
/*
/*
make a file
copy it
verify it exists
@ -1046,7 +1046,7 @@ bool copytask(bool connectionTest=false)
MetadataFile meta1(Metasource);
uint8_t buf[1024];
copy_cmd *cmd = (copy_cmd *) buf;
cmd->opcode = COPY;
@ -1055,7 +1055,7 @@ bool copytask(bool connectionTest=false)
f_name *file2 = (f_name *) &cmd->file1.filename[cmd->file1.flen];
file2->flen = strlen(dest);
strncpy(file2->filename, dest, file2->flen);
uint len = (uint64_t) &file2->filename[file2->flen] - (uint64_t) buf;
// set payload to be shorter than actual message lengh
@ -1115,7 +1115,7 @@ bool copytask(bool connectionTest=false)
bool localstorageTest1()
{
LocalStorage ls;
/* TODO: Some stuff */
cout << "local storage test 1 OK" << endl;
return true;
@ -1131,15 +1131,15 @@ bool cacheTest1()
cout << "Cache test 1 requires using local storage" << endl;
return false;
}
cache->reset();
assert(cache->getCurrentCacheSize() == 0);
bf::path storagePath = ls->getPrefix();
bf::path cachePath = cache->getCachePath() / prefix;
vector<string> v_bogus;
vector<bool> exists;
// make sure nothing shows up in the cache path for files that don't exist
v_bogus.push_back("does-not-exist");
cache->read(prefix, v_bogus);
@ -1147,7 +1147,7 @@ bool cacheTest1()
cache->exists(prefix, v_bogus, &exists);
assert(exists.size() == 1);
assert(!exists[0]);
// make sure a file that does exist does show up in the cache path
makeTestObject((storagePath/testObjKey).string().c_str());
@ -1161,7 +1161,7 @@ bool cacheTest1()
assert(exists[0]);
size_t currentSize = cache->getCurrentCacheSize();
assert(currentSize == bf::file_size(cachePath / testObjKey));
// lie about the file being deleted and then replaced
cache->deletedObject(prefix, testObjKey, currentSize);
assert(cache->getCurrentCacheSize() == 0);
@ -1170,7 +1170,7 @@ bool cacheTest1()
cache->exists(prefix, v_bogus, &exists);
assert(exists.size() == 1);
assert(exists[0]);
// cleanup
bf::remove(cachePath / testObjKey);
bf::remove(storagePath / testObjKey);
@ -1185,10 +1185,10 @@ bool mergeJournalTest()
call mergeJournal to process it with various params
verify the expected values
*/
makeTestObject("test-object");
makeTestJournal("test-journal");
int i;
IOCoordinator *ioc = IOCoordinator::get();
size_t len = 8192, tmp;
@ -1201,7 +1201,7 @@ bool mergeJournalTest()
assert(idata[i] == i-5);
for (; i < 2048; i++)
assert(idata[i] == i);
// try different range parameters
// read at the beginning of the change
len = 40;
@ -1212,7 +1212,7 @@ bool mergeJournalTest()
assert(idata[i] == i);
for (; i < 10; i++)
assert(idata[i] == i+5);
// read s.t. beginning of the change is in the middle of the range
len = 24;
data = ioc->mergeJournal("test-object", "test-journal", 8, len, &tmp);
@ -1222,7 +1222,7 @@ bool mergeJournalTest()
assert(idata[i] == i + 2);
for (; i < 6; i++)
assert(idata[i] == i - 3);
// read s.t. end of the change is in the middle of the range
len = 20;
data = ioc->mergeJournal("test-object", "test-journal", 28, len, &tmp);
@ -1232,7 +1232,7 @@ bool mergeJournalTest()
assert(idata[i] == i + 2);
for (; i < 3; i++)
assert(idata[i] == i + 7);
// cleanup
bf::remove("test-object");
bf::remove("test-journal");
@ -1253,60 +1253,60 @@ bool syncTest1()
cout << "syncTest1() requires using local storage at the moment." << endl;
return true;
}
cache->reset();
// delete everything in the fake cloud to make it easier to list later
bf::path fakeCloudPath = ls->getPrefix();
cout << "fakeCLoudPath = " << fakeCloudPath << endl;
for (bf::directory_iterator dir(fakeCloudPath); dir != bf::directory_iterator(); ++dir)
bf::remove(dir->path());
bf::path cachePath = sync->getCachePath();
bf::path journalPath = sync->getJournalPath();
string stmp = config->getValue("ObjectStorage", "metadata_path");
assert(!stmp.empty());
bf::path metaPath = stmp;
// nothing creates the dir yet
bf::create_directories(metaPath);
// make the test obj, journal, and metadata
string journalName = prefix + "/" + testObjKey + ".journal";
makeTestObject((cachePath/prefix/testObjKey).string().c_str());
makeTestJournal((journalPath/journalName).string().c_str());
makeTestMetadata((metaPath/string(metaTestFile + ".meta")).string().c_str(),testObjKey);
cache->newObject(prefix, testObjKey, bf::file_size(cachePath/prefix/testObjKey));
cache->newJournalEntry(prefix, bf::file_size(journalPath/journalName));
vector<string> vObj;
vObj.push_back(testObjKey);
sync->newObjects(prefix, vObj);
sync->forceFlush();
sleep(2); // wait for the job to run
// make sure that it made it to the cloud
bool exists = false;
int err = cs->exists(testObjKey, &exists);
assert(!err);
assert(exists);
sync->newJournalEntry(prefix, testObjKey, 0);
sync->forceFlush();
sleep(1); // let it do what it does
// check that the original objects no longer exist
assert(!cache->exists(prefix, testObjKey));
assert(!bf::exists(journalPath/journalName));
// Replicator doesn't implement all of its functionality yet, need to delete key from the cache manually for now
bf::remove(cachePath/testObjKey);
// check that a new version of object exists in cloud storage
// D'oh, this would have to list the objects to find it, not going to implement
// D'oh, this would have to list the objects to find it, not going to implement
// that everywhere just now. For now, making this test require LocalStorage.
bool foundIt = false;
string newKey;
@ -1321,22 +1321,22 @@ bool syncTest1()
break;
}
}
assert(foundIt);
cache->makeSpace(prefix, cache->getMaxCacheSize()); // clear the cache & make it call sync->flushObject()
// the key should now be back in cloud storage and deleted from the cache
assert(!cache->exists(prefix, newKey));
err = cs->exists(newKey, &exists);
assert(!err && exists);
// make the journal again, call sync->newJournalObject()
makeTestJournal((journalPath / prefix / (newKey + ".journal")).string().c_str());
cache->newJournalEntry(prefix, bf::file_size(journalPath / prefix / (newKey + ".journal")));
sync->newJournalEntry(prefix, newKey, 0);
sync->forceFlush();
sleep(1);
// verify that newkey is no longer in cloud storage, and that another permutation is
err = cs->exists(newKey, &exists);
assert(!err && !exists);
@ -1347,9 +1347,9 @@ bool syncTest1()
foundIt = (MetadataFile::getSourceFromKey(testObjKey) == metaTestFile);
}
assert(foundIt);
// TODO test error paths, pass in some junk
// cleanup, just blow away everything for now
cache->reset();
vector<string> keys;
@ -1359,7 +1359,7 @@ bool syncTest1()
sync->forceFlush();
sleep(1);
ioc->unlink(testFile);
cout << "Sync test 1 OK" << endl;
return true;
}
@ -1378,7 +1378,7 @@ void metadataUpdateTest()
string metaFilePath = metaPath + "/" + "metadataUpdateTest.meta";
::unlink(metaFilePath.c_str());
}
}
void s3storageTest1()
{
@ -1467,10 +1467,10 @@ void IOCReadTest1()
{
/* Generate the test object & metadata
read it, verify result
Generate the journal object
read it, verify the merged result
TODO: do partial reads with an offset similar to what the mergeJournal tests do
TODO: some error path testing
*/
@ -1494,11 +1494,11 @@ void IOCReadTest1()
bf::path metaPath = config->getValue("ObjectStorage", "metadata_path");
assert(!metaPath.empty());
bf::create_directories(metaPath/prefix);
string objFilename = (storagePath/testObjKey).string();
string journalFilename = (journalPath/prefix/testObjKey).string() + ".journal";
string metaFilename = (metaPath/metaTestFile).string() + ".meta";
boost::scoped_array<uint8_t> data(new uint8_t[1<<20]);
memset(data.get(), 0, 1<<20);
@ -1512,7 +1512,7 @@ void IOCReadTest1()
size_t objSize = bf::file_size(objFilename);
err = ioc->read(testFile, data.get(), 0, 1<<20);
assert(err == (int) objSize);
// verify the data
int *data32 = (int *) data.get();
int i;
@ -1520,9 +1520,9 @@ void IOCReadTest1()
assert(data32[i] == i);
for (; i < (1<<20)/4; i++)
assert(data32[i] == 0);
makeTestJournal(journalFilename.c_str());
err = ioc->read(testFile, data.get(), 0, 1<<20);
assert(err == (int) objSize);
for (i = 0; i < 5; i++)
@ -1533,14 +1533,14 @@ void IOCReadTest1()
assert(data32[i] == i);
for (; i < (1<<20)/4; i++)
assert(data32[i] == 0);
err = ioc->read(testFile, data.get(), 9000, 4000);
assert(err==0);
cache->reset();
err = ioc->unlink(testFile);
assert(err>= 0);
cout << "IOC read test 1 OK" << endl;
}
@ -1550,17 +1550,17 @@ void IOCUnlink()
CloudStorage *cs = CloudStorage::get();
Cache *cache = Cache::get();
Synchronizer *sync = Synchronizer::get();
cache->reset();
/*
/*
Make a metadata file with a complex path
make the test object and test journal
delete it at the parent dir level
make sure the parent dir was deleted
make sure the object and journal were deleted
*/
bf::path metaPath = ioc->getMetadataPath();
bf::path cachePath = ioc->getCachePath();
bf::path journalPath = ioc->getJournalPath();
@ -1570,7 +1570,7 @@ void IOCUnlink()
makeTestMetadata(metadataFile.string().c_str(),testObjKey);
makeTestObject(cachedObjPath.string().c_str());
makeTestJournal(cachedJournalPath.string().c_str());
cache->newObject(prefix, cachedObjPath.filename().string(), bf::file_size(cachedObjPath));
cache->newJournalEntry(prefix, bf::file_size(cachedJournalPath));
vector<string> keys;
@ -1579,7 +1579,7 @@ void IOCUnlink()
//sync->newJournalEntry(keys[0]); don't want to end up renaming it
sync->forceFlush();
sleep(1);
// ok, they should be fully 'in the system' now.
// verify that they are
assert(bf::exists(metadataFile));
@ -1588,10 +1588,10 @@ void IOCUnlink()
bool exists;
cs->exists(cachedObjPath.filename().string(), &exists);
assert(exists);
int err = ioc->unlink(testFile);
assert(err == 0);
assert(!bf::exists(metadataFile));
assert(!bf::exists(cachedObjPath));
assert(!bf::exists(cachedJournalPath));
@ -1600,7 +1600,7 @@ void IOCUnlink()
cs->exists(cachedObjPath.filename().string(), &exists);
assert(!exists);
assert(cache->getCurrentCacheSize() == 0);
cout << "IOC unlink test OK" << endl;
}
@ -1666,7 +1666,7 @@ void IOCCopyFile2()
{
// call IOC::copyFile() with non-existant file
IOCoordinator *ioc = IOCoordinator::get();
bf::path fullPath = homepath / prefix / "not-there";
const char *source = fullPath.string().c_str();
bf::path fullPath2 = homepath / prefix / "not-there2";
@ -1675,13 +1675,13 @@ void IOCCopyFile2()
bf::path metaPath = ioc->getMetadataPath();
bf::remove(metaPath/prefix/"not-there.meta");
bf::remove(metaPath/prefix/"not-there2.meta");
int err = ioc->copyFile(source, dest);
assert(err);
assert(errno == ENOENT);
assert(!bf::exists(metaPath/"not-there.meta"));
assert(!bf::exists(metaPath/"not-there2.meta"));
cout << "IOC copy file 2 OK" << endl;
}
@ -1704,7 +1704,7 @@ void IOCCopyFile3()
bf::path destPath = metaPath/prefix/"dest.meta";
bf::path l_sourceFile = homepath / prefix / string("source");
bf::path l_destFile = homepath / prefix / string("dest");
cache->reset();
makeTestObject((cachePath/prefix/copyfileObjKey).string().c_str());
@ -1722,7 +1722,7 @@ void IOCCopyFile3()
err = ioc->read(l_destFile.string().c_str(), buf2, 0, sizeof(buf2));
assert(err == sizeof(buf2));
assert(memcmp(buf1, buf2, 8192) == 0);
assert(ioc->unlink(l_sourceFile.string().c_str()) == 0);
assert(ioc->unlink(l_destFile.string().c_str()) == 0);
sync->forceFlush();
@ -1770,8 +1770,8 @@ void bigMergeJournal1()
buf = ioc->mergeJournal(fNamePath.string().c_str(), jNamePath.string().c_str(), 100, 10, &tmp);
assert(buf);
}
// This should write an incomplete msg(s) to make sure SM does the right thing. Not
// done yet, handing this off to Ben.
void shortMsg()
@ -1786,7 +1786,7 @@ void shortMsg()
size_t size = 27;
std::vector<uint8_t> bufWrite(sizeof(write_cmd)+std::strlen(filename)+size);
sm_msg_header *hdrWrite = (sm_msg_header *) bufWrite.data();
write_cmd *cmdWrite = (write_cmd *) &hdrWrite[1];
uint8_t *dataWrite;
@ -1807,7 +1807,7 @@ void shortMsg()
assert(result==static_cast<ssize_t>(hdrWrite->payloadLen));
w.run();
// verify response
uint8_t bufRead[1024];
int err = ::recv(sessionSock, bufRead, sizeof(bufRead), MSG_DONTWAIT);
@ -1817,7 +1817,7 @@ void shortMsg()
assert(resp->header.payloadLen == sizeof(ssize_t));
assert(resp->header.flags == 0);
assert(resp->returnCode == 9);
std::vector<uint8_t> bufAppend(sizeof(append_cmd)+std::strlen(filename)+size);
uint8_t *dataAppend;
@ -1851,9 +1851,9 @@ void shortMsg()
}
// write and append are the biggest vulnerabilities here b/c those msgs could be sent in multiple
// pieces, are much larger, and thus if there is a crash mid-message it's most likely to happen
// pieces, are much larger, and thus if there is a crash mid-message it's most likely to happen
// during a call to write/append().
// it may not even be possible for CS to write a partial open/stat/read/etc msg, but that should be
// it may not even be possible for CS to write a partial open/stat/read/etc msg, but that should be
// tested as well.
void shortMsgTests()
{
@ -1900,7 +1900,7 @@ int main(int argc, char* argv[])
bf::remove_all(config->getValue("ObjectStorage", "journal_path"));
bf::remove_all(config->getValue("LocalStorage", "path"));
bf::remove_all(config->getValue("Cache", "path"));
cout << "connecting" << endl;
makeConnection();
cout << "connected" << endl;
@ -1942,7 +1942,7 @@ int main(int argc, char* argv[])
listdirtask();
pingtask();
copytask();
localstorageTest1();
cacheTest1();
mergeJournalTest();
@ -1954,18 +1954,18 @@ int main(int argc, char* argv[])
IOCUnlink();
IOCCopyFile();
shortMsgTests();
// For the moment, this next one just verifies no error happens as reported by the fcns called.
// It doesn't verify the result yet.
bigMergeJournal1();
// skip the s3 test if s3 is not configured
if (config->getValue("S3", "region") != "")
{
s3storageTest1();
}
else
cout << "To run the S3Storage unit tests, configure the S3 section of test-data/storagemanager.cnf"
cout << "To run the S3Storage unit tests, configure the S3 section of test-data/storagemanager.cnf"
<< endl;
cout << "Cleanup";

View File

@ -95,7 +95,6 @@ const HexLookupTable hex_lookup_table = init_hex_lookup_table();
/* used in the bin2hex function */
const char hex_upper[] = "0123456789ABCDEF";
const char hex_lower[] = "0123456789abcdef";
HexLookupTable init_hex_lookup_table() noexcept
{

View File

@ -21,6 +21,11 @@
using namespace idbdatafile;
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-type-c-linkage"
#endif
extern "C"
{
FileFactoryEnt plugin_instance()
@ -28,3 +33,7 @@ extern "C"
return FileFactoryEnt(IDBDataFile::CLOUD, "cloud", new SMFileFactory(), new SMFileSystem());
}
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif

View File

@ -31,6 +31,7 @@
#include <list>
#include <stdint.h>
#include <limits>
#include <memory>
#undef min
#undef max
@ -102,7 +103,7 @@ public:
};
SimpleAllocator() throw() {}
SimpleAllocator(boost::shared_ptr<SimplePool> pool) throw()
SimpleAllocator(std::shared_ptr<SimplePool> pool) throw()
{
fPool = pool;
}
@ -158,10 +159,10 @@ public:
inline void setPool(SimplePool* pool)
{
fPool = pool;
fPool.reset(pool);
}
boost::shared_ptr<SimplePool> fPool;
std::shared_ptr<SimplePool> fPool;
};

View File

@ -45,32 +45,16 @@ using namespace std;
namespace
{
const uint64_t MAGIC_NUMBER = 0xfdc119a384d0778eULL;
const uint64_t VERSION_NUM1 = 1;
const uint64_t VERSION_NUM2 = 2;
const uint64_t VERSION_NUM3 = 3;
const int PTR_SECTION_OFFSET = compress::CompressInterface::HDR_BUF_LEN;
// version 1.1 of the chunk data has a short header
// QuickLZ compressed data never has the high bit set on the first byte
const uint8_t CHUNK_MAGIC1 = 0xff;
const int SIG_OFFSET = 0;
const int CHECKSUM_OFFSET = 1;
const int LEN_OFFSET = 5;
const unsigned HEADER_SIZE = 9;
/* version 1.2 of the chunk data changes the hash function used to calculate
* checksums. We can no longer use the algorithm used in ver 1.1. Everything
* else is the same
*/
const uint8_t CHUNK_MAGIC2 = 0xfe;
/* version 2.0 of the chunk data uses a new compression algo. For us, because of
* the finite number of block sizes we compress, the first byte of the compressed
* data will always be 0x80, so it can't be confused with V1.0 data (that has no
* header).
*/
const uint8_t CHUNK_MAGIC3 = 0xfd;
// The max number of lbids to be stored in segment file.
const uint32_t LBID_MAX_SIZE = 10;

View File

@ -1378,7 +1378,7 @@ DataConvert::StringToUDecimal(const datatypes::SystemCatalog::TypeAttributesStd&
const std::string& data, bool& pushWarning)
{
const cscDataType typeCode= datatypes::SystemCatalog::UDECIMAL;
// UDECIMAL numbers may not be negative
if (LIKELY(colType.colWidth == 16))
{
@ -1532,8 +1532,8 @@ DataConvert::StringToFloat(cscDataType typeCode,
if (floatvalue < 0.0 &&
typeCode == datatypes::SystemCatalog::UFLOAT &&
floatvalue != joblist::FLOATEMPTYROW &&
floatvalue != joblist::FLOATNULL)
floatvalue != static_cast<float>(joblist::FLOATEMPTYROW) &&
floatvalue != static_cast<float>(joblist::FLOATNULL))
{
value = 0.0; // QQ: should it assign floatvalue?
pushWarning = true;
@ -1595,8 +1595,8 @@ DataConvert::StringToDouble(cscDataType typeCode,
if (doublevalue < 0.0 &&
typeCode == datatypes::SystemCatalog::UDOUBLE &&
doublevalue != joblist::DOUBLEEMPTYROW &&
doublevalue != joblist::DOUBLENULL)
doublevalue != static_cast<double>(joblist::DOUBLEEMPTYROW) &&
doublevalue != static_cast<double>(joblist::DOUBLENULL))
{
doublevalue = 0.0; // QQ: should it assign "value" ?
pushWarning = true;

View File

@ -362,7 +362,7 @@ bool timeZoneToOffset(const char *str, std::string::size_type length, long *offs
return 1;
*offset = offset_tmp;
return 0;
}
@ -1111,11 +1111,11 @@ struct Time
signed is_neg : 1;
// NULL column value = 0xFFFFFFFFFFFFFFFE
Time() : msecond (0xFFFFFE),
second (0xFF),
minute (0xFF),
hour (0xFFF),
day (0x7FF),
Time() : msecond (-2),
second (-1),
minute (-1),
hour (-1),
day (-1),
is_neg (0b1)
{}
@ -1143,12 +1143,12 @@ struct Time
inline
void Time::reset()
{
msecond = 0xFFFFFE;
second = 0xFF;
minute = 0xFF;
hour = 0xFFF;
msecond = -2;
second = -1;
minute = -1;
hour = -1;
is_neg = 0b1;
day = 0x7FF;
day = -1;
}
inline
@ -1706,13 +1706,13 @@ inline int128_t strtoll128(const char* data, bool& saturate, char** ep)
if (*data == '\0')
{
if (ep)
if (ep)
*ep = (char*)data;
return res;
}
// skip leading whitespace characters
while (*data != '\0' &&
while (*data != '\0' &&
(*data == ' ' || *data == '\t' || *data == '\n'))
data++;

View File

@ -128,7 +128,7 @@ long long dateGet( uint64_t time, IntervalColumn::interval_type unit, bool dateT
return (sec * 1000000) + msec;
default:
throw runtime_error("unit type is not supported: " + unit);
throw runtime_error(std::string("unit type is not supported: ") + std::to_string(unit));
};
}
@ -215,13 +215,11 @@ long long timeGet( uint64_t time, IntervalColumn::interval_type unit )
return (sec * 1000000) + msec;
default:
throw runtime_error("unit type is not supported: " + unit);
};
throw runtime_error(std::string("unit type is not supported: ") + std::to_string(unit));
}; };
}
}
namespace funcexp
{

View File

@ -369,7 +369,7 @@ public:
uint64_t getUintVal(rowgroup::Row& row,
FunctionParm& fp,
bool& isNull,
execplan::CalpontSystemCatalog::ColType& op_ct);
execplan::CalpontSystemCatalog::ColType& op_ct) override;
};

View File

@ -155,14 +155,12 @@ private:
config::Config* fConfig; /// config file ptr
};
}//namespace logging
namespace std
{
template<> inline void swap<logging::Message>(logging::Message& lhs, logging::Message& rhs)
// ADL-visible swap overload for logging::Message, declared in namespace
// logging so generic code using the `using std::swap; swap(a, b);` idiom
// picks it up via argument-dependent lookup. Delegates to the member
// swap(), which is presumably cheaper than the default move-based swap
// — TODO confirm against Message::swap's implementation.
inline void swap(logging::Message& lhs, logging::Message& rhs)
{
lhs.swap(rhs);
}
}//namespace std
}//namespace logging
#endif

View File

@ -51,68 +51,6 @@ inline double cvtArgToDouble(int t, const char* v)
return d;
}
/// Interpret a raw UDF argument as a 64-bit signed integer.
///
/// @param t  UDF result-type code for the argument (INT_RESULT,
///           REAL_RESULT, DECIMAL_RESULT, STRING_RESULT, ROW_RESULT).
/// @param v  Pointer to the raw argument storage: a long long for
///           INT_RESULT, a double for REAL_RESULT, a C string for
///           DECIMAL_RESULT/STRING_RESULT.
/// @return   The converted value; 0 for ROW_RESULT (unsupported here).
inline long long cvtArgToInt(int t, const char* v)
{
    long long result = 0;

    switch (t)
    {
        case INT_RESULT:
            // Argument storage holds a native long long.
            result = *reinterpret_cast<const long long*>(v);
            break;

        case REAL_RESULT:
            // Truncate the double toward zero, as the C cast does.
            result = static_cast<long long>(*reinterpret_cast<const double*>(v));
            break;

        case DECIMAL_RESULT:
        case STRING_RESULT:
            // Decimals arrive as text; base 0 accepts decimal/octal/hex.
            result = strtoll(v, nullptr, 0);
            break;

        case ROW_RESULT:
            // Rows cannot be reduced to a scalar — leave the default 0.
            break;
    }

    return result;
}
/// Render a raw UDF argument as a std::string.
///
/// @param t  UDF result-type code for the argument (INT_RESULT,
///           REAL_RESULT, DECIMAL_RESULT, STRING_RESULT, ROW_RESULT).
/// @param v  Pointer to the raw argument storage (see cvtArgToInt).
/// @return   Text form of the value; empty string for ROW_RESULT.
inline string cvtArgToString(int t, const char* v)
{
    string result;

    switch (t)
    {
        case INT_RESULT:
        {
            // Format the stored long long via a stream, matching the
            // default operator<< representation.
            ostringstream os;
            os << *reinterpret_cast<const long long*>(v);
            result = os.str();
            break;
        }

        case REAL_RESULT:
        {
            // Format the stored double with default stream precision.
            ostringstream os;
            os << *reinterpret_cast<const double*>(v);
            result = os.str();
            break;
        }

        case DECIMAL_RESULT:
        case STRING_RESULT:
            // Already a NUL-terminated C string — copy it directly.
            result = v;
            break;

        case ROW_RESULT:
            // Unsupported: return the default-constructed empty string.
            break;
    }

    return result;
}
}
/****************************************************************************
@ -143,14 +81,14 @@ extern "C"
//=======================================================================
/**
* regr_avgx
* regr_avgx
*/
// Per-group accumulator for the regr_avgx() aggregate UDF: tracks the
// running sum of x values and the row count so the final call can
// compute sumx / cnt. Allocated with malloc in the _init hook and
// released with free in the _deinit hook.
struct regr_avgx_data
{
long double sumx;   // running sum of the x (independent) argument
int64_t cnt;        // number of rows accumulated so far
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -171,7 +109,7 @@ extern "C"
{
initid->decimals += 4;
}
if (!(data = (struct regr_avgx_data*) malloc(sizeof(struct regr_avgx_data))))
{
strmov(message,"Couldn't allocate memory");
@ -190,7 +128,7 @@ extern "C"
// Cleanup hook for the regr_avgx aggregate UDF: releases the per-group
// state buffer stashed in initid->ptr (presumably the regr_avgx_data
// allocated by regr_avgx_init — confirm against the init hook).
// free(NULL) is a no-op, so this is safe even if init never ran.
void regr_avgx_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -239,21 +177,21 @@ extern "C"
{
*is_null = 1;
}
return valOut;
}
//=======================================================================
/**
* regr_avgy
* regr_avgy
*/
// Per-group accumulator for the regr_avgy() aggregate UDF: tracks the
// running sum of y values and the row count so the final call can
// compute sumy / cnt. Allocated with malloc in the _init hook and
// released with free in the _deinit hook.
struct regr_avgy_data
{
long double sumy;   // running sum of the y (dependent) argument
int64_t cnt;        // number of rows accumulated so far
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -275,7 +213,7 @@ extern "C"
{
initid->decimals += 4;
}
if (!(data = (struct regr_avgy_data*) malloc(sizeof(struct regr_avgy_data))))
{
strmov(message,"Couldn't allocate memory");
@ -294,7 +232,7 @@ extern "C"
void regr_avgy_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -349,13 +287,13 @@ extern "C"
//=======================================================================
/**
* regr_count
* regr_count
*/
// Per-group accumulator for the regr_count() aggregate UDF: only the
// number of (x, y) pairs where both arguments are non-NULL is needed.
struct regr_count_data
{
int64_t cnt;        // count of accepted rows
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -385,7 +323,7 @@ extern "C"
void regr_count_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -428,7 +366,7 @@ extern "C"
//=======================================================================
/**
* regr_slope
* regr_slope
*/
struct regr_slope_data
{
@ -438,7 +376,7 @@ extern "C"
long double sumy;
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -457,7 +395,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct regr_slope_data*) malloc(sizeof(struct regr_slope_data))))
{
strmov(message,"Couldn't allocate memory");
@ -479,7 +417,7 @@ extern "C"
void regr_slope_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -549,7 +487,7 @@ extern "C"
//=======================================================================
/**
* regr_intercept
* regr_intercept
*/
struct regr_intercept_data
{
@ -559,7 +497,7 @@ extern "C"
long double sumy;
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -599,7 +537,7 @@ extern "C"
void regr_intercept_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -680,7 +618,7 @@ extern "C"
long double sumy2; // sum of (y squared)
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -722,7 +660,7 @@ extern "C"
void regr_r2_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -820,7 +758,7 @@ extern "C"
long double sumy2; // sum of (y squared)
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -839,7 +777,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct corr_data*) malloc(sizeof(struct corr_data))))
{
strmov(message,"Couldn't allocate memory");
@ -862,7 +800,7 @@ extern "C"
void corr_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -957,7 +895,7 @@ extern "C"
long double sumx;
long double sumx2; // sum of (x squared)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -996,7 +934,7 @@ extern "C"
void regr_sxx_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -1066,7 +1004,7 @@ extern "C"
long double sumy;
long double sumy2; // sum of (y squared)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -1085,7 +1023,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct regr_syy_data*) malloc(sizeof(struct regr_syy_data))))
{
strmov(message,"Couldn't allocate memory");
@ -1105,7 +1043,7 @@ extern "C"
void regr_syy_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -1177,7 +1115,7 @@ extern "C"
long double sumy;
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -1196,7 +1134,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct regr_sxy_data*) malloc(sizeof(struct regr_sxy_data))))
{
strmov(message,"Couldn't allocate memory");
@ -1217,7 +1155,7 @@ extern "C"
void regr_sxy_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -1291,7 +1229,7 @@ extern "C"
long double sumy;
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -1310,7 +1248,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct covar_pop_data*) malloc(sizeof(struct covar_pop_data))))
{
strmov(message,"Couldn't allocate memory");
@ -1331,7 +1269,7 @@ extern "C"
void covar_pop_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -1404,7 +1342,7 @@ extern "C"
long double sumy;
long double sumxy; // sum of (x*y)
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -1423,7 +1361,7 @@ extern "C"
}
initid->decimals = DECIMAL_NOT_SPECIFIED;
if (!(data = (struct covar_samp_data*) malloc(sizeof(struct covar_samp_data))))
{
strmov(message,"Couldn't allocate memory");
@ -1444,7 +1382,7 @@ extern "C"
void covar_samp_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)

View File

@ -18,6 +18,7 @@
#ifndef ROWSTORAGE_H
#define ROWSTORAGE_H
#include "resourcemanager.h"
#include "rowgroup.h"
#include <sys/stat.h>
#include <unistd.h>

View File

@ -33,68 +33,6 @@ inline double cvtArgToDouble(int t, const char* v)
return d;
}
/**
 * Convert a raw UDF argument to a long long.
 *
 * @param t  the argument's result-type tag (INT_RESULT, REAL_RESULT, ...)
 * @param v  pointer to the raw argument bytes
 * @return   the converted value; 0 when no numeric conversion applies
 *           (e.g. ROW_RESULT)
 */
inline long long cvtArgToInt(int t, const char* v)
{
    if (t == INT_RESULT)
        return *((long long*)v);

    if (t == REAL_RESULT)
        return (long long)(*((double*)v));

    if (t == DECIMAL_RESULT || t == STRING_RESULT)
        return strtoll(v, 0, 0);

    // ROW_RESULT and any unrecognized tag: no conversion defined.
    return 0;
}
/**
 * Convert a raw UDF argument to its string representation.
 *
 * @param t  the argument's result-type tag (INT_RESULT, REAL_RESULT, ...)
 * @param v  pointer to the raw argument bytes
 * @return   the formatted value; an empty string when no conversion
 *           applies (e.g. ROW_RESULT)
 */
inline string cvtArgToString(int t, const char* v)
{
    if (t == INT_RESULT)
    {
        ostringstream os;
        os << *((long long*)v);
        return os.str();
    }

    if (t == REAL_RESULT)
    {
        ostringstream os;
        os << *((double*)v);
        return os.str();
    }

    if (t == DECIMAL_RESULT || t == STRING_RESULT)
        return string(v);

    // ROW_RESULT and any unrecognized tag yield an empty string.
    return string();
}
}
/****************************************************************************
@ -494,16 +432,16 @@ extern "C"
//=======================================================================
/**
* avgx connector stub. Exactly the same functionality as the
* built in avg() function. Use to test the performance of the
* API
* avgx connector stub. Exactly the same functionality as the
* built in avg() function. Use to test the performance of the
* API
*/
// Per-group aggregation state for the avgx() test UDF: running sum
// and row count, combined at fetch time to produce the average.
struct avgx_data
{
double sumx;  // running sum of the input values
int64_t cnt;  // number of rows accumulated so far
};
#ifdef _MSC_VER
__declspec(dllexport)
#endif
@ -534,7 +472,7 @@ extern "C"
// UDF teardown hook: releases the avgx aggregation state that the
// matching init function malloc()'d into initid->ptr.
void avgx_deinit(UDF_INIT* initid)
{
free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)
@ -596,7 +534,7 @@ extern "C"
// UDF teardown hook for distinct_count. The free() is deliberately
// commented out — presumably initid->ptr does not own heap memory
// here (or is released elsewhere); TODO(review) confirm against the
// matching init function to rule out a leak.
void distinct_count_deinit(UDF_INIT* initid)
{
// free(initid->ptr);
}
}
#ifdef _MSC_VER
__declspec(dllexport)

View File

@ -1194,9 +1194,6 @@ void WF_udaf::operator()(int64_t b, int64_t e, int64_t c)
fPrev = c;
}
boost::shared_ptr<WindowFunctionType> WF_udaf::makeFunction(int id, const string& name, int ct, mcsv1sdk::mcsv1Context& context, WindowFunctionColumn*);
} //namespace
// vim:ts=4 sw=4:

View File

@ -70,14 +70,17 @@ public:
{
return fUDAFContext;
}
bool getInterrupted()
{
return bInterrupted;
}
bool * getInterruptedPtr()
{
return &bInterrupted;
}
bool getDistinct()
{
return fDistinct;

View File

@ -105,7 +105,7 @@ public:
// The param noBRMFcns suppresses init of the ExtentMap, VSS, VBBM, and CopyLocks.
// It can speed up init if the caller only needs the other structures.
EXPORT DBRM(bool noBRMFcns = false);
EXPORT ~DBRM();
EXPORT ~DBRM() throw();
EXPORT static void refreshShm()
{
@ -113,7 +113,7 @@ public:
ExtentMapImpl::refreshShm();
FreeListImpl::refreshShm();
}
// @bug 1055+ - Added functions below for multiple files per OID enhancement.
/** @brief Get the OID, offset, db root, partition, and segment of a logical block ID.
@ -1026,7 +1026,7 @@ public:
EXPORT void invalidateUncommittedExtentLBIDs(execplan::CalpontSystemCatalog::SCN txnid,
bool allExtents,
std::vector<LBID_t>* plbidList = NULL);
private:
DBRM(const DBRM& brm);
DBRM& operator=(const DBRM& brm);

View File

@ -81,7 +81,7 @@ void LBIDResourceGraph::connectResources(LBID_t start, LBID_t end,
{
vector<ResourceNode*> intersection, reserveList;
RNodes_t::iterator sit;
vector<ResourceNode*>::iterator it, next;
vector<ResourceNode*>::iterator it;
LBID_t i;
#if 0

View File

@ -34,7 +34,7 @@
namespace WriteEngine
{
class Log;
struct ColumnInfo;
class ColumnInfo;
/** @brief A buffer class to store data written to column files
*

View File

@ -37,7 +37,7 @@
namespace WriteEngine
{
class Log;
struct ColumnInfo;
class ColumnInfo;
/** @brief A buffer class to store data written to compressed column files
*
@ -83,10 +83,10 @@ public:
*
* @param startOffset The buffer offset from where the write should begin
* @param writeSize The number of bytes to be written to the file
* @param fillUpWEmpties The flag to fill the buffer with empty magic
* @param fillUpWEmpties The flag to fill the buffer with empty magic
* values up to the block boundary.
*/
virtual int writeToFile(int startOffset, int writeSize,
virtual int writeToFile(int startOffset, int writeSize,
bool fillUpWEmpties = false);
private:

View File

@ -40,7 +40,7 @@ namespace WriteEngine
{
class Log;
struct ColumnInfo;
class ColumnInfo;
/**
* ColumnBufferManager class provides the functionality for multiple threads to
@ -194,7 +194,7 @@ protected:
* @param startOffset The buffer offset where the write should begin
* @param writeSize The number of bytes to be written to the file
* @param fillUpWEmpties The flag to fill the buffer with NULLs up to
* the block boundary.
* the block boundary.
* @return success or fail status
*/
virtual int writeToFileExtentCheck(uint32_t startOffset, uint32_t writeSize,

View File

@ -34,7 +34,7 @@
namespace WriteEngine
{
struct ColumnInfo;
class ColumnInfo;
class Log;
//------------------------------------------------------------------------------

View File

@ -487,7 +487,7 @@ public:
public:
friend class BulkLoad;
friend struct ColumnInfo;
friend class ColumnInfo;
friend class ColumnInfoCompressed;
};

View File

@ -165,18 +165,6 @@ struct QueueShutdown : public unary_function<T&, void>
x.shutdown();
}
};
/**
 * Render an IPv4 address as dotted-quad text (e.g. "127.0.0.1").
 *
 * @param src  the address to format, in network byte order
 * @return     the textual form, or an empty string if conversion fails
 *
 * Fix: inet_ntop() returns NULL on failure, and assigning a NULL
 * const char* to std::string is undefined behavior, so the result is
 * checked before use.
 */
inline const std::string sin_addr2String(const in_addr src)
{
    std::string s;
#ifdef _MSC_VER
    s = inet_ntoa(src);
#else
    char dst[INET_ADDRSTRLEN];

    if (inet_ntop(AF_INET, &src, dst, sizeof(dst)) != NULL)
        s = dst;
#endif
    return s;
}
}
namespace WriteEngine

View File

@ -58,12 +58,8 @@ namespace
// variable name later (to remove the m_ prefix) as time allows.
const uint16_t m_endHeader = DCTNRY_END_HEADER; // end of header flag (0xffff)
const uint16_t m_offSetZero = BYTE_PER_BLOCK; // value for 0 offset (8192)
const int m_lastOffSet = BYTE_PER_BLOCK; // end of last offset
const int m_totalHdrBytes = // # bytes in header
HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE + HDR_UNIT_SIZE;
const int m_bigSpace = // free space in an empty block
BYTE_PER_BLOCK - (m_totalHdrBytes + HDR_UNIT_SIZE);
const int START_HDR1 = // start loc of 2nd offset (HDR1)
HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE;
const int PSEUDO_COL_WIDTH = DICT_COL_WIDTH; // used to convert row count to block count

View File

@ -75,9 +75,6 @@ using namespace idbdatafile;
namespace redistribute
{
// need be consistent with we_config.cpp
const unsigned DEFAULT_FILES_PER_COLUMN_PARTITION = 4;
// static variables
boost::mutex RedistributeWorkerThread::fActionMutex;
volatile bool RedistributeWorkerThread::fStopAction = false;

View File

@ -19,7 +19,6 @@
// $Id: we_dmlcommandproc.cpp 3082 2011-09-26 22:00:38Z chao $
#include <unistd.h>
using namespace std;
#include "bytestream.h"
using namespace messageqcpp;
@ -54,6 +53,8 @@ using namespace BRM;
#include "checks.h"
#include "columnwidth.h"
using namespace std;
namespace WriteEngine
{
//StopWatch timer;
@ -1897,7 +1898,8 @@ uint8_t WE_DMLCommandProc::processBatchInsertBinary(messageqcpp::ByteStream& bs,
bs >> val64;
memcpy(&valD, &val64, 8);
if (valD < 0.0 && valD != joblist::DOUBLEEMPTYROW && valD != joblist::DOUBLENULL)
if (valD < 0.0 && valD != static_cast<double>(joblist::DOUBLEEMPTYROW)
&& valD != static_cast<double>(joblist::DOUBLENULL))
{
valD = 0.0;
pushWarning = true;
@ -1915,7 +1917,8 @@ uint8_t WE_DMLCommandProc::processBatchInsertBinary(messageqcpp::ByteStream& bs,
bs >> val32;
memcpy(&valF, &val32, 4);
if (valF < 0.0 && valF != joblist::FLOATEMPTYROW && valF != joblist::FLOATNULL)
if (valF < 0.0 && valF != static_cast<float>(joblist::FLOATEMPTYROW)
&& valF != static_cast<float>(joblist::FLOATNULL))
{
valF = 0.0;
pushWarning = true;
@ -2585,7 +2588,7 @@ uint8_t WE_DMLCommandProc::rollbackBatchAutoOn(messageqcpp::ByteStream& bs, std:
}
catch ( ... )
{
err = "No such table for oid " + tableOid;
err = std::string("No such table for oid ") + std::to_string(tableOid);
rc = 1;
return rc;
}
@ -2813,7 +2816,7 @@ uint8_t WE_DMLCommandProc::processUpdate(messageqcpp::ByteStream& bs,
}
int rrid = (int) relativeRID / (BYTE_PER_BLOCK / colWidth);
// populate stats.blocksChanged
// populate stats.blocksChanged
if (rrid > preBlkNums[j])
{
preBlkNums[j] = rrid ;
@ -3847,7 +3850,7 @@ uint8_t WE_DMLCommandProc::processFlushFiles(messageqcpp::ByteStream& bs, std::s
}
catch ( ... )
{
err = "Systemcatalog error for tableoid " + tableOid;
err = std::string("Systemcatalog error for tableoid ") + std::to_string(tableOid);
rc = 1;
return rc;
}

View File

@ -111,7 +111,7 @@ public:
{
log(logging::LOG_TYPE_CRITICAL, strerror(errno));
}
void ParentLogChildMessage(const std::string &str)
void ParentLogChildMessage(const std::string &str) override
{
log(logging::LOG_TYPE_INFO, str);
}

View File

@ -146,7 +146,7 @@ std::string WECmdArgs::getCpImportCmdLine()
else if (0 == fLocFile.length()) //No filename given, from job file
aSS << " -f " << fPmFilePath;
}
if (fErrorDir.length() > 0)
aSS << " -L " << fErrorDir;
@ -957,7 +957,7 @@ void WECmdArgs::parseCmdLineArgs(int argc, char** argv)
default:
{
std::string aErr = "Unknown command line option " + aCh;
std::string aErr = std::string("Unknown command line option ") + std::to_string(aCh);
//cout << "Unknown command line option " << aCh << endl;
throw (runtime_error(aErr));
}
@ -1256,7 +1256,7 @@ void WECmdArgs::parseCmdLineArgs(int argc, char** argv)
throw (runtime_error("No schema or local filename specified."));
}
}
/* check for all-or-nothing cmdline args to enable S3 import */
int s3Tmp = (fS3Key.empty() ? 0 : 1) + (fS3Bucket.empty() ? 0 : 1) +
(fS3Secret.empty() ? 0 : 1) + (fS3Region.empty() ? 0 : 1);
@ -1551,7 +1551,7 @@ unsigned int WECmdArgs::getBatchQuantity()
void WECmdArgs::setEnclByAndEscCharFromJobFile(std::string& JobName)
{
if ((fEnclosedChar == 0)) // check anything in Jobxml file
if (fEnclosedChar == 0) // check anything in Jobxml file
{
WEXmlgetter aXmlGetter(JobName);
vector<string> aSections;

View File

@ -742,7 +742,7 @@ void WESDHandler::setup()
else
{
std::string aStr;
aStr = "Encountered NULL WESplClient : " + PmId;
aStr = std::string("Encountered NULL WESplClient : ") + std::to_string(PmId);
cout << aStr << endl;
fLog.logMsg( aStr, MSGLVL_ERROR );
throw WESdHandlerException(aStr);
@ -974,7 +974,7 @@ void WESDHandler::checkForConnections()
if (aNow - fWeSplClients[PmId]->getLastInTime() > 180)
{
std::string aStr;
aStr = "Heartbeats missed - Non Responsive PM" + PmId;
aStr = std::string("Heartbeats missed - Non Responsive PM") + std::to_string(PmId);
fLog.logMsg( aStr, MSGLVL_ERROR );
fWeSplClients[PmId]->onDisconnect();
exit(1); //Otherwise; have to wait till write() comes out

View File

@ -78,7 +78,7 @@ XMLGenData::XMLGenData( )
br = Config::getBulkRoot();
boost::filesystem::path p(br);
#else
boost::filesystem::path p( std::string(Config::getBulkRoot()) );
boost::filesystem::path p{ std::string(Config::getBulkRoot()) };
#endif
p /= JOBDIR;
fParms.insert(ParmList::value_type(PATH, p.string()));