Mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git

Reformat all code to coding standard

Author: Andrew Hutchings
Date: 2017-10-26 17:18:17 +01:00
parent 4985f3456e
commit 01446d1e22

1296 changed files with 403852 additions and 353747 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -88,7 +88,7 @@ public:
/**
* @brief Create a dictionary extent
*
*
* If 'flag' is true, a new file is created with an abbreviated extent.
* If 'flag' is false, then the function adds a full extent to an already open
* file, basically assuming that the file already has 1 or more extents.
@@ -107,7 +107,7 @@ public:
const uint32_t partition,
const uint16_t segment,
BRM::LBID_t& startLbid,
bool flag=true);
bool flag = true);
/**
* @brief Drop dictionary store
@@ -119,10 +119,19 @@ public:
/**
* @brief Accessors
*/
const std::string& getFileName() const { return m_segFileName; }
HWM getHWM() const { return m_hwm; }
const std::string& getFileName() const
{
return m_segFileName;
}
HWM getHWM() const
{
return m_hwm;
}
EXPORT bool getTokenFromArray(Signature& sig);
EXPORT uint64_t getCurLbid(){return m_curLbid;}
EXPORT uint64_t getCurLbid()
{
return m_curLbid;
}
/**
* @brief Insert a signature value to a file block and return token/pointer.
@@ -133,7 +142,7 @@ public:
* @param token - (output) token associated with inserted signature
*/
EXPORT int insertDctnry(const int& sgnature_size,
const unsigned char* sgnature_value, Token& token);
const unsigned char* sgnature_value, Token& token);
/**
* @brief Insert a signature value to a file block and return token/pointer
@@ -146,10 +155,10 @@ public:
* @param tokenBuf - (output) list of tokens for the parsed strings
*/
EXPORT int insertDctnry(const char* buf,
ColPosPair ** pos,
const int totalRow, const int col,
char* tokenBuf,
long long& truncCount);
ColPosPair** pos,
const int totalRow, const int col,
char* tokenBuf,
long long& truncCount);
/**
* @brief Update dictionary store with tokenized strings (for DDL/DML use)
@@ -172,8 +181,8 @@ public:
* @param useTmpSuffix - for Bulk HDFS usage: use or not use *.tmp file suffix
*/
EXPORT int openDctnry(const OID& dctnryOID, const uint16_t dbRoot,
const uint32_t partition, const uint16_t segment,
const bool useTmpSuffix);
const uint32_t partition, const uint16_t segment,
const bool useTmpSuffix);
/**
* @brief copy the dictionary header to buffer
@@ -183,22 +192,36 @@ public:
/**
* @brief Set logger that can be used for logging (primarily by bulk load)
*/
void setLogger(Log* logger) { m_logger = logger; }
void setLogger(Log* logger)
{
m_logger = logger;
}
/**
* @brief Set dictionary column width for this column
*/
void setColWidth(int colWidth) { m_colWidth = colWidth; }
void setColWidth(int colWidth)
{
m_colWidth = colWidth;
}
/**
* @brief Set dictionary default for this column
*/
void setDefault(const std::string& defVal) { m_defVal = defVal; }
void setDefault(const std::string& defVal)
{
m_defVal = defVal;
}
void setImportDataMode( ImportDataMode importMode )
{ m_importDataMode = importMode; }
virtual int checkFixLastDictChunk() {return NO_ERROR;}
{
m_importDataMode = importMode;
}
virtual int checkFixLastDictChunk()
{
return NO_ERROR;
}
//------------------------------------------------------------------------------
// Protected members
@@ -213,7 +236,11 @@ protected:
//
// Clear the dictionary store.
//
void clear() { m_dFile = NULL; m_dctnryOID =(OID)INVALID_NUM; }
void clear()
{
m_dFile = NULL;
m_dctnryOID = (OID)INVALID_NUM;
}
// Expand an abbreviated extent on disk.
int expandDctnryExtent();
@@ -226,8 +253,8 @@ protected:
// getBlockOpCount - get the ordinal position (OP) count from the header
// getEndOp - read OP of the end of header for specified fbo
//
void getBlockOpCount(const DataBlock& fileBlock, int & op_count);
int getEndOp (IDBDataFile* dFile, int fbo, int &op);
void getBlockOpCount(const DataBlock& fileBlock, int& op_count);
int getEndOp (IDBDataFile* dFile, int fbo, int& op);
//
// Initialization
@@ -242,7 +269,7 @@ protected:
int insertDctnry2(Signature& sig);
void insertDctnryHdr( unsigned char* blockBuf, const int& size);
void insertSgnture(unsigned char* blockBuf,
const int& size, unsigned char*value);
const int& size, unsigned char* value);
//
// Preloads the strings from the specified DataBlock. Currently
@@ -252,10 +279,10 @@ protected:
// methods to be overridden by compression classes
// (width argument in createDctnryFile() is string width, not token width)
virtual IDBDataFile* createDctnryFile(const char *name, int width,
const char *mode, int ioBuffSize);
virtual IDBDataFile* createDctnryFile(const char* name, int width,
const char* mode, int ioBuffSize);
virtual IDBDataFile* openDctnryFile(bool useTmpSuffix);
virtual void closeDctnryFile(bool doFlush, std::map<FID,FID> & oids);
virtual void closeDctnryFile(bool doFlush, std::map<FID, FID>& oids);
virtual int numOfBlocksInFile();
Signature m_sigArray[MAX_STRING_CACHE_SIZE]; // string cache
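
Every hunk in this header applies the same convention: one-line inline bodies are expanded onto their own lines with Allman-style braces, spaces are added around assignment operators and after commas, and continuation lines of parameter lists are re-aligned. A minimal sketch of the convention, using a hypothetical accessor class rather than anything from the ColumnStore sources:

// Hypothetical example, not from the ColumnStore sources; it only
// illustrates the style this commit applies.
class Example
{
public:
    // Before: int getWidth() const { return m_width; }
    int getWidth() const
    {
        return m_width;
    }

    // Before: void setWidth(int w) { m_width=w; }
    void setWidth(int w)
    {
        m_width = w;
    }

private:
    int m_width = 0;
};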

View File

@@ -28,33 +28,33 @@
#undef WRITEENGINEDCTSTORE_DLLEXPORT
namespace WriteEngine
{
{
/***********************************************************
* Constructor
***********************************************************/
DctnryStore::DctnryStore()
:m_hashMapFlag(true), m_hashMapSize(MAX_HASHMAP_SIZE)
***********************************************************/
DctnryStore::DctnryStore()
: m_hashMapFlag(true), m_hashMapSize(MAX_HASHMAP_SIZE)
{
m_dctnry.setUseHashMap(m_hashMapFlag);
}
/***********************************************************
* Destructor
***********************************************************/
***********************************************************/
DctnryStore::~DctnryStore()
{
}
}
/***********************************************************
* Open a dictionary store file
***********************************************************/
const int DctnryStore::openDctnryStore(const OID& dctnryOID,
const OID& treeOID,
const OID& listOID,
const uint16_t dbRoot,
const uint32_t partition,
const uint16_t segment)
***********************************************************/
const int DctnryStore::openDctnryStore(const OID& dctnryOID,
const OID& treeOID,
const OID& listOID,
const uint16_t dbRoot,
const uint32_t partition,
const uint16_t segment)
{
int rc2;
rc2 = m_dctnry.openDctnry(dctnryOID, dbRoot, partition, segment);
@@ -65,29 +65,29 @@ const int DctnryStore::openDctnryStore(const OID& dctnryOID,
/***********************************************************
* Create a dictionary store file
***********************************************************/
const int DctnryStore::createDctnryStore( const OID& dctnryOID,
***********************************************************/
const int DctnryStore::createDctnryStore( const OID& dctnryOID,
const OID& treeOID, const OID& listOID, int colWidth, const uint16_t dbRoot,
const uint32_t partition, const uint16_t segment )
{
{
int rc2 ;
rc2 = m_dctnry.createDctnry(dctnryOID, colWidth, dbRoot, partition, segment);
return rc2;
}
/***********************************************************
* Drop a dictionary store file
***********************************************************/
const int DctnryStore::dropDctnryStore( const OID& dctnryOID,
const OID& treeOID,
const OID& listOID)
***********************************************************/
const int DctnryStore::dropDctnryStore( const OID& dctnryOID,
const OID& treeOID,
const OID& listOID)
{
int rc2;
rc2 = m_dctnry.dropDctnry(dctnryOID);
return rc2;
return rc2;
}
/***********************************************************
@@ -95,23 +95,24 @@ const int DctnryStore::dropDctnryStore( const OID& dctnryOID,
* Function first checks to see if the signature is already
* in our string cache, and returns the corresponding token
* if it is found in the cache.
***********************************************************/
const int DctnryStore::updateDctnryStore(unsigned char* sigValue,
int& sigSize,
Token& token)
***********************************************************/
const int DctnryStore::updateDctnryStore(unsigned char* sigValue,
int& sigSize,
Token& token)
{
int rc = NO_ERROR;
Signature sig;
sig.signature = sigValue;
sig.size = sigSize;
sig.size = sigSize;
//if String cache is enabled then look for string in cache
if (m_hashMapFlag)
if (m_hashMapFlag)
{
if (m_dctnry.m_arraySize < (int)m_hashMapSize)
{
bool found = false;
found = m_dctnry.getTokenFromArray(sig);
if (found)
{
token = sig.token;
@@ -119,9 +120,9 @@ const int DctnryStore::updateDctnryStore(unsigned char* sigValue,
}
} //end if use hash map and size >0
}
//Insert into Dictionary
rc = m_dctnry.insertDctnry(sigSize, sigValue, token);
rc = m_dctnry.insertDctnry(sigSize, sigValue, token);
//Add the new signature and token into cache if the hashmap flag is on
// (We currently use an array instead of a hashmap.)
@@ -132,24 +133,25 @@ const int DctnryStore::updateDctnryStore(unsigned char* sigValue,
sig.signature = new unsigned char[sigSize];
memcpy (sig.signature, sigValue, sigSize);
sig.token = token;
m_dctnry.m_sigArray[m_dctnry.m_arraySize]=sig;
m_dctnry.m_sigArray[m_dctnry.m_arraySize] = sig;
m_dctnry.m_arraySize++;
}
return rc;
}
}
/***********************************************************
* Delete signature from the dictionary store file
***********************************************************/
const int DctnryStore::deleteDctnryToken(Token& token)
***********************************************************/
const int DctnryStore::deleteDctnryToken(Token& token)
{
int rc ;
int sigSize;
unsigned char* sigValue = NULL;
rc = m_dctnry.deleteDctnryValue( token, sigSize, &sigValue);
if (rc!=NO_ERROR)
if (rc != NO_ERROR)
{
return rc;
}
@@ -162,7 +164,7 @@ const int DctnryStore::deleteDctnryToken(Token& token)
free(sigValue);
return rc;
}
return rc;
}
} //end of namespace
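
The updateDctnryStore() hunks above change only layout; the control flow stays the same: look the signature up in the string cache, call m_dctnry.insertDctnry() on a miss, then add the new signature/token pair to the cache while there is room. A simplified, self-contained sketch of that lookup-then-insert-then-cache flow, using standard containers in place of the fixed Signature array and the WriteEngine error codes:

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>

// Stand-ins for WriteEngine's Token type and NO_ERROR code; the real
// definitions live in the WriteEngine headers.
using Token = std::uint64_t;
const int NO_ERROR = 0;

class TinyDctnryCache
{
public:
    explicit TinyDctnryCache(std::size_t maxSize) : m_maxSize(maxSize) {}

    // Mirrors the flow of DctnryStore::updateDctnryStore shown above:
    // 1) look the signature up in the string cache,
    // 2) otherwise insert it into the dictionary,
    // 3) cache the new token if the cache still has room.
    int updateStore(const std::string& sig, Token& token)
    {
        auto it = m_cache.find(sig);

        if (it != m_cache.end())
        {
            token = it->second;                    // cache hit: reuse the existing token
            return NO_ERROR;
        }

        int rc = insertIntoDictionary(sig, token); // real code calls m_dctnry.insertDctnry()

        if (rc == NO_ERROR && m_cache.size() < m_maxSize)
            m_cache.emplace(sig, token);           // remember the new signature/token pair

        return rc;
    }

private:
    // Placeholder for the actual dictionary insert; sequential tokens keep
    // the sketch self-contained.
    int insertIntoDictionary(const std::string&, Token& token)
    {
        token = m_nextToken++;
        return NO_ERROR;
    }

    std::unordered_map<std::string, Token> m_cache;
    std::size_t m_maxSize;
    Token m_nextToken = 1;
};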

View File

@@ -25,7 +25,7 @@
* stored in the dictionary, and token(s) are returned to denote the location
* of the signature(s). The caller is then responsible for storing the
* token(s) in the corresponding segment column file.
*
*
* References to tree and list OIDs are not currently pertinent, and may be
* removed at a later time.
*
@@ -59,29 +59,35 @@ namespace WriteEngine
class DctnryStore : public DbFileOp
{
public:
/**
* @brief Constructor
*/
/**
* @brief Constructor
*/
EXPORT DctnryStore();
/**
* @brief Destructor
*/
/**
* @brief Destructor
*/
EXPORT ~DctnryStore();
/**
* @brief Close a dictionary store after it has been opened.
*/
EXPORT int closeDctnryStore(){return m_dctnry.closeDctnry();}
/**
* @brief Close a dictionary store after it has been opened.
*/
EXPORT int closeDctnryStore()
{
return m_dctnry.closeDctnry();
}
/**
* @brief Close a dictionary store without flushing the block buffer or
* updating BRM with HWM. Intended to be used for immediate file closure
* to shut down a job that has encountered an error, and intends to do
* a rollback.
*/
EXPORT int closeDctnryStoreOnly()
{
return m_dctnry.closeDctnryOnly();
}
/**
* @brief Close a dictionary store without flushing the block buffer or
* updating BRM with HWM. Intended to be used for immediate file closure
* to shut down a job that has encountered an error, and intends to do
* a rollback.
*/
EXPORT int closeDctnryStoreOnly(){return m_dctnry.closeDctnryOnly();}
/**
* @brief create dictionary store
*
@@ -94,102 +100,124 @@ public:
* @param segment - column segment number for store file
*/
EXPORT const int createDctnryStore(
const OID& dctnryOID, const OID& treeOID,
const OID& listOID, int colWidth, const uint16_t dbRoot,
const uint32_t partition, const uint16_t segment );
const OID& dctnryOID, const OID& treeOID,
const OID& listOID, int colWidth, const uint16_t dbRoot,
const uint32_t partition, const uint16_t segment );
/**
* @brief Delete a token from dictionary store, for maintenance use
*
* @param token - token to be deleted
*/
/**
* @brief Delete a token from dictionary store, for maintenance use
*
* @param token - token to be deleted
*/
EXPORT const int deleteDctnryToken(Token& token);
/**
* @brief Drop dictionary store (for DDL/DML use)
*
* @param dctnryOID - dictionary file OID
* @param treeOID - index tree OID (not used)
* @param listOID - list OID (not used)
*/
EXPORT const int dropDctnryStore( const OID& dctnryOID, const OID& treeOID,
const OID& listOID);
/**
* @brief Open a dictionary store after creation
*
* @param dctnryOID - dictionary file OID
* @param treeOID - index tree OID (not used)
* @param listOID - list OID (not used)
* @param dbRoot - DBRoot for store file
* @param partition - partition number for store file
* @param segment - column segment number for store file
*/
/**
* @brief Drop dictionary store (for DDL/DML use)
*
* @param dctnryOID - dictionary file OID
* @param treeOID - index tree OID (not used)
* @param listOID - list OID (not used)
*/
EXPORT const int dropDctnryStore( const OID& dctnryOID, const OID& treeOID,
const OID& listOID);
/**
* @brief Open a dictionary store after creation
*
* @param dctnryOID - dictionary file OID
* @param treeOID - index tree OID (not used)
* @param listOID - list OID (not used)
* @param dbRoot - DBRoot for store file
* @param partition - partition number for store file
* @param segment - column segment number for store file
*/
EXPORT const int openDctnryStore(const OID& dctnryOID, const OID& treeOID,
const OID& listOID,
const uint16_t dbRoot,
const uint32_t partition,
const uint16_t segment);
const OID& listOID,
const uint16_t dbRoot,
const uint32_t partition,
const uint16_t segment);
/**
* @brief Update dictionary store with tokenized strings (for DDL/DML use)
*
* @param sigValue - signature value
* @param sigSize - signature size
* @param token - (output) token that was added
*/
EXPORT const int updateDctnryStore(unsigned char* sigValue,
int& sigSize, Token& token);
/**
* @brief Update dictionary store with tokenized strings (for DDL/DML use)
*
* @param sigValue - signature value
* @param sigSize - signature size
* @param token - (output) token that was added
*/
EXPORT const int updateDctnryStore(unsigned char* sigValue,
int& sigSize, Token& token);
/**
* @brief Update dictionary store with tokenized strings (for Bulk use)
*
* @param buf - bulk buffer containing strings to be parsed
* @param pos - list of offsets into buf
* @param totalRow - total number of rows in buf
* @param col - the column to be parsed from buf
* @param colWidth - width of the dictionary column being parsed
* @param tokenBuf - (output) list of tokens for the parsed strings
*/
const int updateDctnryStore(const char* buf,
ColPosPair ** pos,
const int totalRow,
const int col,
const int colWidth,
char* tokenBuf)
{ return(m_dctnry.insertDctnry(
buf, pos, totalRow, col, colWidth, tokenBuf)); }
/**
* @brief Update dictionary store with tokenized strings (for Bulk use)
*
* @param buf - bulk buffer containing strings to be parsed
* @param pos - list of offsets into buf
* @param totalRow - total number of rows in buf
* @param col - the column to be parsed from buf
* @param colWidth - width of the dictionary column being parsed
* @param tokenBuf - (output) list of tokens for the parsed strings
*/
const int updateDctnryStore(const char* buf,
ColPosPair** pos,
const int totalRow,
const int col,
const int colWidth,
char* tokenBuf)
{
return (m_dctnry.insertDctnry(
buf, pos, totalRow, col, colWidth, tokenBuf));
}
/**
* @brief TransId related function
*
* @param transId - Current transaction id (for DDL/DML use)
*/
void setAllTransId(const TxnID& transId){m_dctnry.setTransId(transId);}
/**
* @brief TransId related function
*
* @param transId - Current transaction id (for DDL/DML use)
*/
void setAllTransId(const TxnID& transId)
{
m_dctnry.setTransId(transId);
}
/**
* @brief String cache related routines
*/
void clearMap() { m_dctnry.clearMap(); }
void createMap(){ m_dctnry.createMap(); }
/**
* @brief String cache related routines
*/
void clearMap()
{
m_dctnry.clearMap();
}
void createMap()
{
m_dctnry.createMap();
}
void setUseHashMap(bool flag)
{ m_hashMapFlag = flag;
m_dctnry.setUseHashMap(flag); }
{
m_hashMapFlag = flag;
m_dctnry.setUseHashMap(flag);
}
void setHashMapSize(int size)
{ if (size < MAX_HASHMAP_SIZE)
m_hashMapSize = size;
else
m_hashMapSize = MAX_HASHMAP_SIZE;
m_dctnry.setHashMapSize(m_hashMapSize); }
{
if (size < MAX_HASHMAP_SIZE)
m_hashMapSize = size;
else
m_hashMapSize = MAX_HASHMAP_SIZE;
m_dctnry.setHashMapSize(m_hashMapSize);
}
HWM getHWM() const { return m_dctnry.getHWM(); }
const std::string& getFileName() const { return m_dctnry.getFileName();}
/**
* @brief public instance
*/
HWM getHWM() const
{
return m_dctnry.getHWM();
}
const std::string& getFileName() const
{
return m_dctnry.getFileName();
}
/**
* @brief public instance
*/
Dctnry m_dctnry;
private:
private:
// Used to configure string cache usage
bool m_hashMapFlag;
int m_hashMapSize;
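
The accessors above outline the public workflow of the class: configure the string cache, open the store file, tokenize signatures, and close. A hypothetical caller is sketched below; the include path, OID, DBRoot, partition, and segment values are assumptions for illustration, and real code would run inside an initialized WriteEngine environment.

// Sketch only: the header name, OID, and DBRoot/partition/segment values
// below are assumptions, not taken from this commit.
#include <cstring>
#include "we_dctnrystore.h"   // assumed header declaring WriteEngine::DctnryStore

int tokenizeOneString(WriteEngine::DctnryStore& store)
{
    using namespace WriteEngine;

    // Keep the string cache enabled, capped below MAX_HASHMAP_SIZE.
    store.setUseHashMap(true);
    store.setHashMapSize(1000);

    // Tree and list OIDs are unused per the header comments.
    int rc = store.openDctnryStore(/*dctnryOID*/ 3001, /*treeOID*/ 0, /*listOID*/ 0,
                                   /*dbRoot*/ 1, /*partition*/ 0, /*segment*/ 0);
    if (rc != NO_ERROR)
        return rc;

    unsigned char sig[] = "example string";
    int sigSize = static_cast<int>(std::strlen(reinterpret_cast<const char*>(sig)));
    Token token;

    // On success the caller would store 'token' in the segment column file.
    rc = store.updateDctnryStore(sig, sigSize, token);

    store.closeDctnryStore();
    return rc;
}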