mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-07 03:22:57 +03:00

Reformat all code to coding standard

Andrew Hutchings
2017-10-26 17:18:17 +01:00
parent 4985f3456e
commit 01446d1e22
1296 changed files with 403852 additions and 353747 deletions
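
The reformatting itself is mechanical. As a rough, hypothetical sketch of the conventions visible in the hunks below (spaces around binary operators as in INITIAL_EXTENT_ROWS_TO_DISK * colWidth, a space after the comma in template argument lists as in std::map<FID, FID>&, the reference attached to the type, and Allman braces with four-space indentation), a standalone fragment formatted the same way might look like this; the names and constant values are placeholders, not code from the repository:

#include <stdint.h>
#include <iostream>
#include <map>

// Hypothetical stand-ins for the WriteEngine constants referenced in this file
// (INITIAL_EXTENT_ROWS_TO_DISK, BYTE_PER_BLOCK); the values are illustrative only.
const int64_t kInitialExtentRowsToDisk = 256 * 1024;
const int64_t kBytePerBlock = 8192;

// Spaces around binary operators, a space after the comma in template argument
// lists, the '&' attached to the type, Allman braces and four-space indentation.
int64_t abbreviatedBlocks(const std::map<uint32_t, uint32_t>& colWidths, uint32_t oid)
{
    std::map<uint32_t, uint32_t>::const_iterator it = colWidths.find(oid);

    if (it == colWidths.end())
    {
        return 0;
    }

    return (kInitialExtentRowsToDisk * it->second) / kBytePerBlock;
}

int main()
{
    std::map<uint32_t, uint32_t> colWidths;
    colWidths[3001] = 4;    // hypothetical column OID -> width in bytes

    std::cout << abbreviatedBlocks(colWidths, 3001) << std::endl;
    return 0;
}

The hunks below apply the same rules to the ColumnOpCompress0 and ColumnOpCompress1 implementations.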


@@ -44,15 +44,15 @@ class ChunkManager;
 */
ColumnOpCompress0::ColumnOpCompress0()
{
    m_compressionType = 0;
}

ColumnOpCompress0::ColumnOpCompress0(Log* logger)
{
    m_compressionType = 0;
    setDebugLevel( logger->getDebugLevel() );
    setLogger ( logger );
}

/**
@@ -64,21 +64,23 @@ ColumnOpCompress0::~ColumnOpCompress0()
// @bug 5572 - HDFS usage: add *.tmp file backup flag
IDBDataFile* ColumnOpCompress0::openFile(
    const Column& column, const uint16_t dbRoot, const uint32_t partition, const uint16_t segment,
    std::string& segFile, bool useTmpSuffix, const char* mode, const int ioBuffSize) const
{
    return FileOp::openFile(column.dataFile.fid, dbRoot, partition, segment, segFile,
                            mode, column.colWidth, useTmpSuffix);
}

bool ColumnOpCompress0::abbreviatedExtent(IDBDataFile* pFile, int colWidth) const
{
    long long fsize;

    if (getFileSize(pFile, fsize) == NO_ERROR)
    {
-        return (fsize == INITIAL_EXTENT_ROWS_TO_DISK*colWidth);
+        return (fsize == INITIAL_EXTENT_ROWS_TO_DISK * colWidth);
    }

    // TODO: Log error
    return false;
}
@@ -87,10 +89,12 @@ bool ColumnOpCompress0::abbreviatedExtent(IDBDataFile* pFile, int colWidth) cons
int ColumnOpCompress0::blocksInFile(IDBDataFile* pFile) const
{
    long long fsize;

    if (getFileSize(pFile, fsize) == NO_ERROR)
    {
        return (fsize / BYTE_PER_BLOCK);
    }

    // TODO: Log error
    return 0;
}
@@ -98,13 +102,13 @@ int ColumnOpCompress0::blocksInFile(IDBDataFile* pFile) const
int ColumnOpCompress0::readBlock(IDBDataFile* pFile, unsigned char* readBuf, const uint64_t fbo)
{
    return readDBFile(pFile, readBuf, fbo, true);
}

int ColumnOpCompress0::saveBlock(IDBDataFile* pFile, const unsigned char* writeBuf, const uint64_t fbo)
{
    return writeDBFileFbo(pFile, writeBuf, fbo, 1);
}
@@ -119,14 +123,16 @@ int ColumnOpCompress0::saveBlock(IDBDataFile* pFile, const unsigned char* writeB
ColumnOpCompress1::ColumnOpCompress1(Log* logger)
{
    m_compressionType = 1;
    m_chunkManager = new ChunkManager();

    if (logger)
    {
        setDebugLevel( logger->getDebugLevel() );
        setLogger ( logger );
    }

    m_chunkManager->fileOp(this);
}

/**
@@ -134,77 +140,77 @@ ColumnOpCompress1::ColumnOpCompress1(Log* logger
 */
ColumnOpCompress1::~ColumnOpCompress1()
{
    if (m_chunkManager)
    {
        delete m_chunkManager;
    }
}

// @bug 5572 - HDFS usage: add *.tmp file backup flag
IDBDataFile* ColumnOpCompress1::openFile(
    const Column& column, const uint16_t dbRoot, const uint32_t partition, const uint16_t segment,
    std::string& segFile, bool useTmpSuffix, const char* mode, const int ioBuffSize) const
{
    return m_chunkManager->getFilePtr(column, dbRoot, partition, segment, segFile,
                                      mode, ioBuffSize, useTmpSuffix);
}

bool ColumnOpCompress1::abbreviatedExtent(IDBDataFile* pFile, int colWidth) const
{
-    return (blocksInFile(pFile) == INITIAL_EXTENT_ROWS_TO_DISK*colWidth/BYTE_PER_BLOCK);
+    return (blocksInFile(pFile) == INITIAL_EXTENT_ROWS_TO_DISK * colWidth / BYTE_PER_BLOCK);
}

int ColumnOpCompress1::blocksInFile(IDBDataFile* pFile) const
{
    CompFileHeader compFileHeader;
    readHeaders(pFile, compFileHeader.fControlData, compFileHeader.fPtrSection);

    compress::IDBCompressInterface compressor;
    return compressor.getBlockCount(compFileHeader.fControlData);
}

int ColumnOpCompress1::readBlock(IDBDataFile* pFile, unsigned char* readBuf, const uint64_t fbo)
{
    return m_chunkManager->readBlock(pFile, readBuf, fbo);
}

int ColumnOpCompress1::saveBlock(IDBDataFile* pFile, const unsigned char* writeBuf, const uint64_t fbo)
{
    return m_chunkManager->saveBlock(pFile, writeBuf, fbo);
}

-int ColumnOpCompress1::flushFile(int rc, std::map<FID,FID> & columnOids)
+int ColumnOpCompress1::flushFile(int rc, std::map<FID, FID>& columnOids)
{
    return m_chunkManager->flushChunks(rc, columnOids);
}

int ColumnOpCompress1::expandAbbrevColumnExtent(
    IDBDataFile* pFile, uint16_t dbRoot, uint64_t emptyVal, int width)
{
    // update the uncompressed initial chunk to full chunk
    RETURN_ON_ERROR(m_chunkManager->expandAbbrevColumnExtent(pFile, emptyVal, width));

    // let the base to physically expand extent.
    return FileOp::expandAbbrevColumnExtent(pFile, dbRoot, emptyVal, width);
}

int ColumnOpCompress1::updateColumnExtent(IDBDataFile* pFile, int nBlocks)
{
    return m_chunkManager->updateColumnExtent(pFile, nBlocks);
}

void ColumnOpCompress1::closeColumnFile(Column& column) const
{
    // Leave file closing to chunk manager.
    column.dataFile.pFile = NULL;
}

} //end of namespace