
Merge branch 'develop' into MCOL-265

This commit is contained in:
Andrew Hutchings
2019-06-10 13:58:03 +01:00
committed by GitHub
383 changed files with 2979 additions and 24713 deletions

View File

@@ -1,44 +1,11 @@
include_directories(${KDE4_INCLUDES} ${KDE4_INCLUDE_DIR} ${QT_INCLUDES} )
include_directories(${ENGINE_COMMON_INCLUDES} ../dictionary)
add_executable(we_shared_components_tests ./shared_components_tests.cpp)
target_link_libraries(we_shared_components_tests ${ENGINE_LDFLAGS} ${MARIADB_CLIENT_LIBS} ${ENGINE_WRITE_LIBS} ${CPPUNIT_LIBRARIES})
install(TARGETS we_shared_components_tests DESTINATION ${ENGINE_BINDIR} COMPONENT platform)
########### install files ###############
install(FILES we_index.h we_define.h we_type.h we_fileop.h we_blockop.h we_dbfileop.h we_obj.h we_log.h we_simplesyslog.h we_convertor.h we_brm.h we_macro.h we_config.h we_cache.h we_stats.h we_bulkrollbackmgr.h we_typeext.h we_chunkmanager.h we_bulkrollbackfilecompressed.h we_bulkrollbackfilecompressedhdfs.h we_bulkrollbackfile.h we_rbmetawriter.h we_dbrootextenttracker.h we_confirmhdfsdbfile.h DESTINATION include)
#original Makefile.am contents follow:
## Copyright (C) 2014 InfiniDB, Inc.
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; version 2 of
## the License.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
#
## $Id: Makefile.am 3720 2012-04-04 18:18:49Z rdempsey $
### Process this file with automake to produce Makefile.in
#
#include_HEADERS = we_index.h we_define.h we_type.h we_fileop.h we_blockop.h we_dbfileop.h we_obj.h we_log.h we_simplesyslog.h we_convertor.h we_brm.h we_macro.h we_config.h we_cache.h we_stats.h we_bulkrollbackmgr.h we_typeext.h we_chunkmanager.h we_bulkrollbackfilecompressed.h we_bulkrollbackfilecompressedhdfs.h we_bulkrollbackfile.h we_rbmetawriter.h we_dbrootextenttracker.h we_confirmhdfsdbfile.h
#
#test:
#
#coverage:
#
#leakcheck:
#
#docs:
#
#bootstrap: install-data-am
#

View File

@@ -29,18 +29,28 @@ using namespace boost;
#include <cppunit/extensions/HelperMacros.h>
#include <we_dbfileop.h>
#include <we_type.h>
#include <we_semop.h>
#include <we_log.h>
#include <we_convertor.h>
#include <we_brm.h>
#include <we_cache.h>
#include "we_dbfileop.h"
#include "we_type.h"
#include "we_log.h"
#include "we_convertor.h"
#include "we_brm.h"
#include "we_cache.h"
#include "we_colop.h"
#include "IDBDataFile.h"
#include "BufferedFile.h"
#include "IDBPolicy.h"
#include "IDBFileSystem.h"
#include "idbcompress.h"
#include "calpontsystemcatalog.h"
#include "we_colopcompress.h"
#include "we_dctnrycompress.h"
using namespace compress;
using namespace idbdatafile;
using namespace WriteEngine;
using namespace BRM;
int compare (const void* a, const void* b)
/*int compare (const void* a, const void* b)
{
return ( *(uint32_t*)a - * (uint32_t*)b );
}
@@ -49,51 +59,55 @@ int compare1(const void* a, const void* b)
{
return ( (*(SortTuple*)a).key - (*(SortTuple*)b).key );
}
*/
class SharedTest : public CppUnit::TestFixture
{
CPPUNIT_TEST_SUITE( SharedTest );
CPPUNIT_TEST_SUITE( SharedTest );
//CPPUNIT_TEST(setUp);
CPPUNIT_TEST(setUp);
//CPPUNIT_TEST( test1 );
// File operation testing
CPPUNIT_TEST( testFileNameOp );
CPPUNIT_TEST( testFileHandleOp );
// CPPUNIT_TEST( testFileNameOp );
// CPPUNIT_TEST( testFileHandleOp );
CPPUNIT_TEST( testDirBasic );
CPPUNIT_TEST( testCreateDeleteFile );
// Data block related testing
CPPUNIT_TEST( testCalculateRowIdBitmap );
CPPUNIT_TEST( testBlockBuffer );
CPPUNIT_TEST( testBitBasic );
CPPUNIT_TEST( testBufferBit );
CPPUNIT_TEST( testBitShift );
CPPUNIT_TEST( testEmptyRowValue );
CPPUNIT_TEST( testCorrectRowWidth );
// CPPUNIT_TEST( testCalculateRowIdBitmap );
// CPPUNIT_TEST( testBlockBuffer );
// CPPUNIT_TEST( testBitBasic );
// CPPUNIT_TEST( testBufferBit );
// CPPUNIT_TEST( testBitShift );
// CPPUNIT_TEST( testEmptyRowValue );
// CPPUNIT_TEST( testCorrectRowWidth );
// DB File Block related testing
CPPUNIT_TEST( testDbBlock );
// CPPUNIT_TEST( testDbBlock );
CPPUNIT_TEST( testCopyDbFile );
// CPPUNIT_TEST( testCopyDbFile );
// Extent & dict related testing
CPPUNIT_TEST( testExtensionWOPrealloc );
CPPUNIT_TEST( testDictExtensionWOPrealloc );
// Semaphore related testing
CPPUNIT_TEST( testSem );
// CPPUNIT_TEST( testSem );
// Log related testing
CPPUNIT_TEST( testLog );
// Version Buffer related testing
CPPUNIT_TEST( testHWM );
CPPUNIT_TEST( testVB );
// CPPUNIT_TEST( testHWM );
// CPPUNIT_TEST( testVB );
// Disk manager related testing
CPPUNIT_TEST( testDM );
CPPUNIT_TEST( tearDown );
// CPPUNIT_TEST( testDM );
// CPPUNIT_TEST( tearDown );
// Cache related testing
CPPUNIT_TEST( testCacheBasic );
CPPUNIT_TEST( testCacheReadWrite );
// CPPUNIT_TEST( testCacheBasic );
// CPPUNIT_TEST( testCacheReadWrite );
CPPUNIT_TEST( testCleanup ); // NEVER COMMENT OUT THIS LINE
CPPUNIT_TEST_SUITE_END();
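For context, a CppUnit suite declared as above still needs a registration macro and a driver to execute. The sketch below is the usual CppUnit harness, not taken from this diff; whether we_shared_components_tests uses exactly this main() is not shown here.
#include <cppunit/extensions/HelperMacros.h>
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>
// Normally placed after the fixture definition:
// CPPUNIT_TEST_SUITE_REGISTRATION( SharedTest );
int main()
{
    CppUnit::TextUi::TestRunner runner;
    // Pull in every suite registered with the global factory and run it.
    runner.addTest( CppUnit::TestFactoryRegistry::getRegistry().makeTest() );
    bool ok = runner.run();   // prints per-test results to stdout
    return ok ? 0 : 1;
}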
@@ -113,8 +127,6 @@ public:
void test1()
{
//m_wrapper.test();
// int numOfBlock = 10240;
int numOfBlock = 1024;
FILE* pFile;
unsigned char writeBuf[BYTE_PER_BLOCK * 10];
@@ -139,7 +151,7 @@ public:
}
}
}
/*
void testFileNameOp()
{
FileOp fileOp;
@@ -342,23 +354,74 @@ public:
}
*/
void testDirBasic()
{
FileOp fileOp;
char dirName[30];
int rc;
strcpy( dirName, "testdir" );
fileOp.removeDir( dirName );
printf("\nRunning testDirBasic \n");
idbdatafile::IDBPolicy::init(true, false, "", 0);
IDBFileSystem& fs = IDBPolicy::getFs( "/tmp" );
strcpy( dirName, "/tmp/testdir42" );
fs.remove( dirName );
CPPUNIT_ASSERT( fileOp.isDir( dirName ) == false );
rc = fileOp.createDir( dirName );
CPPUNIT_ASSERT( rc == NO_ERROR );
CPPUNIT_ASSERT( fileOp.isDir( dirName ) == true );
fileOp.removeDir( dirName );
fs.remove( dirName );
}
void testCreateDeleteFile()
{
IDBDataFile* pFile = NULL;
FileOp fileOp;
BlockOp blockOp;
char fileName[20];
int rc;
char hdrs[ IDBCompressInterface::HDR_BUF_LEN * 2 ];
printf("\nRunning testCreateDeleteFile \n");
idbdatafile::IDBPolicy::init(true, false, "", 0);
// Set to versionbuffer to satisfy IDBPolicy::getType
strcpy( fileName, "versionbuffer" );
fileOp.compressionType(1);
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT( fileOp.exists( fileName ) == false );
int width = blockOp.getCorrectRowWidth( execplan::CalpontSystemCatalog::BIGINT, 8 );
int nBlocks = INITIAL_EXTENT_ROWS_TO_DISK / BYTE_PER_BLOCK * width;
uint64_t emptyVal = blockOp.getEmptyRowValue( execplan::CalpontSystemCatalog::BIGINT, 8 );
// createFile runs IDBDataFile::open + initAbbrevCompColumnExtent
// under the hood
// bigint column file
rc = fileOp.createFile( fileName,
nBlocks, // number of blocks
emptyVal, // NULL value
width, // width
1 ); // dbroot
CPPUNIT_ASSERT( rc == NO_ERROR );
fileOp.closeFile(pFile);
pFile = IDBDataFile::open(IDBPolicy::getType(fileName,
IDBPolicy::WRITEENG), fileName, "rb", 1);
rc = pFile->seek(0, 0);
CPPUNIT_ASSERT(rc == NO_ERROR);
rc = fileOp.readHeaders(pFile, hdrs);
CPPUNIT_ASSERT( rc == NO_ERROR );
// Couldn't use IDBDataFile->close() here w/o explicit cast
fileOp.closeFile(pFile);
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT( fileOp.exists( fileName ) == false );
}
/*
void testCalculateRowIdBitmap()
{
BlockOp blockOp;
@@ -374,23 +437,23 @@ public:
CPPUNIT_ASSERT( bio == 16 );
// Assuming 2048 per data block, 4 byte width
/* rowId = 2049;
CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 2048, 4, fbo, bio ) == true );
CPPUNIT_ASSERT( fbo == 1 );
CPPUNIT_ASSERT( bio == 16 );
// Assuming 4096 per data block, 2 byte width
rowId = 2049;
CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 4096, 2, fbo, bio ) == true );
CPPUNIT_ASSERT( fbo == 1 );
CPPUNIT_ASSERT( bio == 16 );
// Assuming 8192 per data block, 1 byte width
rowId = 2049;
CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 8192, 1, fbo, bio ) == true );
CPPUNIT_ASSERT( fbo == 1 );
CPPUNIT_ASSERT( bio == 16 );
*/
// rowId = 2049;
// CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 2048, 4, fbo, bio ) == true );
// CPPUNIT_ASSERT( fbo == 1 );
// CPPUNIT_ASSERT( bio == 16 );
//
// // Assuming 4096 per data block, 2 byte width
// rowId = 2049;
// CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 4096, 2, fbo, bio ) == true );
// CPPUNIT_ASSERT( fbo == 1 );
// CPPUNIT_ASSERT( bio == 16 );
//
// // Assuming 8192 per data block, 1 byte width
// rowId = 2049;
// CPPUNIT_ASSERT( blockOp.calculateRowId( rowId, 8192, 1, fbo, bio ) == true );
// CPPUNIT_ASSERT( fbo == 1 );
// CPPUNIT_ASSERT( bio == 16 );
//
rowId = 65546;
CPPUNIT_ASSERT( blockOp.calculateRowBitmap( rowId, BYTE_PER_BLOCK * 8, fbo, bio, bbo ) == true );
CPPUNIT_ASSERT( fbo == 1 );
@@ -550,19 +613,19 @@ public:
curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 8 );
CPPUNIT_ASSERT( curVal == 0x8000000000000001LL );
/*
curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 9 );
CPPUNIT_ASSERT( curVal == 0x80000001 );
curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 10 );
CPPUNIT_ASSERT( curVal == 0x8000000000000001LL );
curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 12 );
CPPUNIT_ASSERT( curVal == 0x8000000000000001LL );
curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 19 );
CPPUNIT_ASSERT( curVal == 0xFFFFFFFFFFFFFFFFLL );
*/
// curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 9 );
// CPPUNIT_ASSERT( curVal == 0x80000001 );
//
// curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 10 );
// CPPUNIT_ASSERT( curVal == 0x8000000000000001LL );
//
// curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 12 );
// CPPUNIT_ASSERT( curVal == 0x8000000000000001LL );
//
// curVal = blockOp.getEmptyRowValue( WriteEngine::DECIMAL, 19 );
// CPPUNIT_ASSERT( curVal == 0xFFFFFFFFFFFFFFFFLL );
//
curVal = blockOp.getEmptyRowValue( WriteEngine::DATE, 4 );
CPPUNIT_ASSERT( curVal == 0xFFFFFFFF );
@@ -645,18 +708,18 @@ public:
curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 8 );
CPPUNIT_ASSERT( curVal == 8 );
/* curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 9 );
CPPUNIT_ASSERT( curVal == 4 );
curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 10 );
CPPUNIT_ASSERT( curVal == 8 );
curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 12 );
CPPUNIT_ASSERT( curVal == 8 );
curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 19 );
CPPUNIT_ASSERT( curVal == 8 );
*/
// curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 9 );
// CPPUNIT_ASSERT( curVal == 4 );
//
// curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 10 );
// CPPUNIT_ASSERT( curVal == 8 );
//
// curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 12 );
// CPPUNIT_ASSERT( curVal == 8 );
//
// curVal = blockOp.getCorrectRowWidth( WriteEngine::DECIMAL, 19 );
// CPPUNIT_ASSERT( curVal == 8 );
//
curVal = blockOp.getCorrectRowWidth( WriteEngine::DATE, 8 );
CPPUNIT_ASSERT( curVal == 4 );
@@ -892,126 +955,229 @@ public:
dbFileOp.closeFile( pTargetFile );
}
*/
void testSem()
void testExtensionWOPrealloc()
{
SemOp semOp;
IDBDataFile* pFile = NULL;
FileOp fileOp;
BlockOp blockOp;
char fileName[20];
int rc;
bool bSuccess;
key_t key;
int sid, totalNum = 5;
char fileName[100];
char hdrs[ IDBCompressInterface::HDR_BUF_LEN * 2 ];
int dbRoot = 1;
semOp.setMaxSemVal( 3 );
printf("\nRunning testExtensionWOPrealloc \n");
idbdatafile::IDBPolicy::init(true, false, "", 0);
// Set to versionbuffer to satisfy IDBPolicy::getType
strcpy( fileName, "versionbuffer" );
fileOp.compressionType(1);
bSuccess = semOp.getKey( NULL, key );
CPPUNIT_ASSERT( bSuccess == false );
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT( fileOp.exists( fileName ) == false );
rc = fileOp.getFileName( 9991, fileName );
int width = blockOp.getCorrectRowWidth( execplan::CalpontSystemCatalog::BIGINT, 8 );
int nBlocks = INITIAL_EXTENT_ROWS_TO_DISK / BYTE_PER_BLOCK * width;
uint64_t emptyVal = blockOp.getEmptyRowValue( execplan::CalpontSystemCatalog::BIGINT, 8 );
// createFile runs IDBDataFile::open + initAbbrevCompColumnExtent
// under the hood
// bigint column file
rc = fileOp.createFile( fileName,
nBlocks, // number of blocks
emptyVal, // NULL value
width, // width
dbRoot ); // dbroot
CPPUNIT_ASSERT( rc == NO_ERROR );
bSuccess = semOp.getKey( fileName, key );
CPPUNIT_ASSERT( bSuccess == false );
// open created compressed file and check its header
pFile = IDBDataFile::open(IDBPolicy::getType(fileName,
IDBPolicy::WRITEENG), fileName, "rb", dbRoot);
rc = fileOp.getFileName( 999, fileName );
rc = pFile->seek(0, 0);
CPPUNIT_ASSERT(rc == NO_ERROR);
rc = fileOp.readHeaders(pFile, hdrs);
CPPUNIT_ASSERT( rc == NO_ERROR );
bSuccess = semOp.getKey( fileName, key );
printf( "\nkey=%d", key );
CPPUNIT_ASSERT( bSuccess == true );
// Couldn't use IDBDataFile->close() here w/o explicit cast
fileOp.closeFile(pFile);
if ( semOp.existSem( sid, key ) )
semOp.deleteSem( sid );
// Extend the extent up to 64MB
// first run with preallocation
idbdatafile::BufferedFile* bFile = new idbdatafile::BufferedFile(fileName, "r+b", 0);
pFile = dynamic_cast<IDBDataFile*>(bFile);
rc = fileOp.initColumnExtent(pFile,
dbRoot,
BYTE_PER_BLOCK, // number of blocks
emptyVal,
width,
false, // use existing file
true, // expand the extent
false, // add full (not abbreviated) extent
false); // don't optimize extension
rc = semOp.createSem( sid, key, 1000 );
CPPUNIT_ASSERT( rc == ERR_MAX_SEM );
rc = semOp.createSem( sid, key, totalNum );
CPPUNIT_ASSERT(rc == NO_ERROR);
CPPUNIT_ASSERT(bFile->size() == 67108864);
fileOp.closeFile(pFile);
// file has been extended; delete the file before
// the second run
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT(fileOp.exists( fileName ) == false);
// second run with disabled preallocation
rc = fileOp.createFile( fileName,
nBlocks, // number of blocks
emptyVal, // NULL value
width, // width
dbRoot ); // dbroot
CPPUNIT_ASSERT( rc == NO_ERROR );
rc = semOp.createSem( sid, key, totalNum );
CPPUNIT_ASSERT( rc == ERR_SEM_EXIST );
// open created compressed file and check its header
pFile = IDBDataFile::open(IDBPolicy::getType(fileName,
IDBPolicy::WRITEENG), fileName, "rb", dbRoot);
rc = semOp.openSem( sid, key );
rc = pFile->seek(0, 0);
CPPUNIT_ASSERT(rc == NO_ERROR);
rc = fileOp.readHeaders(pFile, hdrs);
CPPUNIT_ASSERT( rc == NO_ERROR );
semOp.printAllVal( sid );
fileOp.closeFile(pFile);
// lock
printf( "\nlock one in 2" );
rc = semOp.lockSem( sid, 2 );
CPPUNIT_ASSERT( rc == NO_ERROR );
CPPUNIT_ASSERT( semOp.getVal( sid, 2 ) == 2 );
semOp.printAllVal( sid );
printf( "\nlock one in 2" );
rc = semOp.lockSem( sid, 2 );
CPPUNIT_ASSERT( rc == NO_ERROR );
CPPUNIT_ASSERT( semOp.getVal( sid, 2 ) == 1 );
semOp.printAllVal( sid );
printf( "\nlock one in 2" );
rc = semOp.lockSem( sid, 2 );
CPPUNIT_ASSERT( rc == NO_ERROR );
CPPUNIT_ASSERT( semOp.getVal( sid, 2 ) == 0 );
semOp.printAllVal( sid );
rc = semOp.lockSem( sid, 2 );
CPPUNIT_ASSERT( rc == ERR_NO_SEM_RESOURCE );
CPPUNIT_ASSERT( semOp.getVal( sid, 2 ) == 0 );
rc = semOp.lockSem( sid, -2 );
CPPUNIT_ASSERT( rc == ERR_VALUE_OUTOFRANGE );
rc = semOp.lockSem( sid + 1, 1 );
CPPUNIT_ASSERT( rc == ERR_LOCK_FAIL );
// unlock
rc = semOp.unlockSem( sid, -2 );
CPPUNIT_ASSERT( rc == ERR_VALUE_OUTOFRANGE );
rc = semOp.unlockSem( sid, 1 );
CPPUNIT_ASSERT( rc == ERR_NO_SEM_LOCK );
rc = semOp.unlockSem( sid + 1, 2 );
CPPUNIT_ASSERT( rc == ERR_UNLOCK_FAIL );
printf( "\nunlock one in 2" );
rc = semOp.unlockSem( sid, 2 );
CPPUNIT_ASSERT( rc == NO_ERROR );
CPPUNIT_ASSERT( semOp.getVal( sid, 2 ) == 1 );
semOp.printAllVal( sid );
semOp.deleteSem( sid );
CPPUNIT_ASSERT( semOp.existSem( sid, key ) == false );
CPPUNIT_ASSERT( semOp.getSemCount( sid + 1 ) == 0 );
bFile = new idbdatafile::BufferedFile(fileName, "r+b", 0);
pFile = dynamic_cast<IDBDataFile*>(bFile);
// disable disk space preallocation and extend
idbdatafile::IDBPolicy::setPreallocSpace(dbRoot);
rc = fileOp.initColumnExtent(pFile,
dbRoot,
BYTE_PER_BLOCK, // number of blocks
emptyVal,
width,
false, // use existing file
true, // expand the extent
false, // add full (not abbreviated) extent
true); // optimize extension
CPPUNIT_ASSERT(rc == NO_ERROR);
CPPUNIT_ASSERT(bFile->size() == 2105344);
fileOp.closeFile(pFile);
// file has been extended
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT(fileOp.exists( fileName ) == false);
}
// Create a dict file. Extend it with and without preallocation.
// Check the file sizes.
void testDictExtensionWOPrealloc()
{
FileOp fileOp;
BlockOp blockOp;
char fileName[20];
int rc;
int dbRoot = 1;
int colWidth = 65535;
DctnryCompress1 m_Dctnry;
// This is the magic for the stub in FileOp::oid2FileName
int oId = 42;
printf("\nRunning testDictExtensionWOPrealloc ");
printf("There could be InetStreamSocket::connect errors \n");
m_Dctnry.setDebugLevel( DEBUG_3 );
idbdatafile::IDBPolicy::init(true, false, "", 0);
// Set to versionbuffer to satisfy IDBPolicy::getType
strcpy( fileName, "versionbuffer" );
rc = m_Dctnry.dropDctnry(oId);
// FileOp::oid2FileName is called under the hood.
// Dctnry::createDctnry could be used instead with a running CS.
// createDctnryFile also uses DBRM under the hood; it works here nonetheless.
IDBDataFile* m_dFile = m_Dctnry.createDctnryFileUnit(fileName,
colWidth,
"w+b",
DEFAULT_BUFSIZ);
idbdatafile::BufferedFile* bFile = (idbdatafile::BufferedFile*)m_dFile;
CPPUNIT_ASSERT(m_dFile != NULL);
const int m_totalHdrBytes = HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE + HDR_UNIT_SIZE;
m_Dctnry.compressionType(1);
rc = m_Dctnry.initDctnryExtent( m_dFile,
dbRoot,
BYTE_PER_BLOCK, // 8192
const_cast<unsigned char*>(m_Dctnry.getDctnryHeader2()),
m_totalHdrBytes,
false,
false ); //enable preallocation
// Check the file size and remove the file
CPPUNIT_ASSERT(bFile->size() == 67379200);
CPPUNIT_ASSERT(rc == NO_ERROR);
fileOp.deleteFile( fileName );
CPPUNIT_ASSERT(fileOp.exists( fileName ) == false);
// Create a Dictionary for the second time
m_dFile = m_Dctnry.createDctnryFileUnit(fileName,
colWidth,
"w+b",
DEFAULT_BUFSIZ);
// Get the file size later
bFile = (idbdatafile::BufferedFile*)m_dFile;
CPPUNIT_ASSERT(m_dFile != NULL);
// disable preallocation and create a Dictionary
idbdatafile::IDBPolicy::setPreallocSpace(dbRoot);
m_Dctnry.compressionType(1);
rc = m_Dctnry.initDctnryExtent( m_dFile,
dbRoot,
BYTE_PER_BLOCK,
const_cast<unsigned char*>(m_Dctnry.getDctnryHeader2()),
m_totalHdrBytes,
false,
true ); //skip preallocation
// Check the size and remove the file.
CPPUNIT_ASSERT(bFile->size() == 483328);
CPPUNIT_ASSERT(rc == NO_ERROR);
fileOp.deleteFile(fileName);
CPPUNIT_ASSERT(fileOp.exists( fileName ) == false);
}
void testLog()
{
Log log;
FileOp fileOp;
string msg;
int iVal = 3;
float fVal = 2.0;
char logFile[] = "test1.log";
char logErrFile[] = "test1err.log";
log.setLogFileName( "test1.log", "test1err.log" );
log.setLogFileName( logFile, logErrFile );
msg = Convertor::int2Str( iVal );
log.logMsg( msg + " this is a info message", INFO );
log.logMsg( msg + " this is a info message", MSGLVL_INFO1 );
msg = Convertor::getTimeStr();
log.logMsg( Convertor::float2Str( fVal ) + " this is a warning message", WARNING );
log.logMsg( "this is an error message ", 1011, ERROR );
log.logMsg( "this is a critical message", 1211, CRITICAL );
//...Test formatting an unsigned 64 bit integer.
uint64_t i64Value(UINT64_MAX);
msg = Convertor::i64ToStr( i64Value );
CPPUNIT_ASSERT( (msg == "18446744073709551615") );
log.logMsg( msg + " this is an info message with the max uint64_t integer value", INFO );
log.logMsg( " this is a warning message", MSGLVL_WARNING );
log.logMsg( "this is an error message ", 1011, MSGLVL_ERROR );
log.logMsg( "this is a critical message", 1211, MSGLVL_CRITICAL );
CPPUNIT_ASSERT( fileOp.exists( logFile ) == true );
CPPUNIT_ASSERT( fileOp.exists( logErrFile ) == true );
fileOp.deleteFile( logFile );
fileOp.deleteFile( logErrFile );
CPPUNIT_ASSERT( fileOp.exists( logFile ) == false );
CPPUNIT_ASSERT( fileOp.exists( logErrFile ) == false );
}
/*
void testHWM()
{
int rc ;
@@ -1375,6 +1541,7 @@ public:
}
}
*/
void testCleanup()
{

View File

@@ -1,4 +1,5 @@
/* Copyright (C) 2014 InfiniDB, Inc.
Copyright (C) 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -65,6 +66,8 @@ namespace WriteEngine
extern int NUM_BLOCKS_PER_INITIAL_EXTENT; // defined in we_dctnry.cpp
extern WErrorCodes ec; // defined in we_log.cpp
const int COMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::maxCompressedSize(UNCOMPRESSED_CHUNK_SIZE) + 64 + 3 + 8 * 1024;
//------------------------------------------------------------------------------
// Search for the specified chunk in fChunkList.
//------------------------------------------------------------------------------
@@ -1923,10 +1926,22 @@ int ChunkManager::reallocateChunks(CompFileData* fileData)
struct tm ltm;
localtime_r(reinterpret_cast<time_t*>(&tv.tv_sec), &ltm);
char tmText[24];
// this snprintf call causes a compiler warning b/c the buffer size is less
// than the maximum possible string size.
#if defined(__GNUC__) && __GNUC__ >= 7
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation="
snprintf(tmText, sizeof(tmText), ".%04d%02d%02d%02d%02d%02d%06ld",
ltm.tm_year + 1900, ltm.tm_mon + 1,
ltm.tm_mday, ltm.tm_hour, ltm.tm_min,
ltm.tm_sec, tv.tv_usec);
#pragma GCC diagnostic pop
#else
snprintf(tmText, sizeof(tmText), ".%04d%02d%02d%02d%02d%02d%06ld",
ltm.tm_year + 1900, ltm.tm_mon + 1,
ltm.tm_mday, ltm.tm_hour, ltm.tm_min,
ltm.tm_sec, tv.tv_usec);
#endif
string dbgFileName(rlcFileName + tmText);
ostringstream oss;
@@ -2106,10 +2121,22 @@ int ChunkManager::reallocateChunks(CompFileData* fileData)
struct tm ltm;
localtime_r(reinterpret_cast<time_t*>(&tv.tv_sec), &ltm);
char tmText[24];
// this snprintf call causes a compiler warning b/c the buffer size is less
// than the maximum possible string size.
#if defined(__GNUC__) && __GNUC__ >= 7
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation="
snprintf(tmText, sizeof(tmText), ".%04d%02d%02d%02d%02d%02d%06ld",
ltm.tm_year + 1900, ltm.tm_mon + 1,
ltm.tm_mday, ltm.tm_hour, ltm.tm_min,
ltm.tm_sec, tv.tv_usec);
#pragma GCC diagnostic pop
#else
snprintf(tmText, sizeof(tmText), ".%04d%02d%02d%02d%02d%02d%06ld",
ltm.tm_year + 1900, ltm.tm_mon + 1,
ltm.tm_mday, ltm.tm_hour, ltm.tm_min,
ltm.tm_sec, tv.tv_usec);
#endif
string dbgFileName(rlcFileName + tmText);
ostringstream oss;
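The same warning guard appears twice above. In isolation the pattern reduces to the sketch below (the helper name and signature are hypothetical, not from the repo): GCC 7+ emits -Wformat-truncation for a snprintf whose worst-case output could exceed the buffer, so the warning is silenced only around a call whose output is known to fit at runtime. The diff duplicates the snprintf in an #else branch; making the pragmas themselves conditional, as sketched here, is an equivalent arrangement.
#include <cstdio>
#include <ctime>
// Hypothetical reduction of the pattern used above.
static void formatSuffix(char (&tmText)[24], const struct tm& ltm, long usec)
{
#if defined(__GNUC__) && __GNUC__ >= 7
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation="
#endif
    // ".%04d%02d%02d%02d%02d%02d%06ld" is at most 21 chars + NUL, so it fits in 24.
    snprintf(tmText, sizeof(tmText), ".%04d%02d%02d%02d%02d%02d%06ld",
             ltm.tm_year + 1900, ltm.tm_mon + 1, ltm.tm_mday,
             ltm.tm_hour, ltm.tm_min, ltm.tm_sec, usec);
#if defined(__GNUC__) && __GNUC__ >= 7
#pragma GCC diagnostic pop
#endif
}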

View File

@@ -68,7 +68,6 @@ const int UNCOMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::UNCOMPRESSED
const int COMPRESSED_FILE_HEADER_UNIT = compress::IDBCompressInterface::HDR_BUF_LEN;
// assume UNCOMPRESSED_CHUNK_SIZE > 0xBFFF (49151), 8 * 1024 bytes padding
const int COMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::maxCompressedSize(UNCOMPRESSED_CHUNK_SIZE) + 64 + 3 + 8 * 1024;
const int BLOCKS_IN_CHUNK = UNCOMPRESSED_CHUNK_SIZE / BYTE_PER_BLOCK;
const int MAXOFFSET_PER_CHUNK = 511 * BYTE_PER_BLOCK;

View File

@@ -45,7 +45,8 @@ const short ROW_PER_BYTE = 8; // Rows/byte in bitmap file
const int BYTE_PER_BLOCK = 8192; // Num bytes per data block
const int BYTE_PER_SUBBLOCK = 256; // Num bytes per sub block
const int ENTRY_PER_SUBBLOCK = 32; // Num entries per sub block
const int INITIAL_EXTENT_ROWS_TO_DISK = 256 * 1024;
const int INITIAL_EXTENT_ROWS_TO_DISK = 256 * 1024; // Used for initial number of blocks calculation
const int MAX_INITIAL_EXTENT_BLOCKS_TO_DISK = 256; // Number of blocks in abbrev extent for 8byte col.
// Num rows reserved to disk for 'initial' extent
const int FILE_NAME_SIZE = 200; // Max size of file name
const long long MAX_ALLOW_ERROR_COUNT = 100000; //Max allowable error count
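As a quick sanity check of the new constant (a sketch assuming the constants above are in scope, not code from the repo): the unit test computes the abbreviated extent size as INITIAL_EXTENT_ROWS_TO_DISK / BYTE_PER_BLOCK * width, which for an 8-byte column is 256 * 1024 / 8192 * 8 = 256 blocks, i.e. exactly MAX_INITIAL_EXTENT_BLOCKS_TO_DISK, the threshold initColumnExtent/initDctnryExtent compare nBlocks against.
// Compile-time check of the relationship described above (C++11 static_assert).
static_assert(INITIAL_EXTENT_ROWS_TO_DISK / BYTE_PER_BLOCK * 8
                  == MAX_INITIAL_EXTENT_BLOCKS_TO_DISK,
              "abbreviated extent block count for an 8-byte column");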

View File

@@ -18,7 +18,6 @@
// $Id: we_fileop.cpp 4737 2013-08-14 20:45:46Z bwilkinson $
#include "config.h"
#include <unistd.h>
#include <stdio.h>
#include <string.h>
@@ -63,7 +62,6 @@ namespace WriteEngine
/*static*/ boost::mutex FileOp::m_createDbRootMutexes;
/*static*/ boost::mutex FileOp::m_mkdirMutex;
/*static*/ std::map<int, boost::mutex*> FileOp::m_DbRootAddExtentMutexes;
const int MAX_NBLOCKS = 8192; // max number of blocks written to an extent
// in 1 call to fwrite(), during initialization
//StopWatch timer;
@@ -332,12 +330,15 @@ int FileOp::deleteFile( FID fid ) const
std::vector<std::string> dbRootPathList;
Config::getDBRootPathList( dbRootPathList );
int rc;
for (unsigned i = 0; i < dbRootPathList.size(); i++)
{
char rootOidDirName[FILE_NAME_SIZE];
sprintf(rootOidDirName, "%s/%s", dbRootPathList[i].c_str(), oidDirName);
rc = snprintf(rootOidDirName, FILE_NAME_SIZE, "%s/%s",
dbRootPathList[i].c_str(), oidDirName);
if ( IDBPolicy::remove( rootOidDirName ) != 0 )
if ( rc == FILE_NAME_SIZE || IDBPolicy::remove( rootOidDirName ) != 0 )
{
ostringstream oss;
oss << "Unable to remove " << rootOidDirName;
@@ -365,6 +366,7 @@ int FileOp::deleteFiles( const std::vector<int32_t>& fids ) const
char dbDir [MAX_DB_DIR_LEVEL][MAX_DB_DIR_NAME_SIZE];
std::vector<std::string> dbRootPathList;
Config::getDBRootPathList( dbRootPathList );
int rc;
for ( unsigned n = 0; n < fids.size(); n++ )
{
@@ -378,10 +380,10 @@ int FileOp::deleteFiles( const std::vector<int32_t>& fids ) const
for (unsigned i = 0; i < dbRootPathList.size(); i++)
{
char rootOidDirName[FILE_NAME_SIZE];
sprintf(rootOidDirName, "%s/%s", dbRootPathList[i].c_str(),
rc = snprintf(rootOidDirName, FILE_NAME_SIZE, "%s/%s", dbRootPathList[i].c_str(),
oidDirName);
if ( IDBPolicy::remove( rootOidDirName ) != 0 )
if ( rc == FILE_NAME_SIZE || IDBPolicy::remove( rootOidDirName ) != 0 )
{
ostringstream oss;
oss << "Unable to remove " << rootOidDirName;
@@ -412,6 +414,7 @@ int FileOp::deletePartitions( const std::vector<OID>& fids,
char dbDir [MAX_DB_DIR_LEVEL][MAX_DB_DIR_NAME_SIZE];
char rootOidDirName[FILE_NAME_SIZE];
char partitionDirName[FILE_NAME_SIZE];
int rcd, rcp;
for (uint32_t i = 0; i < partitions.size(); i++)
{
@@ -422,12 +425,13 @@ int FileOp::deletePartitions( const std::vector<OID>& fids,
dbDir[0], dbDir[1], dbDir[2], dbDir[3], dbDir[4]);
// config expects dbroot starting from 0
std::string rt( Config::getDBRootByNum(partitions[i].lp.dbroot) );
sprintf(rootOidDirName, "%s/%s",
rcd = snprintf(rootOidDirName, FILE_NAME_SIZE, "%s/%s",
rt.c_str(), tempFileName);
sprintf(partitionDirName, "%s/%s",
rcp = snprintf(partitionDirName, FILE_NAME_SIZE, "%s/%s",
rt.c_str(), oidDirName);
if ( IDBPolicy::remove( rootOidDirName ) != 0 )
if ( rcd == FILE_NAME_SIZE || rcp == FILE_NAME_SIZE
|| IDBPolicy::remove( rootOidDirName ) != 0 )
{
ostringstream oss;
oss << "Unable to remove " << rootOidDirName;
@@ -541,7 +545,9 @@ bool FileOp::existsOIDDir( FID fid ) const
* the applicable column segment file does not exist, it is created.
* If this is the very first file for the specified DBRoot, then the
* partition and segment number must be specified, else the selected
* partition and segment numbers are returned.
* partition and segment numbers are returned. This method tries to
* optimize full extent creation by skipping disk space
* preallocation (when that optimization is enabled).
* PARAMETERS:
* oid - OID of the column to be extended
* emptyVal - Empty value to be used for oid
@@ -827,6 +833,7 @@ int FileOp::extendFile(
return rc;
// Initialize the contents of the extent.
// MCOL-498 optimize full extent creation.
rc = initColumnExtent( pFile,
dbRoot,
allocSize,
@@ -834,7 +841,8 @@ int FileOp::extendFile(
width,
newFile, // new or existing file
false, // don't expand; new extent
false ); // add full (not abbreviated) extent
false, // add full (not abbreviated) extent
true); // try to optimize extent creation
return rc;
}
@@ -982,6 +990,8 @@ int FileOp::addExtentExactFile(
return rc;
// Initialize the contents of the extent.
// CS doesn't optimize this file operation, so that the
// segment files remain valid, filled with empty magic values
rc = initColumnExtent( pFile,
dbRoot,
allocSize,
@@ -1006,6 +1016,9 @@ int FileOp::addExtentExactFile(
* This function can be used to initialize an entirely new extent, or
* to finish initializing an extent that has already been started.
* nBlocks controls how many 8192-byte blocks are to be written out.
* If bOptExtension is set, the method first checks the config for
* DBRootX.Prealloc. If preallocation is disabled there, disk space
* preallocation is skipped.
* PARAMETERS:
* pFile (in) - IDBDataFile* of column segment file to be written to
* dbRoot (in) - DBRoot of pFile
@@ -1016,6 +1029,7 @@ int FileOp::addExtentExactFile(
* headers will be included "if" it is a compressed file.
* bExpandExtent (in) - Expand existing extent, or initialize a new one
* bAbbrevExtent(in) - if creating new extent, is it an abbreviated extent
* bOptExtension(in) - skip full extent preallocation.
* RETURN:
* returns ERR_FILE_WRITE if an error occurs,
* else returns NO_ERROR.
@@ -1028,7 +1042,8 @@ int FileOp::initColumnExtent(
int width,
bool bNewFile,
bool bExpandExtent,
bool bAbbrevExtent )
bool bAbbrevExtent,
bool bOptExtension)
{
if ((bNewFile) && (m_compressionType))
{
@@ -1061,6 +1076,19 @@ int FileOp::initColumnExtent(
// Create vector of mutexes used to serialize extent access per DBRoot
initDbRootExtentMutexes( );
// MCOL-498 Skip the huge preallocations if the option is set
// for the dbroot. This check is skipped for abbreviated extents.
// It is better to check the bool than to call the function every time.
if ( bOptExtension )
{
bOptExtension = (idbdatafile::IDBPolicy::PreallocSpace(dbRoot))
? bOptExtension : false;
}
// Reduce the number of blocks allocated for abbreviated extents so that
// CS writes less when it creates a new table. This can't be zero
// b/c the Snappy compressed file format doesn't tolerate empty files.
int realNBlocks = ( bOptExtension && nBlocks <= MAX_INITIAL_EXTENT_BLOCKS_TO_DISK ) ? 3 : nBlocks;
// Determine the number of blocks in each call to fwrite(), and the
// number of fwrite() calls to make, based on this. In other words,
// we put a cap on the "writeSize" so that we don't allocate and write
@@ -1068,15 +1096,15 @@ int FileOp::initColumnExtent(
// expanding an abbreviated 64M extent, we may not have an even
// multiple of MAX_NBLOCKS to write; remWriteSize is the number of
// blocks above and beyond loopCount*MAX_NBLOCKS.
int writeSize = nBlocks * BYTE_PER_BLOCK; // 1M and 8M row extent size
int writeSize = realNBlocks * BYTE_PER_BLOCK; // 1M and 8M row extent size
int loopCount = 1;
int remWriteSize = 0;
if (nBlocks > MAX_NBLOCKS) // 64M row extent size
if (realNBlocks > MAX_NBLOCKS) // 64M row extent size
{
writeSize = MAX_NBLOCKS * BYTE_PER_BLOCK;
loopCount = nBlocks / MAX_NBLOCKS;
remWriteSize = nBlocks - (loopCount * MAX_NBLOCKS);
loopCount = realNBlocks / MAX_NBLOCKS;
remWriteSize = realNBlocks - (loopCount * MAX_NBLOCKS);
}
// Allocate a buffer, initialize it, and use it to create the extent
@@ -1096,69 +1124,77 @@ int FileOp::initColumnExtent(
Stats::stopParseEvent(WE_STATS_WAIT_TO_EXPAND_COL_EXTENT);
else
Stats::stopParseEvent(WE_STATS_WAIT_TO_CREATE_COL_EXTENT);
Stats::startParseEvent(WE_STATS_INIT_COL_EXTENT);
#endif
// Allocate buffer, and store in scoped_array to ensure its deletion.
// Create scope {...} to manage deletion of writeBuf.
// Skip space preallocation if configured to do so,
// falling back to a sequential write otherwise.
// Preallocation can't be avoided for full extents,
// e.g. ADD COLUMN DDL, b/c CS has to fill the file
// with empty magic values.
if ( !bOptExtension )
{
unsigned char* writeBuf = new unsigned char[writeSize];
boost::scoped_array<unsigned char> writeBufPtr( writeBuf );
#ifdef PROFILE
Stats::startParseEvent(WE_STATS_INIT_COL_EXTENT);
#endif
// Allocate buffer, store it in scoped_array to ensure its deletion.
// Create scope {...} to manage deletion of writeBuf.
{
setEmptyBuf( writeBuf, writeSize, emptyVal, width );
unsigned char* writeBuf = new unsigned char[writeSize];
boost::scoped_array<unsigned char> writeBufPtr( writeBuf );
setEmptyBuf( writeBuf, writeSize, emptyVal, width );
#ifdef PROFILE
Stats::stopParseEvent(WE_STATS_INIT_COL_EXTENT);
if (bExpandExtent)
Stats::startParseEvent(WE_STATS_EXPAND_COL_EXTENT);
else
Stats::startParseEvent(WE_STATS_CREATE_COL_EXTENT);
#endif
//std::ostringstream oss;
//oss << "initColExtent: width-" << width <<
//"; loopCount-" << loopCount <<
//"; writeSize-" << writeSize;
//std::cout << oss.str() << std::endl;
if (remWriteSize > 0)
{
if ( pFile->write( writeBuf, remWriteSize ) != remWriteSize )
{
return ERR_FILE_WRITE;
}
}
for (int j = 0; j < loopCount; j++)
{
if ( pFile->write( writeBuf, writeSize ) != writeSize )
{
return ERR_FILE_WRITE;
}
}
}
//@Bug 3219. update the compression header after the extent is expanded.
if ((!bNewFile) && (m_compressionType) && (bExpandExtent))
{
updateColumnExtent(pFile, nBlocks);
}
// @bug 2378. Synchronize here to avoid write buffer pile up too much,
// which could cause controllernode to timeout later when it needs to
// save a snapshot.
pFile->flush();
#ifdef PROFILE
Stats::stopParseEvent(WE_STATS_INIT_COL_EXTENT);
if (bExpandExtent)
Stats::startParseEvent(WE_STATS_EXPAND_COL_EXTENT);
Stats::stopParseEvent(WE_STATS_EXPAND_COL_EXTENT);
else
Stats::startParseEvent(WE_STATS_CREATE_COL_EXTENT);
Stats::stopParseEvent(WE_STATS_CREATE_COL_EXTENT);
#endif
//std::ostringstream oss;
//oss << "initColExtent: width-" << width <<
//"; loopCount-" << loopCount <<
//"; writeSize-" << writeSize;
//std::cout << oss.str() << std::endl;
if (remWriteSize > 0)
{
if ( pFile->write( writeBuf, remWriteSize ) != remWriteSize )
{
return ERR_FILE_WRITE;
}
}
for (int j = 0; j < loopCount; j++)
{
if ( pFile->write( writeBuf, writeSize ) != writeSize )
{
return ERR_FILE_WRITE;
}
}
}
//@Bug 3219. update the compression header after the extent is expanded.
if ((!bNewFile) && (m_compressionType) && (bExpandExtent))
{
updateColumnExtent(pFile, nBlocks);
}
// @bug 2378. Synchronize here to avoid write buffer pile up too much,
// which could cause controllernode to timeout later when it needs to
// save a snapshot.
pFile->flush();
#ifdef PROFILE
if (bExpandExtent)
Stats::stopParseEvent(WE_STATS_EXPAND_COL_EXTENT);
else
Stats::stopParseEvent(WE_STATS_CREATE_COL_EXTENT);
#endif
}
return NO_ERROR;
@@ -1185,7 +1221,7 @@ int FileOp::initAbbrevCompColumnExtent(
uint64_t emptyVal,
int width)
{
// Reserve disk space for full abbreviated extent
// Reserve disk space for optimized abbreviated extent
int rc = initColumnExtent( pFile,
dbRoot,
nBlocks,
@@ -1193,8 +1229,8 @@ int FileOp::initAbbrevCompColumnExtent(
width,
true, // new file
false, // don't expand; add new extent
true ); // add abbreviated extent
true, // add abbreviated extent
true); // optimize the initial extent
if (rc != NO_ERROR)
{
return rc;
@@ -1767,6 +1803,9 @@ int FileOp::writeHeaders(IDBDataFile* pFile, const char* controlHdr,
* This function can be used to initialize an entirely new extent, or
* to finish initializing an extent that has already been started.
* nBlocks controls how many 8192-byte blocks are to be written out.
* If bOptExtension is set, the method first checks the config for
* DBRootX.Prealloc. If preallocation is disabled there, disk space
* preallocation is skipped.
* PARAMETERS:
* pFile (in) - IDBDataFile* of column segment file to be written to
* dbRoot (in) - DBRoot of pFile
@@ -1774,6 +1813,7 @@ int FileOp::writeHeaders(IDBDataFile* pFile, const char* controlHdr,
* blockHdrInit(in) - data used to initialize each block
* blockHdrInitSize(in) - number of bytes in blockHdrInit
* bExpandExtent (in) - Expand existing extent, or initialize a new one
* bOptExtension(in) - skip full extent preallocation.
* RETURN:
* returns ERR_FILE_WRITE if an error occurs,
* else returns NO_ERROR.
@@ -1784,7 +1824,8 @@ int FileOp::initDctnryExtent(
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize,
bool bExpandExtent )
bool bExpandExtent,
bool bOptExtension )
{
// @bug5769 Don't initialize extents or truncate db files on HDFS
if (idbdatafile::IDBPolicy::useHdfs())
@@ -1801,6 +1842,21 @@ int FileOp::initDctnryExtent(
// Create vector of mutexes used to serialize extent access per DBRoot
initDbRootExtentMutexes( );
// MCOL-498 Skip the huge preallocations if the option is set
// for the dbroot. This check is skipped for abbreviated extents.
// It is better to check the bool than to call the function every time.
// CS uses non-compressed dict files for its system catalog, so
// it doesn't optimize non-compressed dict creation.
if ( bOptExtension )
{
bOptExtension = (idbdatafile::IDBPolicy::PreallocSpace(dbRoot)
&& m_compressionType) ? bOptExtension : false;
}
// Reduce the number of blocks allocated for abbreviated extents so that
// CS writes less when it creates a new table. This can't be zero
// b/c the Snappy compressed file format doesn't tolerate empty files.
int realNBlocks = ( bOptExtension && nBlocks <= MAX_INITIAL_EXTENT_BLOCKS_TO_DISK ) ? 1 : nBlocks;
// Determine the number of blocks in each call to fwrite(), and the
// number of fwrite() calls to make, based on this. In other words,
// we put a cap on the "writeSize" so that we don't allocate and write
@@ -1808,99 +1864,101 @@ int FileOp::initDctnryExtent(
// expanding an abbreviated 64M extent, we may not have an even
// multiple of MAX_NBLOCKS to write; remWriteSize is the number of
// blocks above and beyond loopCount*MAX_NBLOCKS.
int writeSize = nBlocks * BYTE_PER_BLOCK; // 1M and 8M row extent size
int writeSize = realNBlocks * BYTE_PER_BLOCK; // 1M and 8M row extent size
int loopCount = 1;
int remWriteSize = 0;
if (nBlocks > MAX_NBLOCKS) // 64M row extent size
if (realNBlocks > MAX_NBLOCKS) // 64M row extent size
{
writeSize = MAX_NBLOCKS * BYTE_PER_BLOCK;
loopCount = nBlocks / MAX_NBLOCKS;
remWriteSize = nBlocks - (loopCount * MAX_NBLOCKS);
loopCount = realNBlocks / MAX_NBLOCKS;
remWriteSize = realNBlocks - (loopCount * MAX_NBLOCKS);
}
// Allocate a buffer, initialize it, and use it to create the extent
idbassert(dbRoot > 0);
#ifdef PROFILE
#ifdef PROFILE
if (bExpandExtent)
Stats::startParseEvent(WE_STATS_WAIT_TO_EXPAND_DCT_EXTENT);
else
Stats::startParseEvent(WE_STATS_WAIT_TO_CREATE_DCT_EXTENT);
#endif
boost::mutex::scoped_lock lk(*m_DbRootAddExtentMutexes[dbRoot]);
#ifdef PROFILE
boost::mutex::scoped_lock lk(*m_DbRootAddExtentMutexes[dbRoot]);
#ifdef PROFILE
if (bExpandExtent)
Stats::stopParseEvent(WE_STATS_WAIT_TO_EXPAND_DCT_EXTENT);
else
Stats::stopParseEvent(WE_STATS_WAIT_TO_CREATE_DCT_EXTENT);
Stats::startParseEvent(WE_STATS_INIT_DCT_EXTENT);
#endif
// Allocate buffer, and store in scoped_array to ensure its deletion.
// Create scope {...} to manage deletion of writeBuf.
// Skip space preallocation if configured to do so,
// falling back to a sequential write otherwise.
// Preallocation can't be avoided for full extents,
// e.g. ADD COLUMN DDL, b/c CS has to fill the file
// with empty magic values.
if ( !bOptExtension )
{
unsigned char* writeBuf = new unsigned char[writeSize];
boost::scoped_array<unsigned char> writeBufPtr( writeBuf );
memset(writeBuf, 0, writeSize);
for (int i = 0; i < nBlocks; i++)
// Allocate buffer, and store in scoped_array to ensure its deletion.
// Create scope {...} to manage deletion of writeBuf.
{
memcpy( writeBuf + (i * BYTE_PER_BLOCK),
blockHdrInit,
blockHdrInitSize );
}
#ifdef PROFILE
Stats::stopParseEvent(WE_STATS_INIT_DCT_EXTENT);
if (bExpandExtent)
Stats::startParseEvent(WE_STATS_EXPAND_DCT_EXTENT);
else
Stats::startParseEvent(WE_STATS_CREATE_DCT_EXTENT);
Stats::startParseEvent(WE_STATS_INIT_DCT_EXTENT);
#endif
//std::ostringstream oss;
//oss << "initDctnryExtent: width-8(assumed)" <<
//"; loopCount-" << loopCount <<
//"; writeSize-" << writeSize;
//std::cout << oss.str() << std::endl;
if (remWriteSize > 0)
{
if (pFile->write( writeBuf, remWriteSize ) != remWriteSize)
{
return ERR_FILE_WRITE;
}
}
unsigned char* writeBuf = new unsigned char[writeSize];
boost::scoped_array<unsigned char> writeBufPtr( writeBuf );
for (int j = 0; j < loopCount; j++)
{
if (pFile->write( writeBuf, writeSize ) != writeSize)
{
return ERR_FILE_WRITE;
}
}
}
memset(writeBuf, 0, writeSize);
if (m_compressionType)
for (int i = 0; i < realNBlocks; i++)
{
memcpy( writeBuf + (i * BYTE_PER_BLOCK),
blockHdrInit,
blockHdrInitSize );
}
#ifdef PROFILE
Stats::stopParseEvent(WE_STATS_INIT_DCT_EXTENT);
if (bExpandExtent)
Stats::startParseEvent(WE_STATS_EXPAND_DCT_EXTENT);
else
Stats::startParseEvent(WE_STATS_CREATE_DCT_EXTENT);
#endif
if (remWriteSize > 0)
{
if (pFile->write( writeBuf, remWriteSize ) != remWriteSize)
{
return ERR_FILE_WRITE;
}
}
for (int j = 0; j < loopCount; j++)
{
if (pFile->write( writeBuf, writeSize ) != writeSize)
{
return ERR_FILE_WRITE;
}
}
// CS doesn't account for flush timings.
#ifdef PROFILE
if (bExpandExtent)
Stats::stopParseEvent(WE_STATS_EXPAND_DCT_EXTENT);
else
Stats::stopParseEvent(WE_STATS_CREATE_DCT_EXTENT);
#endif
}
} // preallocation fallback end
// MCOL-498 CS has to set the number of blocks in the chunk header
if ( m_compressionType )
{
updateDctnryExtent(pFile, nBlocks);
// Synchronize to avoid write buffer pile up too much, which could cause
// controllernode to timeout later when it needs to save a snapshot.
}
pFile->flush();
#ifdef PROFILE
if (bExpandExtent)
Stats::stopParseEvent(WE_STATS_EXPAND_DCT_EXTENT);
else
Stats::stopParseEvent(WE_STATS_CREATE_DCT_EXTENT);
#endif
}
return NO_ERROR;
@@ -2223,6 +2281,15 @@ int FileOp::oid2FileName( FID fid,
#endif
// Need this stub to use ColumnOp::writeRow in the unit tests
#ifdef WITH_UNIT_TESTS
if (fid == 42)
{
sprintf(fullFileName, "./versionbuffer");
return NO_ERROR;
}
#endif
/* If is a version buffer file, the format is different. */
if (fid < 1000)
{
@@ -2790,7 +2857,8 @@ int FileOp::expandAbbrevColumnExtent(
int rc = FileOp::initColumnExtent(pFile, dbRoot, blksToAdd, emptyVal, width,
false, // existing file
true, // expand existing extent
false); // n/a since not adding new extent
false, // n/a since not adding new extent
true); // optimize segment file extension
return rc;
}

View File

@@ -50,6 +50,8 @@
#define EXPORT
#endif
#define MAX_NBLOCKS 8192
#include "brmtypes.h"
/** Namespace WriteEngine */
@@ -90,6 +92,15 @@ public:
execplan::CalpontSystemCatalog::ColDataType colDataType,
uint64_t emptyVal = 0, int width = 1 ) ;
/**
* @brief Create a file with a fixed file size by its name.
* Changed to public for UT.
*/
int createFile( const char* fileName, int fileSize,
uint64_t emptyVal, int width,
uint16_t dbRoot );
/**
* @brief Delete a file
*/
@@ -324,13 +335,15 @@ public:
* @param blockHdrInit(in) - data used to initialize each block header
* @param blockHdrInitSize(in) - number of bytes in blockHdrInit
* @param bExpandExtent (in) - Expand existing extent, or initialize new one
* @param bOptExtension (in) - skip or optimize full extent preallocation
*/
EXPORT int initDctnryExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize,
bool bExpandExtent );
bool bExpandExtent,
bool bOptExtension = false );
/**
* @brief Check whether it is an directory
@@ -463,6 +476,25 @@ public:
int compressionType() const;
EXPORT virtual int flushFile(int rc, std::map<FID, FID>& oids);
// Initialize an extent in a column segment file
// pFile (in) IDBDataFile* of column segment file to be written to
// dbRoot (in) - DBRoot of pFile
// nBlocks (in) - number of blocks to be written for an extent
// emptyVal(in) - empty value to be used for column data values
// width (in) - width of the applicable column
// bNewFile (in) - Adding extent to new file
// bExpandExtent (in) - Expand existing extent, or initialize new one
// bAbbrevExtent (in) - If adding new extent, is it abbreviated
// bOptExtension(in) - skip or optimize full extent preallocation
int initColumnExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width,
bool bNewFile,
bool bExpandExtent,
bool bAbbrevExtent,
bool bOptExtension=false );
protected:
EXPORT virtual int updateColumnExtent(IDBDataFile* pFile, int nBlocks);
@@ -475,10 +507,6 @@ private:
FileOp(const FileOp& rhs);
FileOp& operator=(const FileOp& rhs);
int createFile( const char* fileName, int fileSize,
uint64_t emptyVal, int width,
uint16_t dbRoot );
int expandAbbrevColumnChunk( IDBDataFile* pFile,
uint64_t emptyVal,
int colWidth,
@@ -491,24 +519,6 @@ private:
uint64_t emptyVal,
int width);
// Initialize an extent in a column segment file
// pFile (in) IDBDataFile* of column segment file to be written to
// dbRoot (in) - DBRoot of pFile
// nBlocks (in) - number of blocks to be written for an extent
// emptyVal(in) - empty value to be used for column data values
// width (in) - width of the applicable column
// bNewFile (in) - Adding extent to new file
// bExpandExtent (in) - Expand existing extent, or initialize new one
// bAbbrevExtent (in) - If adding new extent, is it abbreviated
int initColumnExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width,
bool bNewFile,
bool bExpandExtent,
bool bAbbrevExtent );
static void initDbRootExtentMutexes();
static void removeDbRootExtentMutexes();

View File

@@ -393,7 +393,7 @@ struct IdxMultiColKey
curMask.reset();
curLevel = maxLevel = 0;
totalBit = 0;
memset( testbitArray, 0, IDX_MAX_MULTI_COL_IDX_LEVEL);
memset( testbitArray, 0, IDX_MAX_MULTI_COL_IDX_LEVEL * sizeof(testbitArray[0]));
memset( keyBuf, 0, IDX_MAX_MULTI_COL_BIT / 8 );
curMask = 0x1F;
curMask = curMask << (IDX_MAX_MULTI_COL_BIT - 5);
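The one-line change above fixes a classic memset pitfall: the third argument is a byte count, so passing the element count only clears part of an array whose elements are wider than one byte. A minimal standalone illustration (element type and array length assumed here, not taken from the header):
#include <cstring>
void clearExample()
{
    int testbitArray[16];
    memset(testbitArray, 0, 16);                            // clears 16 bytes = only the first 4 ints
    memset(testbitArray, 0, 16 * sizeof(testbitArray[0]));  // clears all 16 ints, as in the fix above
    memset(testbitArray, 0, sizeof(testbitArray));          // simplest form when the array type is visible
}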

View File

@@ -490,7 +490,7 @@ struct CacheControl /** @brief Cache control structure */
int checkInterval; /** @brief A check point interval in seconds */
CacheControl()
{
totalBlock = pctFree = checkInterval; /** @brief constructor */
totalBlock = pctFree = checkInterval = 0; /** @brief constructor */
}
};
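The fix above appends the missing "= 0" so the chained assignment actually zeroes all three members; previously checkInterval was read while still indeterminate and its garbage value was copied into totalBlock and pctFree. An equivalent, arguably clearer form is a member initializer list, sketched below rather than taken from the repo (only checkInterval's field comment is quoted from the header):
struct CacheControl /** @brief Cache control structure */
{
    int totalBlock;
    int pctFree;
    int checkInterval;  /** @brief A check point interval in seconds */
    // Zero-initialize every member explicitly.
    CacheControl() : totalBlock(0), pctFree(0), checkInterval(0) {}
};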